diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..efb7fb79bf64bbe58d3a92cd14ac67562dba26d4 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py @@ -0,0 +1,273 @@ +import colorsys +import io +from time import process_time + +from pip._vendor.rich import box +from pip._vendor.rich.color import Color +from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult +from pip._vendor.rich.markdown import Markdown +from pip._vendor.rich.measure import Measurement +from pip._vendor.rich.pretty import Pretty +from pip._vendor.rich.segment import Segment +from pip._vendor.rich.style import Style +from pip._vendor.rich.syntax import Syntax +from pip._vendor.rich.table import Table +from pip._vendor.rich.text import Text + + +class ColorBox: + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + for y in range(0, 5): + for x in range(options.max_width): + h = x / options.max_width + l = 0.1 + ((y / 5) * 0.7) + r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0) + r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0) + bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255) + color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255) + yield Segment("▄", Style(color=color, bgcolor=bgcolor)) + yield Segment.line() + + def __rich_measure__( + self, console: "Console", options: ConsoleOptions + ) -> Measurement: + return Measurement(1, options.max_width) + + +def make_test_card() -> Table: + """Get a renderable that demonstrates a number of features.""" + table = Table.grid(padding=1, pad_edge=True) + table.title = "Rich features" + table.add_column("Feature", no_wrap=True, justify="center", style="bold red") + table.add_column("Demonstration") + + color_table = Table( + box=None, + expand=False, + show_header=False, + 
show_edge=False, + pad_edge=False, + ) + color_table.add_row( + ( + "✓ [bold green]4-bit color[/]\n" + "✓ [bold blue]8-bit color[/]\n" + "✓ [bold magenta]Truecolor (16.7 million)[/]\n" + "✓ [bold yellow]Dumb terminals[/]\n" + "✓ [bold cyan]Automatic color conversion" + ), + ColorBox(), + ) + + table.add_row("Colors", color_table) + + table.add_row( + "Styles", + "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].", + ) + + lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus." + lorem_table = Table.grid(padding=1, collapse_padding=True) + lorem_table.pad_edge = False + lorem_table.add_row( + Text(lorem, justify="left", style="green"), + Text(lorem, justify="center", style="yellow"), + Text(lorem, justify="right", style="blue"), + Text(lorem, justify="full", style="red"), + ) + table.add_row( + "Text", + Group( + Text.from_markup( + """Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n""" + ), + lorem_table, + ), + ) + + def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table: + table = Table(show_header=False, pad_edge=False, box=None, expand=True) + table.add_column("1", ratio=1) + table.add_column("2", ratio=1) + table.add_row(renderable1, renderable2) + return table + + table.add_row( + "Asian\nlanguage\nsupport", + ":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다", + ) + + markup_example = ( + "[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! 
" + ":+1: :apple: :ant: :bear: :baguette_bread: :bus: " + ) + table.add_row("Markup", markup_example) + + example_table = Table( + show_edge=False, + show_header=True, + expand=False, + row_styles=["none", "dim"], + box=box.SIMPLE, + ) + example_table.add_column("[green]Date", style="green", no_wrap=True) + example_table.add_column("[blue]Title", style="blue") + example_table.add_column( + "[cyan]Production Budget", + style="cyan", + justify="right", + no_wrap=True, + ) + example_table.add_column( + "[magenta]Box Office", + style="magenta", + justify="right", + no_wrap=True, + ) + example_table.add_row( + "Dec 20, 2019", + "Star Wars: The Rise of Skywalker", + "$275,000,000", + "$375,126,118", + ) + example_table.add_row( + "May 25, 2018", + "[b]Solo[/]: A Star Wars Story", + "$275,000,000", + "$393,151,347", + ) + example_table.add_row( + "Dec 15, 2017", + "Star Wars Ep. VIII: The Last Jedi", + "$262,000,000", + "[bold]$1,332,539,889[/bold]", + ) + example_table.add_row( + "May 19, 1999", + "Star Wars Ep. [b]I[/b]: [i]The phantom Menace", + "$115,000,000", + "$1,027,044,677", + ) + + table.add_row("Tables", example_table) + + code = '''\ +def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + for value in iter_values: + yield False, previous_value + previous_value = value + yield True, previous_value''' + + pretty_data = { + "foo": [ + 3.1427, + ( + "Paul Atreides", + "Vladimir Harkonnen", + "Thufir Hawat", + ), + ], + "atomic": (False, True, None), + } + table.add_row( + "Syntax\nhighlighting\n&\npretty\nprinting", + comparison( + Syntax(code, "python3", line_numbers=True, indent_guides=True), + Pretty(pretty_data, indent_guides=True), + ), + ) + + markdown_example = """\ +# Markdown + +Supports much of the *markdown* __syntax__! 
+ +- Headers +- Basic formatting: **bold**, *italic*, `code` +- Block quotes +- Lists, and more... + """ + table.add_row( + "Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example)) + ) + + table.add_row( + "+more!", + """Progress bars, columns, styled logging handler, tracebacks, etc...""", + ) + return table + + +if __name__ == "__main__": # pragma: no cover + console = Console( + file=io.StringIO(), + force_terminal=True, + ) + test_card = make_test_card() + + # Print once to warm cache + start = process_time() + console.print(test_card) + pre_cache_taken = round((process_time() - start) * 1000.0, 1) + + console.file = io.StringIO() + + start = process_time() + console.print(test_card) + taken = round((process_time() - start) * 1000.0, 1) + + c = Console(record=True) + c.print(test_card) + + print(f"rendered in {pre_cache_taken}ms (cold cache)") + print(f"rendered in {taken}ms (warm cache)") + + from pip._vendor.rich.panel import Panel + + console = Console() + + sponsor_message = Table.grid(padding=1) + sponsor_message.add_column(style="green", justify="right") + sponsor_message.add_column(no_wrap=True) + + sponsor_message.add_row( + "Textualize", + "[u blue link=https://github.com/textualize]https://github.com/textualize", + ) + sponsor_message.add_row( + "Twitter", + "[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan", + ) + + intro_message = Text.from_markup( + """\ +We hope you enjoy using Rich! 
+ +Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/] + +- Will McGugan""" + ) + + message = Table.grid(padding=2) + message.add_column() + message.add_column(no_wrap=True) + message.add_row(intro_message, sponsor_message) + + console.print( + Panel.fit( + message, + box=box.ROUNDED, + padding=(1, 2), + title="[b red]Thanks for trying out Rich!", + border_style="bright_blue", + ), + justify="center", + ) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_replace.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..bb2cafa18011e7115773055338291c366f173d6f --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_replace.py @@ -0,0 +1,32 @@ +from typing import Callable, Match, Optional +import re + +from ._emoji_codes import EMOJI + + +_ReStringMatch = Match[str] # regex match object +_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub +_EmojiSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re + + +def _emoji_replace( + text: str, + default_variant: Optional[str] = None, + _emoji_sub: _EmojiSubMethod = re.compile(r"(:(\S*?)(?:(?:\-)(emoji|text))?:)").sub, +) -> str: + """Replace emoji code in text.""" + get_emoji = EMOJI.__getitem__ + variants = {"text": "\uFE0E", "emoji": "\uFE0F"} + get_variant = variants.get + default_variant_code = variants.get(default_variant, "") if default_variant else "" + + def do_replace(match: Match[str]) -> str: + emoji_code, emoji_name, variant = match.groups() + try: + return get_emoji(emoji_name.lower()) + get_variant( + variant, default_variant_code + ) + except KeyError: + return emoji_code + + return _emoji_sub(do_replace, text) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_log_render.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_log_render.py new file mode 100644 index 
0000000000000000000000000000000000000000..fc16c84437a8a34231c44d3f0a331459ddcb0f34 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_log_render.py @@ -0,0 +1,94 @@ +from datetime import datetime +from typing import Iterable, List, Optional, TYPE_CHECKING, Union, Callable + + +from .text import Text, TextType + +if TYPE_CHECKING: + from .console import Console, ConsoleRenderable, RenderableType + from .table import Table + +FormatTimeCallable = Callable[[datetime], Text] + + +class LogRender: + def __init__( + self, + show_time: bool = True, + show_level: bool = False, + show_path: bool = True, + time_format: Union[str, FormatTimeCallable] = "[%x %X]", + omit_repeated_times: bool = True, + level_width: Optional[int] = 8, + ) -> None: + self.show_time = show_time + self.show_level = show_level + self.show_path = show_path + self.time_format = time_format + self.omit_repeated_times = omit_repeated_times + self.level_width = level_width + self._last_time: Optional[Text] = None + + def __call__( + self, + console: "Console", + renderables: Iterable["ConsoleRenderable"], + log_time: Optional[datetime] = None, + time_format: Optional[Union[str, FormatTimeCallable]] = None, + level: TextType = "", + path: Optional[str] = None, + line_no: Optional[int] = None, + link_path: Optional[str] = None, + ) -> "Table": + from .containers import Renderables + from .table import Table + + output = Table.grid(padding=(0, 1)) + output.expand = True + if self.show_time: + output.add_column(style="log.time") + if self.show_level: + output.add_column(style="log.level", width=self.level_width) + output.add_column(ratio=1, style="log.message", overflow="fold") + if self.show_path and path: + output.add_column(style="log.path") + row: List["RenderableType"] = [] + if self.show_time: + log_time = log_time or console.get_datetime() + time_format = time_format or self.time_format + if callable(time_format): + log_time_display = time_format(log_time) + else: + 
log_time_display = Text(log_time.strftime(time_format)) + if log_time_display == self._last_time and self.omit_repeated_times: + row.append(Text(" " * len(log_time_display))) + else: + row.append(log_time_display) + self._last_time = log_time_display + if self.show_level: + row.append(level) + + row.append(Renderables(renderables)) + if self.show_path and path: + path_text = Text() + path_text.append( + path, style=f"link file://{link_path}" if link_path else "" + ) + if line_no: + path_text.append(":") + path_text.append( + f"{line_no}", + style=f"link file://{link_path}#{line_no}" if link_path else "", + ) + row.append(path_text) + + output.add_row(*row) + return output + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + + c = Console() + c.print("[on blue]Hello", justify="right") + c.log("[on blue]hello", justify="right") diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..01c6cafbe53f1fcb12f7b382b2b35e2fd2c69933 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py @@ -0,0 +1,43 @@ +from typing import Iterable, Tuple, TypeVar + +T = TypeVar("T") + + +def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for first value.""" + iter_values = iter(values) + try: + value = next(iter_values) + except StopIteration: + return + yield True, value + for value in iter_values: + yield False, value + + +def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + for value in iter_values: + yield False, previous_value + previous_value = value + yield True, previous_value + + +def loop_first_last(values: Iterable[T]) -> 
Iterable[Tuple[bool, bool, T]]: + """Iterate and generate a tuple with a flag for first and last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + first = True + for value in iter_values: + yield first, False, previous_value + first = False + previous_value = value + yield first, True, previous_value diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_palettes.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_palettes.py new file mode 100644 index 0000000000000000000000000000000000000000..3c748d33e45bfcdc690ceee490cbb50b516cd2b3 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_palettes.py @@ -0,0 +1,309 @@ +from .palette import Palette + + +# Taken from https://en.wikipedia.org/wiki/ANSI_escape_code (Windows 10 column) +WINDOWS_PALETTE = Palette( + [ + (12, 12, 12), + (197, 15, 31), + (19, 161, 14), + (193, 156, 0), + (0, 55, 218), + (136, 23, 152), + (58, 150, 221), + (204, 204, 204), + (118, 118, 118), + (231, 72, 86), + (22, 198, 12), + (249, 241, 165), + (59, 120, 255), + (180, 0, 158), + (97, 214, 214), + (242, 242, 242), + ] +) + +# # The standard ansi colors (including bright variants) +STANDARD_PALETTE = Palette( + [ + (0, 0, 0), + (170, 0, 0), + (0, 170, 0), + (170, 85, 0), + (0, 0, 170), + (170, 0, 170), + (0, 170, 170), + (170, 170, 170), + (85, 85, 85), + (255, 85, 85), + (85, 255, 85), + (255, 255, 85), + (85, 85, 255), + (255, 85, 255), + (85, 255, 255), + (255, 255, 255), + ] +) + + +# The 256 color palette +EIGHT_BIT_PALETTE = Palette( + [ + (0, 0, 0), + (128, 0, 0), + (0, 128, 0), + (128, 128, 0), + (0, 0, 128), + (128, 0, 128), + (0, 128, 128), + (192, 192, 192), + (128, 128, 128), + (255, 0, 0), + (0, 255, 0), + (255, 255, 0), + (0, 0, 255), + (255, 0, 255), + (0, 255, 255), + (255, 255, 255), + (0, 0, 0), + (0, 0, 95), + (0, 0, 135), + (0, 0, 175), + (0, 0, 215), + (0, 0, 255), + (0, 95, 0), + (0, 95, 95), + (0, 95, 135), + (0, 95, 
175), + (0, 95, 215), + (0, 95, 255), + (0, 135, 0), + (0, 135, 95), + (0, 135, 135), + (0, 135, 175), + (0, 135, 215), + (0, 135, 255), + (0, 175, 0), + (0, 175, 95), + (0, 175, 135), + (0, 175, 175), + (0, 175, 215), + (0, 175, 255), + (0, 215, 0), + (0, 215, 95), + (0, 215, 135), + (0, 215, 175), + (0, 215, 215), + (0, 215, 255), + (0, 255, 0), + (0, 255, 95), + (0, 255, 135), + (0, 255, 175), + (0, 255, 215), + (0, 255, 255), + (95, 0, 0), + (95, 0, 95), + (95, 0, 135), + (95, 0, 175), + (95, 0, 215), + (95, 0, 255), + (95, 95, 0), + (95, 95, 95), + (95, 95, 135), + (95, 95, 175), + (95, 95, 215), + (95, 95, 255), + (95, 135, 0), + (95, 135, 95), + (95, 135, 135), + (95, 135, 175), + (95, 135, 215), + (95, 135, 255), + (95, 175, 0), + (95, 175, 95), + (95, 175, 135), + (95, 175, 175), + (95, 175, 215), + (95, 175, 255), + (95, 215, 0), + (95, 215, 95), + (95, 215, 135), + (95, 215, 175), + (95, 215, 215), + (95, 215, 255), + (95, 255, 0), + (95, 255, 95), + (95, 255, 135), + (95, 255, 175), + (95, 255, 215), + (95, 255, 255), + (135, 0, 0), + (135, 0, 95), + (135, 0, 135), + (135, 0, 175), + (135, 0, 215), + (135, 0, 255), + (135, 95, 0), + (135, 95, 95), + (135, 95, 135), + (135, 95, 175), + (135, 95, 215), + (135, 95, 255), + (135, 135, 0), + (135, 135, 95), + (135, 135, 135), + (135, 135, 175), + (135, 135, 215), + (135, 135, 255), + (135, 175, 0), + (135, 175, 95), + (135, 175, 135), + (135, 175, 175), + (135, 175, 215), + (135, 175, 255), + (135, 215, 0), + (135, 215, 95), + (135, 215, 135), + (135, 215, 175), + (135, 215, 215), + (135, 215, 255), + (135, 255, 0), + (135, 255, 95), + (135, 255, 135), + (135, 255, 175), + (135, 255, 215), + (135, 255, 255), + (175, 0, 0), + (175, 0, 95), + (175, 0, 135), + (175, 0, 175), + (175, 0, 215), + (175, 0, 255), + (175, 95, 0), + (175, 95, 95), + (175, 95, 135), + (175, 95, 175), + (175, 95, 215), + (175, 95, 255), + (175, 135, 0), + (175, 135, 95), + (175, 135, 135), + (175, 135, 175), + (175, 135, 215), + (175, 
135, 255), + (175, 175, 0), + (175, 175, 95), + (175, 175, 135), + (175, 175, 175), + (175, 175, 215), + (175, 175, 255), + (175, 215, 0), + (175, 215, 95), + (175, 215, 135), + (175, 215, 175), + (175, 215, 215), + (175, 215, 255), + (175, 255, 0), + (175, 255, 95), + (175, 255, 135), + (175, 255, 175), + (175, 255, 215), + (175, 255, 255), + (215, 0, 0), + (215, 0, 95), + (215, 0, 135), + (215, 0, 175), + (215, 0, 215), + (215, 0, 255), + (215, 95, 0), + (215, 95, 95), + (215, 95, 135), + (215, 95, 175), + (215, 95, 215), + (215, 95, 255), + (215, 135, 0), + (215, 135, 95), + (215, 135, 135), + (215, 135, 175), + (215, 135, 215), + (215, 135, 255), + (215, 175, 0), + (215, 175, 95), + (215, 175, 135), + (215, 175, 175), + (215, 175, 215), + (215, 175, 255), + (215, 215, 0), + (215, 215, 95), + (215, 215, 135), + (215, 215, 175), + (215, 215, 215), + (215, 215, 255), + (215, 255, 0), + (215, 255, 95), + (215, 255, 135), + (215, 255, 175), + (215, 255, 215), + (215, 255, 255), + (255, 0, 0), + (255, 0, 95), + (255, 0, 135), + (255, 0, 175), + (255, 0, 215), + (255, 0, 255), + (255, 95, 0), + (255, 95, 95), + (255, 95, 135), + (255, 95, 175), + (255, 95, 215), + (255, 95, 255), + (255, 135, 0), + (255, 135, 95), + (255, 135, 135), + (255, 135, 175), + (255, 135, 215), + (255, 135, 255), + (255, 175, 0), + (255, 175, 95), + (255, 175, 135), + (255, 175, 175), + (255, 175, 215), + (255, 175, 255), + (255, 215, 0), + (255, 215, 95), + (255, 215, 135), + (255, 215, 175), + (255, 215, 215), + (255, 215, 255), + (255, 255, 0), + (255, 255, 95), + (255, 255, 135), + (255, 255, 175), + (255, 255, 215), + (255, 255, 255), + (8, 8, 8), + (18, 18, 18), + (28, 28, 28), + (38, 38, 38), + (48, 48, 48), + (58, 58, 58), + (68, 68, 68), + (78, 78, 78), + (88, 88, 88), + (98, 98, 98), + (108, 108, 108), + (118, 118, 118), + (128, 128, 128), + (138, 138, 138), + (148, 148, 148), + (158, 158, 158), + (168, 168, 168), + (178, 178, 178), + (188, 188, 188), + (198, 198, 198), + (208, 208, 
208), + (218, 218, 218), + (228, 228, 228), + (238, 238, 238), + ] +) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py new file mode 100644 index 0000000000000000000000000000000000000000..d0bb1fe751677f0ee83fc6bb876ed72443fdcde7 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py @@ -0,0 +1,482 @@ +""" +Spinners are from: +* cli-spinners: + MIT License + Copyright (c) Sindre Sorhus (sindresorhus.com) + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE + FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + IN THE SOFTWARE. 
+""" + +SPINNERS = { + "dots": { + "interval": 80, + "frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏", + }, + "dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"}, + "dots3": { + "interval": 80, + "frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓", + }, + "dots4": { + "interval": 80, + "frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆", + }, + "dots5": { + "interval": 80, + "frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋", + }, + "dots6": { + "interval": 80, + "frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁", + }, + "dots7": { + "interval": 80, + "frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈", + }, + "dots8": { + "interval": 80, + "frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈", + }, + "dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"}, + "dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"}, + "dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"}, + "dots12": { + "interval": 80, + "frames": [ + "⢀⠀", + "⡀⠀", + "⠄⠀", + "⢂⠀", + "⡂⠀", + "⠅⠀", + "⢃⠀", + "⡃⠀", + "⠍⠀", + "⢋⠀", + "⡋⠀", + "⠍⠁", + "⢋⠁", + "⡋⠁", + "⠍⠉", + "⠋⠉", + "⠋⠉", + "⠉⠙", + "⠉⠙", + "⠉⠩", + "⠈⢙", + "⠈⡙", + "⢈⠩", + "⡀⢙", + "⠄⡙", + "⢂⠩", + "⡂⢘", + "⠅⡘", + "⢃⠨", + "⡃⢐", + "⠍⡐", + "⢋⠠", + "⡋⢀", + "⠍⡁", + "⢋⠁", + "⡋⠁", + "⠍⠉", + "⠋⠉", + "⠋⠉", + "⠉⠙", + "⠉⠙", + "⠉⠩", + "⠈⢙", + "⠈⡙", + "⠈⠩", + "⠀⢙", + "⠀⡙", + "⠀⠩", + "⠀⢘", + "⠀⡘", + "⠀⠨", + "⠀⢐", + "⠀⡐", + "⠀⠠", + "⠀⢀", + "⠀⡀", + ], + }, + "dots8Bit": { + "interval": 80, + "frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙" + "⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻" + "⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕" + "⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷" + "⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿", + }, + "line": {"interval": 130, "frames": ["-", "\\", "|", "/"]}, + "line2": {"interval": 100, "frames": "⠂-–—–-"}, + "pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"}, + "simpleDots": {"interval": 400, "frames": [". ", ".. ", "...", " "]}, + "simpleDotsScrolling": { + "interval": 200, + "frames": [". ", ".. 
", "...", " ..", " .", " "], + }, + "star": {"interval": 70, "frames": "✶✸✹✺✹✷"}, + "star2": {"interval": 80, "frames": "+x*"}, + "flip": { + "interval": 70, + "frames": "___-``'´-___", + }, + "hamburger": {"interval": 100, "frames": "☱☲☴"}, + "growVertical": { + "interval": 120, + "frames": "▁▃▄▅▆▇▆▅▄▃", + }, + "growHorizontal": { + "interval": 120, + "frames": "▏▎▍▌▋▊▉▊▋▌▍▎", + }, + "balloon": {"interval": 140, "frames": " .oO@* "}, + "balloon2": {"interval": 120, "frames": ".oO°Oo."}, + "noise": {"interval": 100, "frames": "▓▒░"}, + "bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"}, + "boxBounce": {"interval": 120, "frames": "▖▘▝▗"}, + "boxBounce2": {"interval": 100, "frames": "▌▀▐▄"}, + "triangle": {"interval": 50, "frames": "◢◣◤◥"}, + "arc": {"interval": 100, "frames": "◜◠◝◞◡◟"}, + "circle": {"interval": 120, "frames": "◡⊙◠"}, + "squareCorners": {"interval": 180, "frames": "◰◳◲◱"}, + "circleQuarters": {"interval": 120, "frames": "◴◷◶◵"}, + "circleHalves": {"interval": 50, "frames": "◐◓◑◒"}, + "squish": {"interval": 100, "frames": "╫╪"}, + "toggle": {"interval": 250, "frames": "⊶⊷"}, + "toggle2": {"interval": 80, "frames": "▫▪"}, + "toggle3": {"interval": 120, "frames": "□■"}, + "toggle4": {"interval": 100, "frames": "■□▪▫"}, + "toggle5": {"interval": 100, "frames": "▮▯"}, + "toggle6": {"interval": 300, "frames": "ဝ၀"}, + "toggle7": {"interval": 80, "frames": "⦾⦿"}, + "toggle8": {"interval": 100, "frames": "◍◌"}, + "toggle9": {"interval": 100, "frames": "◉◎"}, + "toggle10": {"interval": 100, "frames": "㊂㊀㊁"}, + "toggle11": {"interval": 50, "frames": "⧇⧆"}, + "toggle12": {"interval": 120, "frames": "☗☖"}, + "toggle13": {"interval": 80, "frames": "=*-"}, + "arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"}, + "arrow2": { + "interval": 80, + "frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "], + }, + "arrow3": { + "interval": 120, + "frames": ["▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"], + }, + "bouncingBar": { + "interval": 80, + "frames": [ + 
"[ ]", + "[= ]", + "[== ]", + "[=== ]", + "[ ===]", + "[ ==]", + "[ =]", + "[ ]", + "[ =]", + "[ ==]", + "[ ===]", + "[====]", + "[=== ]", + "[== ]", + "[= ]", + ], + }, + "bouncingBall": { + "interval": 80, + "frames": [ + "( ● )", + "( ● )", + "( ● )", + "( ● )", + "( ●)", + "( ● )", + "( ● )", + "( ● )", + "( ● )", + "(● )", + ], + }, + "smiley": {"interval": 200, "frames": ["😄 ", "😝 "]}, + "monkey": {"interval": 300, "frames": ["🙈 ", "🙈 ", "🙉 ", "🙊 "]}, + "hearts": {"interval": 100, "frames": ["💛 ", "💙 ", "💜 ", "💚 ", "❤️ "]}, + "clock": { + "interval": 100, + "frames": [ + "🕛 ", + "🕐 ", + "🕑 ", + "🕒 ", + "🕓 ", + "🕔 ", + "🕕 ", + "🕖 ", + "🕗 ", + "🕘 ", + "🕙 ", + "🕚 ", + ], + }, + "earth": {"interval": 180, "frames": ["🌍 ", "🌎 ", "🌏 "]}, + "material": { + "interval": 17, + "frames": [ + "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "███████▁▁▁▁▁▁▁▁▁▁▁▁▁", + "████████▁▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "██████████▁▁▁▁▁▁▁▁▁▁", + "███████████▁▁▁▁▁▁▁▁▁", + "█████████████▁▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁▁██████████████▁▁▁▁", + "▁▁▁██████████████▁▁▁", + "▁▁▁▁█████████████▁▁▁", + "▁▁▁▁██████████████▁▁", + "▁▁▁▁██████████████▁▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁▁██████████████", + "▁▁▁▁▁▁██████████████", + "▁▁▁▁▁▁▁█████████████", + "▁▁▁▁▁▁▁█████████████", + "▁▁▁▁▁▁▁▁████████████", + "▁▁▁▁▁▁▁▁████████████", + "▁▁▁▁▁▁▁▁▁███████████", + "▁▁▁▁▁▁▁▁▁███████████", + "▁▁▁▁▁▁▁▁▁▁██████████", + "▁▁▁▁▁▁▁▁▁▁██████████", + "▁▁▁▁▁▁▁▁▁▁▁▁████████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁██████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + 
"█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "████████▁▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "███████████▁▁▁▁▁▁▁▁▁", + "████████████▁▁▁▁▁▁▁▁", + "████████████▁▁▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁▁▁█████████████▁▁▁▁", + "▁▁▁▁▁████████████▁▁▁", + "▁▁▁▁▁████████████▁▁▁", + "▁▁▁▁▁▁███████████▁▁▁", + "▁▁▁▁▁▁▁▁█████████▁▁▁", + "▁▁▁▁▁▁▁▁█████████▁▁▁", + "▁▁▁▁▁▁▁▁▁█████████▁▁", + "▁▁▁▁▁▁▁▁▁█████████▁▁", + "▁▁▁▁▁▁▁▁▁▁█████████▁", + "▁▁▁▁▁▁▁▁▁▁▁████████▁", + "▁▁▁▁▁▁▁▁▁▁▁████████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁███████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁███████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + ], + }, + "moon": { + "interval": 80, + "frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "], + }, + "runner": {"interval": 140, "frames": ["🚶 ", "🏃 "]}, + "pong": { + "interval": 80, + "frames": [ + "▐⠂ ▌", + "▐⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂▌", + "▐ ⠠▌", + "▐ ⡀▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐⠠ ▌", + ], + }, + "shark": { + "interval": 120, + "frames": [ + "▐|\\____________▌", + "▐_|\\___________▌", + "▐__|\\__________▌", + "▐___|\\_________▌", + "▐____|\\________▌", + "▐_____|\\_______▌", + "▐______|\\______▌", + "▐_______|\\_____▌", + "▐________|\\____▌", + 
"▐_________|\\___▌", + "▐__________|\\__▌", + "▐___________|\\_▌", + "▐____________|\\▌", + "▐____________/|▌", + "▐___________/|_▌", + "▐__________/|__▌", + "▐_________/|___▌", + "▐________/|____▌", + "▐_______/|_____▌", + "▐______/|______▌", + "▐_____/|_______▌", + "▐____/|________▌", + "▐___/|_________▌", + "▐__/|__________▌", + "▐_/|___________▌", + "▐/|____________▌", + ], + }, + "dqpb": {"interval": 100, "frames": "dqpb"}, + "weather": { + "interval": 100, + "frames": [ + "☀️ ", + "☀️ ", + "☀️ ", + "🌤 ", + "⛅️ ", + "🌥 ", + "☁️ ", + "🌧 ", + "🌨 ", + "🌧 ", + "🌨 ", + "🌧 ", + "🌨 ", + "⛈ ", + "🌨 ", + "🌧 ", + "🌨 ", + "☁️ ", + "🌥 ", + "⛅️ ", + "🌤 ", + "☀️ ", + "☀️ ", + ], + }, + "christmas": {"interval": 400, "frames": "🌲🎄"}, + "grenade": { + "interval": 80, + "frames": [ + "، ", + "′ ", + " ´ ", + " ‾ ", + " ⸌", + " ⸊", + " |", + " ⁎", + " ⁕", + " ෴ ", + " ⁓", + " ", + " ", + " ", + ], + }, + "point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]}, + "layer": {"interval": 150, "frames": "-=≡"}, + "betaWave": { + "interval": 80, + "frames": [ + "ρββββββ", + "βρβββββ", + "ββρββββ", + "βββρβββ", + "ββββρββ", + "βββββρβ", + "ββββββρ", + ], + }, + "aesthetic": { + "interval": 80, + "frames": [ + "▰▱▱▱▱▱▱", + "▰▰▱▱▱▱▱", + "▰▰▰▱▱▱▱", + "▰▰▰▰▱▱▱", + "▰▰▰▰▰▱▱", + "▰▰▰▰▰▰▱", + "▰▰▰▰▰▰▰", + "▰▱▱▱▱▱▱", + ], + }, +} diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/_stack.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..194564e761ddae165b39ef6598877e2e3820af0a --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/_stack.py @@ -0,0 +1,16 @@ +from typing import List, TypeVar + +T = TypeVar("T") + + +class Stack(List[T]): + """A small shim over builtin list.""" + + @property + def top(self) -> T: + """Get top of stack.""" + return self[-1] + + def push(self, item: T) -> None: + """Push an item on to the stack (append in stack nomenclature).""" + 
self.append(item) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py new file mode 100644 index 0000000000000000000000000000000000000000..7de86ce5043feeee4b6c28302cc1e72c2ad4cfe1 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py @@ -0,0 +1,241 @@ +import re +import sys +from contextlib import suppress +from typing import Iterable, NamedTuple, Optional + +from .color import Color +from .style import Style +from .text import Text + +re_ansi = re.compile( + r""" +(?:\x1b[0-?])| +(?:\x1b\](.*?)\x1b\\)| +(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~])) +""", + re.VERBOSE, +) + + +class _AnsiToken(NamedTuple): + """Result of ansi tokenized string.""" + + plain: str = "" + sgr: Optional[str] = "" + osc: Optional[str] = "" + + +def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: + """Tokenize a string in to plain text and ANSI codes. + + Args: + ansi_text (str): A String containing ANSI codes. 
+ + Yields: + AnsiToken: A named tuple of (plain, sgr, osc) + """ + + position = 0 + sgr: Optional[str] + osc: Optional[str] + for match in re_ansi.finditer(ansi_text): + start, end = match.span(0) + osc, sgr = match.groups() + if start > position: + yield _AnsiToken(ansi_text[position:start]) + if sgr: + if sgr == "(": + position = end + 1 + continue + if sgr.endswith("m"): + yield _AnsiToken("", sgr[1:-1], osc) + else: + yield _AnsiToken("", sgr, osc) + position = end + if position < len(ansi_text): + yield _AnsiToken(ansi_text[position:]) + + +SGR_STYLE_MAP = { + 1: "bold", + 2: "dim", + 3: "italic", + 4: "underline", + 5: "blink", + 6: "blink2", + 7: "reverse", + 8: "conceal", + 9: "strike", + 21: "underline2", + 22: "not dim not bold", + 23: "not italic", + 24: "not underline", + 25: "not blink", + 26: "not blink2", + 27: "not reverse", + 28: "not conceal", + 29: "not strike", + 30: "color(0)", + 31: "color(1)", + 32: "color(2)", + 33: "color(3)", + 34: "color(4)", + 35: "color(5)", + 36: "color(6)", + 37: "color(7)", + 39: "default", + 40: "on color(0)", + 41: "on color(1)", + 42: "on color(2)", + 43: "on color(3)", + 44: "on color(4)", + 45: "on color(5)", + 46: "on color(6)", + 47: "on color(7)", + 49: "on default", + 51: "frame", + 52: "encircle", + 53: "overline", + 54: "not frame not encircle", + 55: "not overline", + 90: "color(8)", + 91: "color(9)", + 92: "color(10)", + 93: "color(11)", + 94: "color(12)", + 95: "color(13)", + 96: "color(14)", + 97: "color(15)", + 100: "on color(8)", + 101: "on color(9)", + 102: "on color(10)", + 103: "on color(11)", + 104: "on color(12)", + 105: "on color(13)", + 106: "on color(14)", + 107: "on color(15)", +} + + +class AnsiDecoder: + """Translate ANSI code in to styled Text.""" + + def __init__(self) -> None: + self.style = Style.null() + + def decode(self, terminal_text: str) -> Iterable[Text]: + """Decode ANSI codes in an iterable of lines. + + Args: + lines (Iterable[str]): An iterable of lines of terminal output. 
+ + Yields: + Text: Marked up Text. + """ + for line in terminal_text.splitlines(): + yield self.decode_line(line) + + def decode_line(self, line: str) -> Text: + """Decode a line containing ansi codes. + + Args: + line (str): A line of terminal output. + + Returns: + Text: A Text instance marked up according to ansi codes. + """ + from_ansi = Color.from_ansi + from_rgb = Color.from_rgb + _Style = Style + text = Text() + append = text.append + line = line.rsplit("\r", 1)[-1] + for plain_text, sgr, osc in _ansi_tokenize(line): + if plain_text: + append(plain_text, self.style or None) + elif osc is not None: + if osc.startswith("8;"): + _params, semicolon, link = osc[2:].partition(";") + if semicolon: + self.style = self.style.update_link(link or None) + elif sgr is not None: + # Translate in to semi-colon separated codes + # Ignore invalid codes, because we want to be lenient + codes = [ + min(255, int(_code) if _code else 0) + for _code in sgr.split(";") + if _code.isdigit() or _code == "" + ] + iter_codes = iter(codes) + for code in iter_codes: + if code == 0: + # reset + self.style = _Style.null() + elif code in SGR_STYLE_MAP: + # styles + self.style += _Style.parse(SGR_STYLE_MAP[code]) + elif code == 38: + #  Foreground + with suppress(StopIteration): + color_type = next(iter_codes) + if color_type == 5: + self.style += _Style.from_color( + from_ansi(next(iter_codes)) + ) + elif color_type == 2: + self.style += _Style.from_color( + from_rgb( + next(iter_codes), + next(iter_codes), + next(iter_codes), + ) + ) + elif code == 48: + # Background + with suppress(StopIteration): + color_type = next(iter_codes) + if color_type == 5: + self.style += _Style.from_color( + None, from_ansi(next(iter_codes)) + ) + elif color_type == 2: + self.style += _Style.from_color( + None, + from_rgb( + next(iter_codes), + next(iter_codes), + next(iter_codes), + ), + ) + + return text + + +if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover + import io + import os 
+ import pty + import sys + + decoder = AnsiDecoder() + + stdout = io.BytesIO() + + def read(fd: int) -> bytes: + data = os.read(fd, 1024) + stdout.write(data) + return data + + pty.spawn(sys.argv[1:], read) + + from .console import Console + + console = Console(record=True) + + stdout_result = stdout.getvalue().decode("utf-8") + print(stdout_result) + + for line in decoder.decode(stdout_result): + console.print(line) + + console.save_html("stdout.html") diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/console.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/console.py new file mode 100644 index 0000000000000000000000000000000000000000..572884542a17c149f4bd8c1ea8af9e42f4f3633a --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/console.py @@ -0,0 +1,2661 @@ +import inspect +import os +import sys +import threading +import zlib +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from datetime import datetime +from functools import wraps +from getpass import getpass +from html import escape +from inspect import isclass +from itertools import islice +from math import ceil +from time import monotonic +from types import FrameType, ModuleType, TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Mapping, + NamedTuple, + Optional, + TextIO, + Tuple, + Type, + Union, + cast, +) + +from pip._vendor.rich._null_file import NULL_FILE + +if sys.version_info >= (3, 8): + from typing import Literal, Protocol, runtime_checkable +else: + from pip._vendor.typing_extensions import ( + Literal, + Protocol, + runtime_checkable, + ) # pragma: no cover + +from . 
import errors, themes +from ._emoji_replace import _emoji_replace +from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT +from ._fileno import get_fileno +from ._log_render import FormatTimeCallable, LogRender +from .align import Align, AlignMethod +from .color import ColorSystem, blend_rgb +from .control import Control +from .emoji import EmojiVariant +from .highlighter import NullHighlighter, ReprHighlighter +from .markup import render as render_markup +from .measure import Measurement, measure_renderables +from .pager import Pager, SystemPager +from .pretty import Pretty, is_expandable +from .protocol import rich_cast +from .region import Region +from .scope import render_scope +from .screen import Screen +from .segment import Segment +from .style import Style, StyleType +from .styled import Styled +from .terminal_theme import DEFAULT_TERMINAL_THEME, SVG_EXPORT_THEME, TerminalTheme +from .text import Text, TextType +from .theme import Theme, ThemeStack + +if TYPE_CHECKING: + from ._windows import WindowsConsoleFeatures + from .live import Live + from .status import Status + +JUPYTER_DEFAULT_COLUMNS = 115 +JUPYTER_DEFAULT_LINES = 100 +WINDOWS = sys.platform == "win32" + +HighlighterType = Callable[[Union[str, "Text"]], "Text"] +JustifyMethod = Literal["default", "left", "center", "right", "full"] +OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"] + + +class NoChange: + pass + + +NO_CHANGE = NoChange() + +try: + _STDIN_FILENO = sys.__stdin__.fileno() # type: ignore[union-attr] +except Exception: + _STDIN_FILENO = 0 +try: + _STDOUT_FILENO = sys.__stdout__.fileno() # type: ignore[union-attr] +except Exception: + _STDOUT_FILENO = 1 +try: + _STDERR_FILENO = sys.__stderr__.fileno() # type: ignore[union-attr] +except Exception: + _STDERR_FILENO = 2 + +_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO) +_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO) + + +_TERM_COLORS = { + "kitty": ColorSystem.EIGHT_BIT, + "256color": 
ColorSystem.EIGHT_BIT, + "16color": ColorSystem.STANDARD, +} + + +class ConsoleDimensions(NamedTuple): + """Size of the terminal.""" + + width: int + """The width of the console in 'cells'.""" + height: int + """The height of the console in lines.""" + + +@dataclass +class ConsoleOptions: + """Options for __rich_console__ method.""" + + size: ConsoleDimensions + """Size of console.""" + legacy_windows: bool + """legacy_windows: flag for legacy windows.""" + min_width: int + """Minimum width of renderable.""" + max_width: int + """Maximum width of renderable.""" + is_terminal: bool + """True if the target is a terminal, otherwise False.""" + encoding: str + """Encoding of terminal.""" + max_height: int + """Height of container (starts as terminal)""" + justify: Optional[JustifyMethod] = None + """Justify value override for renderable.""" + overflow: Optional[OverflowMethod] = None + """Overflow value override for renderable.""" + no_wrap: Optional[bool] = False + """Disable wrapping for text.""" + highlight: Optional[bool] = None + """Highlight override for render_str.""" + markup: Optional[bool] = None + """Enable markup when rendering strings.""" + height: Optional[int] = None + + @property + def ascii_only(self) -> bool: + """Check if renderables should use ascii only.""" + return not self.encoding.startswith("utf") + + def copy(self) -> "ConsoleOptions": + """Return a copy of the options. + + Returns: + ConsoleOptions: a copy of self. 
+ """ + options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions) + options.__dict__ = self.__dict__.copy() + return options + + def update( + self, + *, + width: Union[int, NoChange] = NO_CHANGE, + min_width: Union[int, NoChange] = NO_CHANGE, + max_width: Union[int, NoChange] = NO_CHANGE, + justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE, + overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE, + no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE, + highlight: Union[Optional[bool], NoChange] = NO_CHANGE, + markup: Union[Optional[bool], NoChange] = NO_CHANGE, + height: Union[Optional[int], NoChange] = NO_CHANGE, + ) -> "ConsoleOptions": + """Update values, return a copy.""" + options = self.copy() + if not isinstance(width, NoChange): + options.min_width = options.max_width = max(0, width) + if not isinstance(min_width, NoChange): + options.min_width = min_width + if not isinstance(max_width, NoChange): + options.max_width = max_width + if not isinstance(justify, NoChange): + options.justify = justify + if not isinstance(overflow, NoChange): + options.overflow = overflow + if not isinstance(no_wrap, NoChange): + options.no_wrap = no_wrap + if not isinstance(highlight, NoChange): + options.highlight = highlight + if not isinstance(markup, NoChange): + options.markup = markup + if not isinstance(height, NoChange): + if height is not None: + options.max_height = height + options.height = None if height is None else max(0, height) + return options + + def update_width(self, width: int) -> "ConsoleOptions": + """Update just the width, return a copy. + + Args: + width (int): New width (sets both min_width and max_width) + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + return options + + def update_height(self, height: int) -> "ConsoleOptions": + """Update the height, and return a copy. 
+ + Args: + height (int): New height + + Returns: + ~ConsoleOptions: New Console options instance. + """ + options = self.copy() + options.max_height = options.height = height + return options + + def reset_height(self) -> "ConsoleOptions": + """Return a copy of the options with height set to ``None``. + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.height = None + return options + + def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": + """Update the width and height, and return a copy. + + Args: + width (int): New width (sets both min_width and max_width). + height (int): New height. + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + options.height = options.max_height = height + return options + + +@runtime_checkable +class RichCast(Protocol): + """An object that may be 'cast' to a console renderable.""" + + def __rich__( + self, + ) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover + ... + + +@runtime_checkable +class ConsoleRenderable(Protocol): + """An object that supports the console protocol.""" + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": # pragma: no cover + ... + + +# A type that may be rendered by Console. +RenderableType = Union[ConsoleRenderable, RichCast, str] +"""A string or any object that may be rendered by Rich.""" + +# The result of calling a __rich_console__ method. 
RenderResult = Iterable[Union[RenderableType, Segment]]

_null_highlighter = NullHighlighter()


class CaptureError(Exception):
    """An error in the Capture context manager."""


class NewLine:
    """A renderable to generate new line(s)"""

    def __init__(self, count: int = 1) -> None:
        self.count = count

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> Iterable[Segment]:
        yield Segment("\n" * self.count)


class ScreenUpdate:
    """Render a list of lines at a given offset."""

    def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
        self._lines = lines
        self.x = x
        self.y = y

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        x = self.x
        move_to = Control.move_to
        # Emit a cursor-move control before each line so the lines render
        # at (x, y), (x, y + 1), ... rather than at the current cursor.
        for offset, line in enumerate(self._lines, self.y):
            yield move_to(x, offset)
            yield from line


class Capture:
    """Context manager to capture the result of printing to the console.
    See :meth:`~rich.console.Console.capture` for how to use.

    Args:
        console (Console): A console instance to capture output.
    """

    def __init__(self, console: "Console") -> None:
        self._console = console
        self._result: Optional[str] = None

    def __enter__(self) -> "Capture":
        self._console.begin_capture()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self._result = self._console.end_capture()

    def get(self) -> str:
        """Get the result of the capture.

        Raises:
            CaptureError: If called before the context manager has exited.
        """
        if self._result is None:
            raise CaptureError(
                "Capture result is not available until context manager exits."
            )
        return self._result


class ThemeContext:
    """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage."""

    def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
        self.console = console
        self.theme = theme
        self.inherit = inherit

    def __enter__(self) -> "ThemeContext":
        # Forward the inherit flag so that use_theme(..., inherit=False)
        # actually replaces the existing styles instead of merging with them.
        # Previously push_theme was called without it, silently ignoring
        # the caller's choice.
        self.console.push_theme(self.theme, inherit=self.inherit)
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.console.pop_theme()


class PagerContext:
    """A context manager that 'pages' content. See :meth:`~rich.console.Console.pager` for usage."""

    def __init__(
        self,
        console: "Console",
        pager: Optional[Pager] = None,
        styles: bool = False,
        links: bool = False,
    ) -> None:
        self._console = console
        self.pager = SystemPager() if pager is None else pager
        self.styles = styles
        self.links = links

    def __enter__(self) -> "PagerContext":
        self._console._enter_buffer()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # Only page the buffered output on a clean exit; on an exception the
        # buffer is discarded by _exit_buffer without being shown.
        if exc_type is None:
            with self._console._lock:
                buffer: List[Segment] = self._console._buffer[:]
                del self._console._buffer[:]
                segments: Iterable[Segment] = buffer
                if not self.styles:
                    segments = Segment.strip_styles(segments)
                elif not self.links:
                    segments = Segment.strip_links(segments)
                content = self._console._render_buffer(segments)
            self.pager.show(content)
        self._console._exit_buffer()


class ScreenContext:
    """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage."""

    def __init__(
        self, console: "Console", hide_cursor: bool, style: StyleType = ""
    ) -> None:
        self.console = console
        self.hide_cursor = hide_cursor
        self.screen = Screen(style=style)
        self._changed = False

    def update(
        self, *renderables: RenderableType, style: Optional[StyleType] = None
    ) -> None:
        """Update the screen.

        Args:
            renderables (RenderableType, optional): Optional renderable(s) to replace the
                current renderable, or no arguments for no change.
            style: (Style, optional): Replacement style, or None for no change. Defaults to None.
        """
        if renderables:
            self.screen.renderable = (
                Group(*renderables) if len(renderables) > 1 else renderables[0]
            )
        if style is not None:
            self.screen.style = style
        self.console.print(self.screen, end="")

    def __enter__(self) -> "ScreenContext":
        self._changed = self.console.set_alt_screen(True)
        if self._changed and self.hide_cursor:
            self.console.show_cursor(False)
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # Only undo what we actually changed in __enter__; if the alt screen
        # could not be enabled, leave the terminal state untouched.
        if self._changed:
            self.console.set_alt_screen(False)
            if self.hide_cursor:
                self.console.show_cursor(True)


class Group:
    """Takes a group of renderables and returns a renderable object that renders the group.

    Args:
        renderables (Iterable[RenderableType]): An iterable of renderable objects.
        fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
    """

    def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
        self._renderables = renderables
        self.fit = fit
        self._render: Optional[List[RenderableType]] = None

    @property
    def renderables(self) -> List["RenderableType"]:
        # Lazily materialize (and cache) the renderables so a generator
        # argument can be iterated more than once.
        if self._render is None:
            self._render = list(self._renderables)
        return self._render

    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "Measurement":
        if self.fit:
            return measure_renderables(console, options, self.renderables)
        else:
            return Measurement(options.max_width, options.max_width)

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> RenderResult:
        yield from self.renderables


def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
    """A decorator that turns an iterable of renderables in to a group.

    Args:
        fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
    """

    def decorator(
        method: Callable[..., Iterable[RenderableType]]
    ) -> Callable[..., Group]:
        """Convert a method that returns an iterable of renderables in to a Group."""

        @wraps(method)
        def _replace(*args: Any, **kwargs: Any) -> Group:
            renderables = method(*args, **kwargs)
            return Group(*renderables, fit=fit)

        return _replace

    return decorator


def _is_jupyter() -> bool:  # pragma: no cover
    """Check if we're running in a Jupyter notebook."""
    try:
        get_ipython  # type: ignore[name-defined]
    except NameError:
        return False
    ipython = get_ipython()  # type: ignore[name-defined]
    shell = ipython.__class__.__name__
    if (
        "google.colab" in str(ipython.__class__)
        or os.getenv("DATABRICKS_RUNTIME_VERSION")
        or shell == "ZMQInteractiveShell"
    ):
        return True  # Jupyter notebook or qtconsole
    elif shell == "TerminalInteractiveShell":
        return False  # Terminal running IPython
    else:
        return False  # Other type (?)
+ + +COLOR_SYSTEMS = { + "standard": ColorSystem.STANDARD, + "256": ColorSystem.EIGHT_BIT, + "truecolor": ColorSystem.TRUECOLOR, + "windows": ColorSystem.WINDOWS, +} + +_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} + + +@dataclass +class ConsoleThreadLocals(threading.local): + """Thread local values for Console context.""" + + theme_stack: ThemeStack + buffer: List[Segment] = field(default_factory=list) + buffer_index: int = 0 + + +class RenderHook(ABC): + """Provides hooks in to the render process.""" + + @abstractmethod + def process_renderables( + self, renderables: List[ConsoleRenderable] + ) -> List[ConsoleRenderable]: + """Called with a list of objects to render. + + This method can return a new list of renderables, or modify and return the same list. + + Args: + renderables (List[ConsoleRenderable]): A number of renderable objects. + + Returns: + List[ConsoleRenderable]: A replacement list of renderables. + """ + + +_windows_console_features: Optional["WindowsConsoleFeatures"] = None + + +def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover + global _windows_console_features + if _windows_console_features is not None: + return _windows_console_features + from ._windows import get_windows_console_features + + _windows_console_features = get_windows_console_features() + return _windows_console_features + + +def detect_legacy_windows() -> bool: + """Detect legacy Windows.""" + return WINDOWS and not get_windows_console_features().vt + + +class Console: + """A high level console interface. + + Args: + color_system (str, optional): The color system supported by your terminal, + either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect. + force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None. 
+ force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None. + force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None. + soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False. + theme (Theme, optional): An optional style theme object, or ``None`` for default theme. + stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False. + file (IO, optional): A file object where the console should write to. Defaults to stdout. + quiet (bool, Optional): Boolean to suppress all output. Defaults to False. + width (int, optional): The width of the terminal. Leave as default to auto-detect width. + height (int, optional): The height of the terminal. Leave as default to auto-detect height. + style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None. + no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. + tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. + record (bool, optional): Boolean to enable recording of terminal output, + required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False. + markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. + emoji (bool, optional): Enable emoji code. Defaults to True. + emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. + highlight (bool, optional): Enable automatic highlighting. Defaults to True. + log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True. + log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True. 
+ log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ". + highlighter (HighlighterType, optional): Default highlighter. + legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``. + safe_box (bool, optional): Restrict box options that don't render on legacy Windows. + get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log), + or None for datetime.now. + get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic. + """ + + _environ: Mapping[str, str] = os.environ + + def __init__( + self, + *, + color_system: Optional[ + Literal["auto", "standard", "256", "truecolor", "windows"] + ] = "auto", + force_terminal: Optional[bool] = None, + force_jupyter: Optional[bool] = None, + force_interactive: Optional[bool] = None, + soft_wrap: bool = False, + theme: Optional[Theme] = None, + stderr: bool = False, + file: Optional[IO[str]] = None, + quiet: bool = False, + width: Optional[int] = None, + height: Optional[int] = None, + style: Optional[StyleType] = None, + no_color: Optional[bool] = None, + tab_size: int = 8, + record: bool = False, + markup: bool = True, + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, + highlight: bool = True, + log_time: bool = True, + log_path: bool = True, + log_time_format: Union[str, FormatTimeCallable] = "[%X]", + highlighter: Optional["HighlighterType"] = ReprHighlighter(), + legacy_windows: Optional[bool] = None, + safe_box: bool = True, + get_datetime: Optional[Callable[[], datetime]] = None, + get_time: Optional[Callable[[], float]] = None, + _environ: Optional[Mapping[str, str]] = None, + ): + # Copy of os.environ allows us to replace it for testing + if _environ is not None: + self._environ = _environ + + 
self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter + if self.is_jupyter: + if width is None: + jupyter_columns = self._environ.get("JUPYTER_COLUMNS") + if jupyter_columns is not None and jupyter_columns.isdigit(): + width = int(jupyter_columns) + else: + width = JUPYTER_DEFAULT_COLUMNS + if height is None: + jupyter_lines = self._environ.get("JUPYTER_LINES") + if jupyter_lines is not None and jupyter_lines.isdigit(): + height = int(jupyter_lines) + else: + height = JUPYTER_DEFAULT_LINES + + self.tab_size = tab_size + self.record = record + self._markup = markup + self._emoji = emoji + self._emoji_variant: Optional[EmojiVariant] = emoji_variant + self._highlight = highlight + self.legacy_windows: bool = ( + (detect_legacy_windows() and not self.is_jupyter) + if legacy_windows is None + else legacy_windows + ) + + if width is None: + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) - self.legacy_windows + if height is None: + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + self.soft_wrap = soft_wrap + self._width = width + self._height = height + + self._color_system: Optional[ColorSystem] + + self._force_terminal = None + if force_terminal is not None: + self._force_terminal = force_terminal + + self._file = file + self.quiet = quiet + self.stderr = stderr + + if color_system is None: + self._color_system = None + elif color_system == "auto": + self._color_system = self._detect_color_system() + else: + self._color_system = COLOR_SYSTEMS[color_system] + + self._lock = threading.RLock() + self._log_render = LogRender( + show_time=log_time, + show_path=log_path, + time_format=log_time_format, + ) + self.highlighter: HighlighterType = highlighter or _null_highlighter + self.safe_box = safe_box + self.get_datetime = get_datetime or datetime.now + self.get_time = get_time or monotonic + self.style = style + self.no_color = ( + 
no_color if no_color is not None else "NO_COLOR" in self._environ + ) + self.is_interactive = ( + (self.is_terminal and not self.is_dumb_terminal) + if force_interactive is None + else force_interactive + ) + + self._record_buffer_lock = threading.RLock() + self._thread_locals = ConsoleThreadLocals( + theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme) + ) + self._record_buffer: List[Segment] = [] + self._render_hooks: List[RenderHook] = [] + self._live: Optional["Live"] = None + self._is_alt_screen = False + + def __repr__(self) -> str: + return f"" + + @property + def file(self) -> IO[str]: + """Get the file object to write to.""" + file = self._file or (sys.stderr if self.stderr else sys.stdout) + file = getattr(file, "rich_proxied_file", file) + if file is None: + file = NULL_FILE + return file + + @file.setter + def file(self, new_file: IO[str]) -> None: + """Set a new file object.""" + self._file = new_file + + @property + def _buffer(self) -> List[Segment]: + """Get a thread local buffer.""" + return self._thread_locals.buffer + + @property + def _buffer_index(self) -> int: + """Get a thread local buffer.""" + return self._thread_locals.buffer_index + + @_buffer_index.setter + def _buffer_index(self, value: int) -> None: + self._thread_locals.buffer_index = value + + @property + def _theme_stack(self) -> ThemeStack: + """Get the thread local theme stack.""" + return self._thread_locals.theme_stack + + def _detect_color_system(self) -> Optional[ColorSystem]: + """Detect color system from env vars.""" + if self.is_jupyter: + return ColorSystem.TRUECOLOR + if not self.is_terminal or self.is_dumb_terminal: + return None + if WINDOWS: # pragma: no cover + if self.legacy_windows: # pragma: no cover + return ColorSystem.WINDOWS + windows_console_features = get_windows_console_features() + return ( + ColorSystem.TRUECOLOR + if windows_console_features.truecolor + else ColorSystem.EIGHT_BIT + ) + else: + color_term = self._environ.get("COLORTERM", 
"").strip().lower() + if color_term in ("truecolor", "24bit"): + return ColorSystem.TRUECOLOR + term = self._environ.get("TERM", "").strip().lower() + _term_name, _hyphen, colors = term.rpartition("-") + color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD) + return color_system + + def _enter_buffer(self) -> None: + """Enter in to a buffer context, and buffer all output.""" + self._buffer_index += 1 + + def _exit_buffer(self) -> None: + """Leave buffer context, and render content if required.""" + self._buffer_index -= 1 + self._check_buffer() + + def set_live(self, live: "Live") -> None: + """Set Live instance. Used by Live context manager. + + Args: + live (Live): Live instance using this Console. + + Raises: + errors.LiveError: If this Console has a Live context currently active. + """ + with self._lock: + if self._live is not None: + raise errors.LiveError("Only one live display may be active at once") + self._live = live + + def clear_live(self) -> None: + """Clear the Live instance.""" + with self._lock: + self._live = None + + def push_render_hook(self, hook: RenderHook) -> None: + """Add a new render hook to the stack. + + Args: + hook (RenderHook): Render hook instance. + """ + with self._lock: + self._render_hooks.append(hook) + + def pop_render_hook(self) -> None: + """Pop the last renderhook from the stack.""" + with self._lock: + self._render_hooks.pop() + + def __enter__(self) -> "Console": + """Own context manager to enter buffer context.""" + self._enter_buffer() + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + """Exit buffer context.""" + self._exit_buffer() + + def begin_capture(self) -> None: + """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output.""" + self._enter_buffer() + + def end_capture(self) -> str: + """End capture mode and return captured string. + + Returns: + str: Console output. 
+ """ + render_result = self._render_buffer(self._buffer) + del self._buffer[:] + self._exit_buffer() + return render_result + + def push_theme(self, theme: Theme, *, inherit: bool = True) -> None: + """Push a new theme on to the top of the stack, replacing the styles from the previous theme. + Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather + than calling this method directly. + + Args: + theme (Theme): A theme instance. + inherit (bool, optional): Inherit existing styles. Defaults to True. + """ + self._theme_stack.push_theme(theme, inherit=inherit) + + def pop_theme(self) -> None: + """Remove theme from top of stack, restoring previous theme.""" + self._theme_stack.pop_theme() + + def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext: + """Use a different theme for the duration of the context manager. + + Args: + theme (Theme): Theme instance to user. + inherit (bool, optional): Inherit existing console styles. Defaults to True. + + Returns: + ThemeContext: [description] + """ + return ThemeContext(self, theme, inherit) + + @property + def color_system(self) -> Optional[str]: + """Get color system string. + + Returns: + Optional[str]: "standard", "256" or "truecolor". + """ + + if self._color_system is not None: + return _COLOR_SYSTEMS_NAMES[self._color_system] + else: + return None + + @property + def encoding(self) -> str: + """Get the encoding of the console file, e.g. ``"utf-8"``. + + Returns: + str: A standard encoding string. + """ + return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower() + + @property + def is_terminal(self) -> bool: + """Check if the console is writing to a terminal. + + Returns: + bool: True if the console writing to a device capable of + understanding terminal codes, otherwise False. 
+ """ + if self._force_terminal is not None: + return self._force_terminal + + if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith( + "idlelib" + ): + # Return False for Idle which claims to be a tty but can't handle ansi codes + return False + + if self.is_jupyter: + # return False for Jupyter, which may have FORCE_COLOR set + return False + + # If FORCE_COLOR env var has any value at all, we assume a terminal. + force_color = self._environ.get("FORCE_COLOR") + if force_color is not None: + self._force_terminal = True + return True + + isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) + try: + return False if isatty is None else isatty() + except ValueError: + # in some situation (at the end of a pytest run for example) isatty() can raise + # ValueError: I/O operation on closed file + # return False because we aren't in a terminal anymore + return False + + @property + def is_dumb_terminal(self) -> bool: + """Detect dumb terminal. + + Returns: + bool: True if writing to a dumb terminal, otherwise False. + + """ + _term = self._environ.get("TERM", "") + is_dumb = _term.lower() in ("dumb", "unknown") + return self.is_terminal and is_dumb + + @property + def options(self) -> ConsoleOptions: + """Get default console options.""" + return ConsoleOptions( + max_height=self.size.height, + size=self.size, + legacy_windows=self.legacy_windows, + min_width=1, + max_width=self.width, + encoding=self.encoding, + is_terminal=self.is_terminal, + ) + + @property + def size(self) -> ConsoleDimensions: + """Get the size of the console. + + Returns: + ConsoleDimensions: A named tuple containing the dimensions. 
+ """ + + if self._width is not None and self._height is not None: + return ConsoleDimensions(self._width - self.legacy_windows, self._height) + + if self.is_dumb_terminal: + return ConsoleDimensions(80, 25) + + width: Optional[int] = None + height: Optional[int] = None + + streams = _STD_STREAMS_OUTPUT if WINDOWS else _STD_STREAMS + for file_descriptor in streams: + try: + width, height = os.get_terminal_size(file_descriptor) + except (AttributeError, ValueError, OSError): # Probably not a terminal + pass + else: + break + + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + # get_terminal_size can report 0, 0 if run from pseudo-terminal + width = width or 80 + height = height or 25 + return ConsoleDimensions( + width - self.legacy_windows if self._width is None else self._width, + height if self._height is None else self._height, + ) + + @size.setter + def size(self, new_size: Tuple[int, int]) -> None: + """Set a new size for the terminal. + + Args: + new_size (Tuple[int, int]): New width and height. + """ + width, height = new_size + self._width = width + self._height = height + + @property + def width(self) -> int: + """Get the width of the console. + + Returns: + int: The width (in characters) of the console. + """ + return self.size.width + + @width.setter + def width(self, width: int) -> None: + """Set width. + + Args: + width (int): New width. + """ + self._width = width + + @property + def height(self) -> int: + """Get the height of the console. + + Returns: + int: The height (in lines) of the console. + """ + return self.size.height + + @height.setter + def height(self, height: int) -> None: + """Set height. + + Args: + height (int): new height. 
+ """ + self._height = height + + def bell(self) -> None: + """Play a 'bell' sound (if supported by the terminal).""" + self.control(Control.bell()) + + def capture(self) -> Capture: + """A context manager to *capture* the result of print() or log() in a string, + rather than writing it to the console. + + Example: + >>> from rich.console import Console + >>> console = Console() + >>> with console.capture() as capture: + ... console.print("[bold magenta]Hello World[/]") + >>> print(capture.get()) + + Returns: + Capture: Context manager with disables writing to the terminal. + """ + capture = Capture(self) + return capture + + def pager( + self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False + ) -> PagerContext: + """A context manager to display anything printed within a "pager". The pager application + is defined by the system and will typically support at least pressing a key to scroll. + + Args: + pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None. + styles (bool, optional): Show styles in pager. Defaults to False. + links (bool, optional): Show links in pager. Defaults to False. + + Example: + >>> from rich.console import Console + >>> from rich.__main__ import make_test_card + >>> console = Console() + >>> with console.pager(): + console.print(make_test_card()) + + Returns: + PagerContext: A context manager. + """ + return PagerContext(self, pager=pager, styles=styles, links=links) + + def line(self, count: int = 1) -> None: + """Write new line(s). + + Args: + count (int, optional): Number of new lines. Defaults to 1. + """ + + assert count >= 0, "count must be >= 0" + self.print(NewLine(count)) + + def clear(self, home: bool = True) -> None: + """Clear the screen. + + Args: + home (bool, optional): Also move the cursor to 'home' position. Defaults to True. 
+ """ + if home: + self.control(Control.clear(), Control.home()) + else: + self.control(Control.clear()) + + def status( + self, + status: RenderableType, + *, + spinner: str = "dots", + spinner_style: StyleType = "status.spinner", + speed: float = 1.0, + refresh_per_second: float = 12.5, + ) -> "Status": + """Display a status and spinner. + + Args: + status (RenderableType): A status renderable (str or Text typically). + spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". + spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". + speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. + refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. + + Returns: + Status: A Status object that may be used as a context manager. + """ + from .status import Status + + status_renderable = Status( + status, + console=self, + spinner=spinner, + spinner_style=spinner_style, + speed=speed, + refresh_per_second=refresh_per_second, + ) + return status_renderable + + def show_cursor(self, show: bool = True) -> bool: + """Show or hide the cursor. + + Args: + show (bool, optional): Set visibility of the cursor. + """ + if self.is_terminal: + self.control(Control.show_cursor(show)) + return True + return False + + def set_alt_screen(self, enable: bool = True) -> bool: + """Enables alternative screen mode. + + Note, if you enable this mode, you should ensure that is disabled before + the application exits. See :meth:`~rich.Console.screen` for a context manager + that handles this for you. + + Args: + enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True. + + Returns: + bool: True if the control codes were written. 
+ + """ + changed = False + if self.is_terminal and not self.legacy_windows: + self.control(Control.alt_screen(enable)) + changed = True + self._is_alt_screen = enable + return changed + + @property + def is_alt_screen(self) -> bool: + """Check if the alt screen was enabled. + + Returns: + bool: True if the alt screen was enabled, otherwise False. + """ + return self._is_alt_screen + + def set_window_title(self, title: str) -> bool: + """Set the title of the console terminal window. + + Warning: There is no means within Rich of "resetting" the window title to its + previous value, meaning the title you set will persist even after your application + exits. + + ``fish`` shell resets the window title before and after each command by default, + negating this issue. Windows Terminal and command prompt will also reset the title for you. + Most other shells and terminals, however, do not do this. + + Some terminals may require configuration changes before you can set the title. + Some terminals may not support setting the title at all. + + Other software (including the terminal itself, the shell, custom prompts, plugins, etc.) + may also set the terminal window title. This could result in whatever value you write + using this method being overwritten. + + Args: + title (str): The new title of the terminal window. + + Returns: + bool: True if the control code to change the terminal title was + written, otherwise False. Note that a return value of True + does not guarantee that the window title has actually changed, + since the feature may be unsupported/disabled in some terminals. + """ + if self.is_terminal: + self.control(Control.title(title)) + return True + return False + + def screen( + self, hide_cursor: bool = True, style: Optional[StyleType] = None + ) -> "ScreenContext": + """Context manager to enable and disable 'alternative screen' mode. + + Args: + hide_cursor (bool, optional): Also hide the cursor. Defaults to False. 
+ style (Style, optional): Optional style for screen. Defaults to None. + + Returns: + ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit. + """ + return ScreenContext(self, hide_cursor=hide_cursor, style=style or "") + + def measure( + self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None + ) -> Measurement: + """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains + information regarding the number of characters required to print the renderable. + + Args: + renderable (RenderableType): Any renderable or string. + options (Optional[ConsoleOptions], optional): Options to use when measuring, or None + to use default options. Defaults to None. + + Returns: + Measurement: A measurement of the renderable. + """ + measurement = Measurement.get(self, options or self.options, renderable) + return measurement + + def render( + self, renderable: RenderableType, options: Optional[ConsoleOptions] = None + ) -> Iterable[Segment]: + """Render an object in to an iterable of `Segment` instances. + + This method contains the logic for rendering objects with the console protocol. + You are unlikely to need to use it directly, unless you are extending the library. + + Args: + renderable (RenderableType): An object supporting the console protocol, or + an object that may be converted to a string. + options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None. + + Returns: + Iterable[Segment]: An iterable of segments that may be rendered. + """ + + _options = options or self.options + if _options.max_width < 1: + # No space to render anything. This prevents potential recursion errors. 
+ return + render_iterable: RenderResult + + renderable = rich_cast(renderable) + if hasattr(renderable, "__rich_console__") and not isclass(renderable): + render_iterable = renderable.__rich_console__(self, _options) + elif isinstance(renderable, str): + text_renderable = self.render_str( + renderable, highlight=_options.highlight, markup=_options.markup + ) + render_iterable = text_renderable.__rich_console__(self, _options) + else: + raise errors.NotRenderableError( + f"Unable to render {renderable!r}; " + "A str, Segment or object with __rich_console__ method is required" + ) + + try: + iter_render = iter(render_iterable) + except TypeError: + raise errors.NotRenderableError( + f"object {render_iterable!r} is not renderable" + ) + _Segment = Segment + _options = _options.reset_height() + for render_output in iter_render: + if isinstance(render_output, _Segment): + yield render_output + else: + yield from self.render(render_output, _options) + + def render_lines( + self, + renderable: RenderableType, + options: Optional[ConsoleOptions] = None, + *, + style: Optional[Style] = None, + pad: bool = True, + new_lines: bool = False, + ) -> List[List[Segment]]: + """Render objects in to a list of lines. + + The output of render_lines is useful when further formatting of rendered console text + is required, such as the Panel class which draws a border around any renderable object. + + Args: + renderable (RenderableType): Any object renderable in the console. + options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``. + style (Style, optional): Optional style to apply to renderables. Defaults to ``None``. + pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``. + new_lines (bool, optional): Include "\n" characters at end of lines. + + Returns: + List[List[Segment]]: A list of lines, where a line is a list of Segment objects. 
+ """ + with self._lock: + render_options = options or self.options + _rendered = self.render(renderable, render_options) + if style: + _rendered = Segment.apply_style(_rendered, style) + + render_height = render_options.height + if render_height is not None: + render_height = max(0, render_height) + + lines = list( + islice( + Segment.split_and_crop_lines( + _rendered, + render_options.max_width, + include_new_lines=new_lines, + pad=pad, + style=style, + ), + None, + render_height, + ) + ) + if render_options.height is not None: + extra_lines = render_options.height - len(lines) + if extra_lines > 0: + pad_line = [ + ( + [ + Segment(" " * render_options.max_width, style), + Segment("\n"), + ] + if new_lines + else [Segment(" " * render_options.max_width, style)] + ) + ] + lines.extend(pad_line * extra_lines) + + return lines + + def render_str( + self, + text: str, + *, + style: Union[str, Style] = "", + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + highlighter: Optional[HighlighterType] = None, + ) -> "Text": + """Convert a string to a Text instance. This is called automatically if + you print or log a string. + + Args: + text (str): Text to render. + style (Union[str, Style], optional): Style to apply to rendered text. + justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default. + highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default. + highlighter (HighlighterType, optional): Optional highlighter to apply. + Returns: + ConsoleRenderable: Renderable object. 
+ + """ + emoji_enabled = emoji or (emoji is None and self._emoji) + markup_enabled = markup or (markup is None and self._markup) + highlight_enabled = highlight or (highlight is None and self._highlight) + + if markup_enabled: + rich_text = render_markup( + text, + style=style, + emoji=emoji_enabled, + emoji_variant=self._emoji_variant, + ) + rich_text.justify = justify + rich_text.overflow = overflow + else: + rich_text = Text( + ( + _emoji_replace(text, default_variant=self._emoji_variant) + if emoji_enabled + else text + ), + justify=justify, + overflow=overflow, + style=style, + ) + + _highlighter = (highlighter or self.highlighter) if highlight_enabled else None + if _highlighter is not None: + highlight_text = _highlighter(str(rich_text)) + highlight_text.copy_styles(rich_text) + return highlight_text + + return rich_text + + def get_style( + self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None + ) -> Style: + """Get a Style instance by its theme name or parse a definition. + + Args: + name (str): The name of a style or a style definition. + + Returns: + Style: A Style object. + + Raises: + MissingStyle: If no style could be parsed from name. + + """ + if isinstance(name, Style): + return name + + try: + style = self._theme_stack.get(name) + if style is None: + style = Style.parse(name) + return style.copy() if style.link else style + except errors.StyleSyntaxError as error: + if default is not None: + return self.get_style(default) + raise errors.MissingStyle( + f"Failed to get style {name!r}; {error}" + ) from None + + def _collect_renderables( + self, + objects: Iterable[Any], + sep: str, + end: str, + *, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + ) -> List[ConsoleRenderable]: + """Combine a number of renderables and text into one renderable. + + Args: + objects (Iterable[Any]): Anything that Rich can render. 
+ sep (str): String to write between print data. + end (str): String to write at end of print data. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. + + Returns: + List[ConsoleRenderable]: A list of things to render. + """ + renderables: List[ConsoleRenderable] = [] + _append = renderables.append + text: List[Text] = [] + append_text = text.append + + append = _append + if justify in ("left", "center", "right"): + + def align_append(renderable: RenderableType) -> None: + _append(Align(renderable, cast(AlignMethod, justify))) + + append = align_append + + _highlighter: HighlighterType = _null_highlighter + if highlight or (highlight is None and self._highlight): + _highlighter = self.highlighter + + def check_text() -> None: + if text: + sep_text = Text(sep, justify=justify, end=end) + append(sep_text.join(text)) + text.clear() + + for renderable in objects: + renderable = rich_cast(renderable) + if isinstance(renderable, str): + append_text( + self.render_str( + renderable, + emoji=emoji, + markup=markup, + highlight=highlight, + highlighter=_highlighter, + ) + ) + elif isinstance(renderable, Text): + append_text(renderable) + elif isinstance(renderable, ConsoleRenderable): + check_text() + append(renderable) + elif is_expandable(renderable): + check_text() + append(Pretty(renderable, highlighter=_highlighter)) + else: + append_text(_highlighter(str(renderable))) + + check_text() + + if self.style is not None: + style = self.get_style(self.style) + renderables = [Styled(renderable, style) for renderable in renderables] + + return renderables + + def rule( + self, + title: TextType = "", + *, + characters: str = "─", + style: Union[str, 
Style] = "rule.line", + align: AlignMethod = "center", + ) -> None: + """Draw a line with optional centered title. + + Args: + title (str, optional): Text to render over the rule. Defaults to "". + characters (str, optional): Character(s) to form the line. Defaults to "─". + style (str, optional): Style of line. Defaults to "rule.line". + align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". + """ + from .rule import Rule + + rule = Rule(title=title, characters=characters, style=style, align=align) + self.print(rule) + + def control(self, *control: Control) -> None: + """Insert non-printing control codes. + + Args: + control_codes (str): Control codes, such as those that may move the cursor. + """ + if not self.is_dumb_terminal: + with self: + self._buffer.extend(_control.segment for _control in control) + + def out( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + highlight: Optional[bool] = None, + ) -> None: + """Output to the terminal. This is a low-level way of writing to the terminal which unlike + :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will + optionally apply highlighting and a basic style. + + Args: + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use + console default. Defaults to ``None``. 
+ """ + raw_output: str = sep.join(str(_object) for _object in objects) + self.print( + raw_output, + style=style, + highlight=highlight, + emoji=False, + markup=False, + no_wrap=True, + overflow="ignore", + crop=False, + end=end, + ) + + def print( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + no_wrap: Optional[bool] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + width: Optional[int] = None, + height: Optional[int] = None, + crop: bool = True, + soft_wrap: Optional[bool] = None, + new_line_start: bool = False, + ) -> None: + """Print to the console. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``. + overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None. + no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. + width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``. + crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True. 
+ soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for + Console default. Defaults to ``None``. + new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``. + """ + if not objects: + objects = (NewLine(),) + + if soft_wrap is None: + soft_wrap = self.soft_wrap + if soft_wrap: + if no_wrap is None: + no_wrap = True + if overflow is None: + overflow = "ignore" + crop = False + render_hooks = self._render_hooks[:] + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + render_options = self.options.update( + justify=justify, + overflow=overflow, + width=min(width, self.width) if width is not None else NO_CHANGE, + height=height, + no_wrap=no_wrap, + markup=markup, + highlight=highlight, + ) + + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + if style is None: + for renderable in renderables: + extend(render(renderable, render_options)) + else: + for renderable in renderables: + extend( + Segment.apply_style( + render(renderable, render_options), self.get_style(style) + ) + ) + if new_line_start: + if ( + len("".join(segment.text for segment in new_segments).splitlines()) + > 1 + ): + new_segments.insert(0, Segment.line()) + if crop: + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + else: + self._buffer.extend(new_segments) + + def print_json( + self, + json: Optional[str] = None, + *, + data: Any = None, + indent: Union[None, int, str] = 2, + highlight: bool = True, + skip_keys: bool = False, + ensure_ascii: bool = False, + check_circular: bool = True, + allow_nan: bool = True, + default: Optional[Callable[[Any], Any]] 
= None, + sort_keys: bool = False, + ) -> None: + """Pretty prints JSON. Output will be valid JSON. + + Args: + json (Optional[str]): A string containing JSON. + data (Any): If json is not supplied, then encode this data. + indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2. + highlight (bool, optional): Enable highlighting of output: Defaults to True. + skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. + ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. + check_circular (bool, optional): Check for circular references. Defaults to True. + allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. + default (Callable, optional): A callable that converts values that can not be encoded + in to something that can be JSON encoded. Defaults to None. + sort_keys (bool, optional): Sort dictionary keys. Defaults to False. + """ + from pip._vendor.rich.json import JSON + + if json is None: + json_renderable = JSON.from_data( + data, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + else: + if not isinstance(json, str): + raise TypeError( + f"json must be str. Did you mean print_json(data={json!r}) ?" + ) + json_renderable = JSON( + json, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + self.print(json_renderable, soft_wrap=True) + + def update_screen( + self, + renderable: RenderableType, + *, + region: Optional[Region] = None, + options: Optional[ConsoleOptions] = None, + ) -> None: + """Update the screen at a given offset. + + Args: + renderable (RenderableType): A Rich renderable. + region (Region, optional): Region of screen to update, or None for entire screen. 
Defaults to None. + x (int, optional): x offset. Defaults to 0. + y (int, optional): y offset. Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + render_options = options or self.options + if region is None: + x = y = 0 + render_options = render_options.update_dimensions( + render_options.max_width, render_options.height or self.height + ) + else: + x, y, width, height = region + render_options = render_options.update_dimensions(width, height) + + lines = self.render_lines(renderable, options=render_options) + self.update_screen_lines(lines, x, y) + + def update_screen_lines( + self, lines: List[List[Segment]], x: int = 0, y: int = 0 + ) -> None: + """Update lines of the screen at a given offset. + + Args: + lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`). + x (int, optional): x offset (column no). Defaults to 0. + y (int, optional): y offset (column no). Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + screen_update = ScreenUpdate(lines, x, y) + segments = self.render(screen_update) + self._buffer.extend(segments) + self._check_buffer() + + def print_exception( + self, + *, + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ) -> None: + """Prints a rich render of the last exception and traceback. + + Args: + width (Optional[int], optional): Number of characters used to render code. Defaults to 100. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. 
+ theme (str, optional): Override pygments theme used in traceback + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + """ + from .traceback import Traceback + + traceback = Traceback( + width=width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + suppress=suppress, + max_frames=max_frames, + ) + self.print(traceback) + + @staticmethod + def _caller_frame_info( + offset: int, + currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe, + ) -> Tuple[str, int, Dict[str, Any]]: + """Get caller frame information. + + Args: + offset (int): the caller offset within the current frame stack. + currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to + retrieve the current frame. Defaults to ``inspect.currentframe``. + + Returns: + Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and + the dictionary of local variables associated with the caller frame. + + Raises: + RuntimeError: If the stack offset is invalid. 
+ """ + # Ignore the frame of this local helper + offset += 1 + + frame = currentframe() + if frame is not None: + # Use the faster currentframe where implemented + while offset and frame is not None: + frame = frame.f_back + offset -= 1 + assert frame is not None + return frame.f_code.co_filename, frame.f_lineno, frame.f_locals + else: + # Fallback to the slower stack + frame_info = inspect.stack()[offset] + return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals + + def log( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + log_locals: bool = False, + _stack_offset: int = 1, + ) -> None: + """Log rich content to the terminal. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None. + log_locals (bool, optional): Boolean to enable logging of locals where ``log()`` + was called. Defaults to False. + _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1. 
+ """ + if not objects: + objects = (NewLine(),) + + render_hooks = self._render_hooks[:] + + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + if style is not None: + renderables = [Styled(renderable, style) for renderable in renderables] + + filename, line_no, locals = self._caller_frame_info(_stack_offset) + link_path = None if filename.startswith("<") else os.path.abspath(filename) + path = filename.rpartition(os.sep)[-1] + if log_locals: + locals_map = { + key: value + for key, value in locals.items() + if not key.startswith("__") + } + renderables.append(render_scope(locals_map, title="[i]locals")) + + renderables = [ + self._log_render( + self, + renderables, + log_time=self.get_datetime(), + path=path, + line_no=line_no, + link_path=link_path, + ) + ] + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + render_options = self.options + for renderable in renderables: + extend(render(renderable, render_options)) + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + + def on_broken_pipe(self) -> None: + """This function is called when a `BrokenPipeError` is raised. + + This can occur when piping Textual output in Linux and macOS. + The default implementation is to exit the app, but you could implement + this method in a subclass to change the behavior. + + See https://docs.python.org/3/library/signal.html#note-on-sigpipe for details. + """ + self.quiet = True + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + raise SystemExit(1) + + def _check_buffer(self) -> None: + """Check if the buffer may be rendered. Render it if it can (e.g. 
Console.quiet is False) + Rendering is supported on Windows, Unix and Jupyter environments. For + legacy Windows consoles, the win32 API is called directly. + This method will also record what it renders if recording is enabled via Console.record. + """ + if self.quiet: + del self._buffer[:] + return + + try: + self._write_buffer() + except BrokenPipeError: + self.on_broken_pipe() + + def _write_buffer(self) -> None: + """Write the buffer to the output file.""" + + with self._lock: + if self.record and not self._buffer_index: + with self._record_buffer_lock: + self._record_buffer.extend(self._buffer[:]) + + if self._buffer_index == 0: + if self.is_jupyter: # pragma: no cover + from .jupyter import display + + display(self._buffer, self._render_buffer(self._buffer[:])) + del self._buffer[:] + else: + if WINDOWS: + use_legacy_windows_render = False + if self.legacy_windows: + fileno = get_fileno(self.file) + if fileno is not None: + use_legacy_windows_render = ( + fileno in _STD_STREAMS_OUTPUT + ) + + if use_legacy_windows_render: + from pip._vendor.rich._win32_console import LegacyWindowsTerm + from pip._vendor.rich._windows_renderer import legacy_windows_render + + buffer = self._buffer[:] + if self.no_color and self._color_system: + buffer = list(Segment.remove_color(buffer)) + + legacy_windows_render(buffer, LegacyWindowsTerm(self.file)) + else: + # Either a non-std stream on legacy Windows, or modern Windows. 
+ text = self._render_buffer(self._buffer[:]) + # https://bugs.python.org/issue37871 + # https://github.com/python/cpython/issues/82052 + # We need to avoid writing more than 32Kb in a single write, due to the above bug + write = self.file.write + # Worse case scenario, every character is 4 bytes of utf-8 + MAX_WRITE = 32 * 1024 // 4 + try: + if len(text) <= MAX_WRITE: + write(text) + else: + batch: List[str] = [] + batch_append = batch.append + size = 0 + for line in text.splitlines(True): + if size + len(line) > MAX_WRITE and batch: + write("".join(batch)) + batch.clear() + size = 0 + batch_append(line) + size += len(line) + if batch: + write("".join(batch)) + batch.clear() + except UnicodeEncodeError as error: + error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" + raise + else: + text = self._render_buffer(self._buffer[:]) + try: + self.file.write(text) + except UnicodeEncodeError as error: + error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" + raise + + self.file.flush() + del self._buffer[:] + + def _render_buffer(self, buffer: Iterable[Segment]) -> str: + """Render buffered output, and clear buffer.""" + output: List[str] = [] + append = output.append + color_system = self._color_system + legacy_windows = self.legacy_windows + not_terminal = not self.is_terminal + if self.no_color and color_system: + buffer = Segment.remove_color(buffer) + for text, style, control in buffer: + if style: + append( + style.render( + text, + color_system=color_system, + legacy_windows=legacy_windows, + ) + ) + elif not (not_terminal and control): + append(text) + + rendered = "".join(output) + return rendered + + def input( + self, + prompt: TextType = "", + *, + markup: bool = True, + emoji: bool = True, + password: bool = False, + stream: Optional[TextIO] = None, + ) -> str: + """Displays a prompt and waits for input from the user. The prompt may contain color / style. 
+ + It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded. + + Args: + prompt (Union[str, Text]): Text to render in the prompt. + markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True. + emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True. + password: (bool, optional): Hide typed text. Defaults to False. + stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None. + + Returns: + str: Text read from stdin. + """ + if prompt: + self.print(prompt, markup=markup, emoji=emoji, end="") + if password: + result = getpass("", stream=stream) + else: + if stream: + result = stream.readline() + else: + result = input() + return result + + def export_text(self, *, clear: bool = True, styles: bool = False) -> str: + """Generate text from console contents (requires record=True argument in constructor). + + Args: + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text. + Defaults to ``False``. + + Returns: + str: String containing console contents. + + """ + assert ( + self.record + ), "To export console contents set record=True in the constructor or instance" + + with self._record_buffer_lock: + if styles: + text = "".join( + (style.render(text) if style else text) + for text, style, _ in self._record_buffer + ) + else: + text = "".join( + segment.text + for segment in self._record_buffer + if not segment.control + ) + if clear: + del self._record_buffer[:] + return text + + def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None: + """Generate text from console and save to a given location (requires record=True argument in constructor). + + Args: + path (str): Path to write text files. 
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text. + Defaults to ``False``. + + """ + text = self.export_text(clear=clear, styles=styles) + with open(path, "w", encoding="utf-8") as write_file: + write_file.write(text) + + def export_html( + self, + *, + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: Optional[str] = None, + inline_styles: bool = False, + ) -> str: + """Generate HTML from console contents (requires record=True argument in constructor). + + Args: + theme (TerminalTheme, optional): TerminalTheme object containing console colors. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + code_format (str, optional): Format string to render HTML. In addition to '{foreground}', + '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. + inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files + larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. + Defaults to False. + + Returns: + str: String containing console contents as HTML. 
+ """ + assert ( + self.record + ), "To export console contents set record=True in the constructor or instance" + fragments: List[str] = [] + append = fragments.append + _theme = theme or DEFAULT_TERMINAL_THEME + stylesheet = "" + + render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format + + with self._record_buffer_lock: + if inline_styles: + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + if style.link: + text = f'{text}' + text = f'{text}' if rule else text + append(text) + else: + styles: Dict[str, int] = {} + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + style_number = styles.setdefault(rule, len(styles) + 1) + if style.link: + text = f'{text}' + else: + text = f'{text}' + append(text) + stylesheet_rules: List[str] = [] + stylesheet_append = stylesheet_rules.append + for style_rule, style_number in styles.items(): + if style_rule: + stylesheet_append(f".r{style_number} {{{style_rule}}}") + stylesheet = "\n".join(stylesheet_rules) + + rendered_code = render_code_format.format( + code="".join(fragments), + stylesheet=stylesheet, + foreground=_theme.foreground_color.hex, + background=_theme.background_color.hex, + ) + if clear: + del self._record_buffer[:] + return rendered_code + + def save_html( + self, + path: str, + *, + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_HTML_FORMAT, + inline_styles: bool = False, + ) -> None: + """Generate HTML from console contents and write to a file (requires record=True argument in constructor). + + Args: + path (str): Path to write html file. + theme (TerminalTheme, optional): TerminalTheme object containing console colors. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. 
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}', + '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. + inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files + larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. + Defaults to False. + + """ + html = self.export_html( + theme=theme, + clear=clear, + code_format=code_format, + inline_styles=inline_styles, + ) + with open(path, "w", encoding="utf-8") as write_file: + write_file.write(html) + + def export_svg( + self, + *, + title: str = "Rich", + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_SVG_FORMAT, + font_aspect_ratio: float = 0.61, + unique_id: Optional[str] = None, + ) -> str: + """ + Generate an SVG from the console contents (requires record=True in Console constructor). + + Args: + title (str, optional): The title of the tab in the output image + theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` + code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables + into the string in order to form the final SVG output. The default template used and the variables + injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. + font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` + string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). + If you aren't specifying a different font inside ``code_format``, you probably don't need this. + unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node + ids). If not set, this defaults to a computed value based on the recorded content. 
+ """ + + from pip._vendor.rich.cells import cell_len + + style_cache: Dict[Style, str] = {} + + def get_svg_style(style: Style) -> str: + """Convert a Style to CSS rules for SVG.""" + if style in style_cache: + return style_cache[style] + css_rules = [] + color = ( + _theme.foreground_color + if (style.color is None or style.color.is_default) + else style.color.get_truecolor(_theme) + ) + bgcolor = ( + _theme.background_color + if (style.bgcolor is None or style.bgcolor.is_default) + else style.bgcolor.get_truecolor(_theme) + ) + if style.reverse: + color, bgcolor = bgcolor, color + if style.dim: + color = blend_rgb(color, bgcolor, 0.4) + css_rules.append(f"fill: {color.hex}") + if style.bold: + css_rules.append("font-weight: bold") + if style.italic: + css_rules.append("font-style: italic;") + if style.underline: + css_rules.append("text-decoration: underline;") + if style.strike: + css_rules.append("text-decoration: line-through;") + + css = ";".join(css_rules) + style_cache[style] = css + return css + + _theme = theme or SVG_EXPORT_THEME + + width = self.width + char_height = 20 + char_width = char_height * font_aspect_ratio + line_height = char_height * 1.22 + + margin_top = 1 + margin_right = 1 + margin_bottom = 1 + margin_left = 1 + + padding_top = 40 + padding_right = 8 + padding_bottom = 8 + padding_left = 8 + + padding_width = padding_left + padding_right + padding_height = padding_top + padding_bottom + margin_width = margin_left + margin_right + margin_height = margin_top + margin_bottom + + text_backgrounds: List[str] = [] + text_group: List[str] = [] + classes: Dict[str, int] = {} + style_no = 1 + + def escape_text(text: str) -> str: + """HTML escape text and replace spaces with nbsp.""" + return escape(text).replace(" ", " ") + + def make_tag( + name: str, content: Optional[str] = None, **attribs: object + ) -> str: + """Make a tag from name, content, and attributes.""" + + def stringify(value: object) -> str: + if isinstance(value, (float)): + 
return format(value, "g") + return str(value) + + tag_attribs = " ".join( + f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"' + for k, v in attribs.items() + ) + return ( + f"<{name} {tag_attribs}>{content}" + if content + else f"<{name} {tag_attribs}/>" + ) + + with self._record_buffer_lock: + segments = list(Segment.filter_control(self._record_buffer)) + if clear: + self._record_buffer.clear() + + if unique_id is None: + unique_id = "terminal-" + str( + zlib.adler32( + ("".join(repr(segment) for segment in segments)).encode( + "utf-8", + "ignore", + ) + + title.encode("utf-8", "ignore") + ) + ) + y = 0 + for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)): + x = 0 + for text, style, _control in line: + style = style or Style() + rules = get_svg_style(style) + if rules not in classes: + classes[rules] = style_no + style_no += 1 + class_name = f"r{classes[rules]}" + + if style.reverse: + has_background = True + background = ( + _theme.foreground_color.hex + if style.color is None + else style.color.get_truecolor(_theme).hex + ) + else: + bgcolor = style.bgcolor + has_background = bgcolor is not None and not bgcolor.is_default + background = ( + _theme.background_color.hex + if style.bgcolor is None + else style.bgcolor.get_truecolor(_theme).hex + ) + + text_length = cell_len(text) + if has_background: + text_backgrounds.append( + make_tag( + "rect", + fill=background, + x=x * char_width, + y=y * line_height + 1.5, + width=char_width * text_length, + height=line_height + 0.25, + shape_rendering="crispEdges", + ) + ) + + if text != " " * len(text): + text_group.append( + make_tag( + "text", + escape_text(text), + _class=f"{unique_id}-{class_name}", + x=x * char_width, + y=y * line_height + char_height, + textLength=char_width * len(text), + clip_path=f"url(#{unique_id}-line-{y})", + ) + ) + x += cell_len(text) + + line_offsets = [line_no * line_height + 1.5 for line_no in range(y)] + lines = "\n".join( + f""" + {make_tag("rect", x=0, 
y=offset, width=char_width * width, height=line_height + 0.25)} + """ + for line_no, offset in enumerate(line_offsets) + ) + + styles = "\n".join( + f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items() + ) + backgrounds = "".join(text_backgrounds) + matrix = "".join(text_group) + + terminal_width = ceil(width * char_width + padding_width) + terminal_height = (y + 1) * line_height + padding_height + chrome = make_tag( + "rect", + fill=_theme.background_color.hex, + stroke="rgba(255,255,255,0.35)", + stroke_width="1", + x=margin_left, + y=margin_top, + width=terminal_width, + height=terminal_height, + rx=8, + ) + + title_color = _theme.foreground_color.hex + if title: + chrome += make_tag( + "text", + escape_text(title), + _class=f"{unique_id}-title", + fill=title_color, + text_anchor="middle", + x=terminal_width // 2, + y=margin_top + char_height + 6, + ) + chrome += f""" + + + + + + """ + + svg = code_format.format( + unique_id=unique_id, + char_width=char_width, + char_height=char_height, + line_height=line_height, + terminal_width=char_width * width - 1, + terminal_height=(y + 1) * line_height - 1, + width=terminal_width + margin_width, + height=terminal_height + margin_height, + terminal_x=margin_left + padding_left, + terminal_y=margin_top + padding_top, + styles=styles, + chrome=chrome, + backgrounds=backgrounds, + matrix=matrix, + lines=lines, + ) + return svg + + def save_svg( + self, + path: str, + *, + title: str = "Rich", + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_SVG_FORMAT, + font_aspect_ratio: float = 0.61, + unique_id: Optional[str] = None, + ) -> None: + """Generate an SVG file from the console contents (requires record=True in Console constructor). + + Args: + path (str): The path to write the SVG to. 
+ title (str, optional): The title of the tab in the output image + theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` + code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables + into the string in order to form the final SVG output. The default template used and the variables + injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. + font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` + string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). + If you aren't specifying a different font inside ``code_format``, you probably don't need this. + unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node + ids). If not set, this defaults to a computed value based on the recorded content. + """ + svg = self.export_svg( + title=title, + theme=theme, + clear=clear, + code_format=code_format, + font_aspect_ratio=font_aspect_ratio, + unique_id=unique_id, + ) + with open(path, "w", encoding="utf-8") as write_file: + write_file.write(svg) + + +def _svg_hash(svg_main_code: str) -> str: + """Returns a unique hash for the given SVG main code. + + Args: + svg_main_code (str): The content we're going to inject in the SVG envelope. 
+ + Returns: + str: a hash of the given content + """ + return str(zlib.adler32(svg_main_code.encode())) + + +if __name__ == "__main__": # pragma: no cover + console = Console(record=True) + + console.log( + "JSONRPC [i]request[/i]", + 5, + 1.3, + True, + False, + None, + { + "jsonrpc": "2.0", + "method": "subtract", + "params": {"minuend": 42, "subtrahend": 23}, + "id": 3, + }, + ) + + console.log("Hello, World!", "{'a': 1}", repr(console)) + + console.print( + { + "name": None, + "empty": [], + "quiz": { + "sport": { + "answered": True, + "q1": { + "question": "Which one is correct team name in NBA?", + "options": [ + "New York Bulls", + "Los Angeles Kings", + "Golden State Warriors", + "Huston Rocket", + ], + "answer": "Huston Rocket", + }, + }, + "maths": { + "answered": False, + "q1": { + "question": "5 + 7 = ?", + "options": [10, 11, 12, 13], + "answer": 12, + }, + "q2": { + "question": "12 - 8 = ?", + "options": [1, 2, 3, 4], + "answer": 4, + }, + }, + }, + } + ) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py new file mode 100644 index 0000000000000000000000000000000000000000..65fdf56342e8b5b8e181914881025231684e1871 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py @@ -0,0 +1,37 @@ +from typing import Optional, TYPE_CHECKING + +from .jupyter import JupyterMixin +from .measure import Measurement + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType, RenderResult + + +class Constrain(JupyterMixin): + """Constrain the width of a renderable to a given number of characters. + + Args: + renderable (RenderableType): A renderable object. + width (int, optional): The maximum width (in characters) to render. Defaults to 80. 
+ """ + + def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None: + self.renderable = renderable + self.width = width + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.width is None: + yield self.renderable + else: + child_options = options.update_width(min(self.width, options.max_width)) + yield from console.render(self.renderable, child_options) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if self.width is not None: + options = options.update_width(self.width) + measurement = Measurement.get(console, options, self.renderable) + return measurement diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/containers.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/containers.py new file mode 100644 index 0000000000000000000000000000000000000000..901ff8ba6ea0836481a015ed5c627889cc416c03 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/containers.py @@ -0,0 +1,167 @@ +from itertools import zip_longest +from typing import ( + TYPE_CHECKING, + Iterable, + Iterator, + List, + Optional, + TypeVar, + Union, + overload, +) + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + JustifyMethod, + OverflowMethod, + RenderResult, + RenderableType, + ) + from .text import Text + +from .cells import cell_len +from .measure import Measurement + +T = TypeVar("T") + + +class Renderables: + """A list subclass which renders its contents to the console.""" + + def __init__( + self, renderables: Optional[Iterable["RenderableType"]] = None + ) -> None: + self._renderables: List["RenderableType"] = ( + list(renderables) if renderables is not None else [] + ) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._renderables + + def __rich_measure__( + self, 
console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + dimensions = [ + Measurement.get(console, options, renderable) + for renderable in self._renderables + ] + if not dimensions: + return Measurement(1, 1) + _min = max(dimension.minimum for dimension in dimensions) + _max = max(dimension.maximum for dimension in dimensions) + return Measurement(_min, _max) + + def append(self, renderable: "RenderableType") -> None: + self._renderables.append(renderable) + + def __iter__(self) -> Iterable["RenderableType"]: + return iter(self._renderables) + + +class Lines: + """A list subclass which can render to the console.""" + + def __init__(self, lines: Iterable["Text"] = ()) -> None: + self._lines: List["Text"] = list(lines) + + def __repr__(self) -> str: + return f"Lines({self._lines!r})" + + def __iter__(self) -> Iterator["Text"]: + return iter(self._lines) + + @overload + def __getitem__(self, index: int) -> "Text": + ... + + @overload + def __getitem__(self, index: slice) -> List["Text"]: + ... + + def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]: + return self._lines[index] + + def __setitem__(self, index: int, value: "Text") -> "Lines": + self._lines[index] = value + return self + + def __len__(self) -> int: + return self._lines.__len__() + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._lines + + def append(self, line: "Text") -> None: + self._lines.append(line) + + def extend(self, lines: Iterable["Text"]) -> None: + self._lines.extend(lines) + + def pop(self, index: int = -1) -> "Text": + return self._lines.pop(index) + + def justify( + self, + console: "Console", + width: int, + justify: "JustifyMethod" = "left", + overflow: "OverflowMethod" = "fold", + ) -> None: + """Justify and overflow text to a given width. + + Args: + console (Console): Console instance. 
+ width (int): Number of cells available per line. + justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left". + overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold". + + """ + from .text import Text + + if justify == "left": + for line in self._lines: + line.truncate(width, overflow=overflow, pad=True) + elif justify == "center": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left((width - cell_len(line.plain)) // 2) + line.pad_right(width - cell_len(line.plain)) + elif justify == "right": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left(width - cell_len(line.plain)) + elif justify == "full": + for line_index, line in enumerate(self._lines): + if line_index == len(self._lines) - 1: + break + words = line.split(" ") + words_size = sum(cell_len(word.plain) for word in words) + num_spaces = len(words) - 1 + spaces = [1 for _ in range(num_spaces)] + index = 0 + if spaces: + while words_size + num_spaces < width: + spaces[len(spaces) - index - 1] += 1 + num_spaces += 1 + index = (index + 1) % len(spaces) + tokens: List[Text] = [] + for index, (word, next_word) in enumerate( + zip_longest(words, words[1:]) + ): + tokens.append(word) + if index < len(spaces): + style = word.get_style_at_offset(console, -1) + next_style = next_word.get_style_at_offset(console, 0) + space_style = style if style == next_style else line.style + tokens.append(Text(" " * spaces[index], style=space_style)) + self[line_index] = Text("").join(tokens) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py new file mode 100644 index 0000000000000000000000000000000000000000..ad36183898eddb11e33ccb7623c0291ccc0f091d --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py @@ -0,0 
+1,37 @@ +import os +import platform + +from pip._vendor.rich import inspect +from pip._vendor.rich.console import Console, get_windows_console_features +from pip._vendor.rich.panel import Panel +from pip._vendor.rich.pretty import Pretty + + +def report() -> None: # pragma: no cover + """Print a report to the terminal with debugging information""" + console = Console() + inspect(console) + features = get_windows_console_features() + inspect(features) + + env_names = ( + "TERM", + "COLORTERM", + "CLICOLOR", + "NO_COLOR", + "TERM_PROGRAM", + "COLUMNS", + "LINES", + "JUPYTER_COLUMNS", + "JUPYTER_LINES", + "JPY_PARENT_PID", + "VSCODE_VERBOSE_LOGGING", + ) + env = {name: os.getenv(name) for name in env_names} + console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables")) + + console.print(f'platform="{platform.system()}"') + + +if __name__ == "__main__": # pragma: no cover + report() diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..4b0b0da6c2a62b2b1468c35ddd69f1bbb9b91aa8 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py @@ -0,0 +1,57 @@ +import io +from typing import IO, TYPE_CHECKING, Any, List + +from .ansi import AnsiDecoder +from .text import Text + +if TYPE_CHECKING: + from .console import Console + + +class FileProxy(io.TextIOBase): + """Wraps a file (e.g. 
sys.stdout) and redirects writes to a console.""" + + def __init__(self, console: "Console", file: IO[str]) -> None: + self.__console = console + self.__file = file + self.__buffer: List[str] = [] + self.__ansi_decoder = AnsiDecoder() + + @property + def rich_proxied_file(self) -> IO[str]: + """Get proxied file.""" + return self.__file + + def __getattr__(self, name: str) -> Any: + return getattr(self.__file, name) + + def write(self, text: str) -> int: + if not isinstance(text, str): + raise TypeError(f"write() argument must be str, not {type(text).__name__}") + buffer = self.__buffer + lines: List[str] = [] + while text: + line, new_line, text = text.partition("\n") + if new_line: + lines.append("".join(buffer) + line) + buffer.clear() + else: + buffer.append(line) + break + if lines: + console = self.__console + with console: + output = Text("\n").join( + self.__ansi_decoder.decode_line(line) for line in lines + ) + console.print(output) + return len(text) + + def flush(self) -> None: + output = "".join(self.__buffer) + if output: + self.__console.print(output) + del self.__buffer[:] + + def fileno(self) -> int: + return self.__file.fileno() diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py new file mode 100644 index 0000000000000000000000000000000000000000..83bc9118d2bdb8983f863063687c2ea394a9abb1 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py @@ -0,0 +1,88 @@ +"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2 + +The functions declared in this module should cover the different +use cases needed to generate a string representation of a file size +using several different units. Since there are many standards regarding +file size units, three different functions have been implemented. 
+ +See Also: + * `Wikipedia: Binary prefix `_ + +""" + +__all__ = ["decimal"] + +from typing import Iterable, List, Optional, Tuple + + +def _to_str( + size: int, + suffixes: Iterable[str], + base: int, + *, + precision: Optional[int] = 1, + separator: Optional[str] = " ", +) -> str: + if size == 1: + return "1 byte" + elif size < base: + return f"{size:,} bytes" + + for i, suffix in enumerate(suffixes, 2): # noqa: B007 + unit = base**i + if size < unit: + break + return "{:,.{precision}f}{separator}{}".format( + (base * size / unit), + suffix, + precision=precision, + separator=separator, + ) + + +def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]: + """Pick a suffix and base for the given size.""" + for i, suffix in enumerate(suffixes): + unit = base**i + if size < unit * base: + break + return unit, suffix + + +def decimal( + size: int, + *, + precision: Optional[int] = 1, + separator: Optional[str] = " ", +) -> str: + """Convert a filesize in to a string (powers of 1000, SI prefixes). + + In this convention, ``1000 B = 1 kB``. + + This is typically the format used to advertise the storage + capacity of USB flash drives and the like (*256 MB* meaning + actually a storage capacity of more than *256 000 000 B*), + or used by **Mac OS X** since v10.6 to report file sizes. + + Arguments: + int (size): A file size. + int (precision): The number of decimal places to include (default = 1). + str (separator): The string to separate the value from the units (default = " "). + + Returns: + `str`: A string containing a abbreviated file size and units. 
+ + Example: + >>> filesize.decimal(30000) + '30.0 kB' + >>> filesize.decimal(30000, precision=2, separator="") + '30.00kB' + + """ + return _to_str( + size, + ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"), + 1000, + precision=precision, + separator=separator, + ) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/logging.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..ff8d5d95f49acd86e33fe40d0978b903434f075b --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/logging.py @@ -0,0 +1,297 @@ +import logging +from datetime import datetime +from logging import Handler, LogRecord +from pathlib import Path +from types import ModuleType +from typing import ClassVar, Iterable, List, Optional, Type, Union + +from pip._vendor.rich._null_file import NullFile + +from . import get_console +from ._log_render import FormatTimeCallable, LogRender +from .console import Console, ConsoleRenderable +from .highlighter import Highlighter, ReprHighlighter +from .text import Text +from .traceback import Traceback + + +class RichHandler(Handler): + """A logging handler that renders output with Rich. The time / level / message and file are displayed in columns. + The level is color coded, and the message is syntax highlighted. + + Note: + Be careful when enabling console markup in log messages if you have configured logging for libraries not + under your control. If a dependency writes messages containing square brackets, it may not produce the intended output. + + Args: + level (Union[int, str], optional): Log level. Defaults to logging.NOTSET. + console (:class:`~rich.console.Console`, optional): Optional console instance to write logs. + Default will use a global console instance writing to stdout. + show_time (bool, optional): Show a column for the time. Defaults to True. + omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True. 
+ show_level (bool, optional): Show a column for the level. Defaults to True. + show_path (bool, optional): Show the path to the original log call. Defaults to True. + enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True. + highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None. + markup (bool, optional): Enable console markup in log messages. Defaults to False. + rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False. + tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None. + tracebacks_code_width (int, optional): Number of code characters used to render tracebacks, or None for full width. Defaults to 88. + tracebacks_extra_lines (int, optional): Additional lines of code to render tracebacks, or None for full width. Defaults to None. + tracebacks_theme (str, optional): Override pygments theme used in traceback. + tracebacks_word_wrap (bool, optional): Enable word wrapping of long tracebacks lines. Defaults to True. + tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False. + tracebacks_suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + tracebacks_max_frames (int, optional): Optional maximum number of frames returned by traceback. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X] ". 
+ keywords (List[str], optional): List of words to highlight instead of ``RichHandler.KEYWORDS``. + """ + + KEYWORDS: ClassVar[Optional[List[str]]] = [ + "GET", + "POST", + "HEAD", + "PUT", + "DELETE", + "OPTIONS", + "TRACE", + "PATCH", + ] + HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter + + def __init__( + self, + level: Union[int, str] = logging.NOTSET, + console: Optional[Console] = None, + *, + show_time: bool = True, + omit_repeated_times: bool = True, + show_level: bool = True, + show_path: bool = True, + enable_link_path: bool = True, + highlighter: Optional[Highlighter] = None, + markup: bool = False, + rich_tracebacks: bool = False, + tracebacks_width: Optional[int] = None, + tracebacks_code_width: int = 88, + tracebacks_extra_lines: int = 3, + tracebacks_theme: Optional[str] = None, + tracebacks_word_wrap: bool = True, + tracebacks_show_locals: bool = False, + tracebacks_suppress: Iterable[Union[str, ModuleType]] = (), + tracebacks_max_frames: int = 100, + locals_max_length: int = 10, + locals_max_string: int = 80, + log_time_format: Union[str, FormatTimeCallable] = "[%x %X]", + keywords: Optional[List[str]] = None, + ) -> None: + super().__init__(level=level) + self.console = console or get_console() + self.highlighter = highlighter or self.HIGHLIGHTER_CLASS() + self._log_render = LogRender( + show_time=show_time, + show_level=show_level, + show_path=show_path, + time_format=log_time_format, + omit_repeated_times=omit_repeated_times, + level_width=None, + ) + self.enable_link_path = enable_link_path + self.markup = markup + self.rich_tracebacks = rich_tracebacks + self.tracebacks_width = tracebacks_width + self.tracebacks_extra_lines = tracebacks_extra_lines + self.tracebacks_theme = tracebacks_theme + self.tracebacks_word_wrap = tracebacks_word_wrap + self.tracebacks_show_locals = tracebacks_show_locals + self.tracebacks_suppress = tracebacks_suppress + self.tracebacks_max_frames = tracebacks_max_frames + self.tracebacks_code_width = 
tracebacks_code_width + self.locals_max_length = locals_max_length + self.locals_max_string = locals_max_string + self.keywords = keywords + + def get_level_text(self, record: LogRecord) -> Text: + """Get the level name from the record. + + Args: + record (LogRecord): LogRecord instance. + + Returns: + Text: A tuple of the style and level name. + """ + level_name = record.levelname + level_text = Text.styled( + level_name.ljust(8), f"logging.level.{level_name.lower()}" + ) + return level_text + + def emit(self, record: LogRecord) -> None: + """Invoked by logging.""" + message = self.format(record) + traceback = None + if ( + self.rich_tracebacks + and record.exc_info + and record.exc_info != (None, None, None) + ): + exc_type, exc_value, exc_traceback = record.exc_info + assert exc_type is not None + assert exc_value is not None + traceback = Traceback.from_exception( + exc_type, + exc_value, + exc_traceback, + width=self.tracebacks_width, + code_width=self.tracebacks_code_width, + extra_lines=self.tracebacks_extra_lines, + theme=self.tracebacks_theme, + word_wrap=self.tracebacks_word_wrap, + show_locals=self.tracebacks_show_locals, + locals_max_length=self.locals_max_length, + locals_max_string=self.locals_max_string, + suppress=self.tracebacks_suppress, + max_frames=self.tracebacks_max_frames, + ) + message = record.getMessage() + if self.formatter: + record.message = record.getMessage() + formatter = self.formatter + if hasattr(formatter, "usesTime") and formatter.usesTime(): + record.asctime = formatter.formatTime(record, formatter.datefmt) + message = formatter.formatMessage(record) + + message_renderable = self.render_message(record, message) + log_renderable = self.render( + record=record, traceback=traceback, message_renderable=message_renderable + ) + if isinstance(self.console.file, NullFile): + # Handles pythonw, where stdout/stderr are null, and we return NullFile + # instance from Console.file. 
In this case, we still want to make a log record + # even though we won't be writing anything to a file. + self.handleError(record) + else: + try: + self.console.print(log_renderable) + except Exception: + self.handleError(record) + + def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable": + """Render message text in to Text. + + Args: + record (LogRecord): logging Record. + message (str): String containing log message. + + Returns: + ConsoleRenderable: Renderable to display log message. + """ + use_markup = getattr(record, "markup", self.markup) + message_text = Text.from_markup(message) if use_markup else Text(message) + + highlighter = getattr(record, "highlighter", self.highlighter) + if highlighter: + message_text = highlighter(message_text) + + if self.keywords is None: + self.keywords = self.KEYWORDS + + if self.keywords: + message_text.highlight_words(self.keywords, "logging.keyword") + + return message_text + + def render( + self, + *, + record: LogRecord, + traceback: Optional[Traceback], + message_renderable: "ConsoleRenderable", + ) -> "ConsoleRenderable": + """Render log for display. + + Args: + record (LogRecord): logging Record. + traceback (Optional[Traceback]): Traceback instance or None for no Traceback. + message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents. + + Returns: + ConsoleRenderable: Renderable to display log. 
+ """ + path = Path(record.pathname).name + level = self.get_level_text(record) + time_format = None if self.formatter is None else self.formatter.datefmt + log_time = datetime.fromtimestamp(record.created) + + log_renderable = self._log_render( + self.console, + [message_renderable] if not traceback else [message_renderable, traceback], + log_time=log_time, + time_format=time_format, + level=level, + path=path, + line_no=record.lineno, + link_path=record.pathname if self.enable_link_path else None, + ) + return log_renderable + + +if __name__ == "__main__": # pragma: no cover + from time import sleep + + FORMAT = "%(message)s" + # FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s" + logging.basicConfig( + level="NOTSET", + format=FORMAT, + datefmt="[%X]", + handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)], + ) + log = logging.getLogger("rich") + + log.info("Server starting...") + log.info("Listening on http://127.0.0.1:8080") + sleep(1) + + log.info("GET /index.html 200 1298") + log.info("GET /imgs/backgrounds/back1.jpg 200 54386") + log.info("GET /css/styles.css 200 54386") + log.warning("GET /favicon.ico 404 242") + sleep(1) + + log.debug( + "JSONRPC request\n--> %r\n<-- %r", + { + "version": "1.1", + "method": "confirmFruitPurchase", + "params": [["apple", "orange", "mangoes", "pomelo"], 1.123], + "id": "194521489", + }, + {"version": "1.1", "result": True, "error": None, "id": "194521489"}, + ) + log.debug( + "Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer" + ) + log.error("Unable to find 'pomelo' in database!") + log.info("POST /jsonrpc/ 200 65532") + log.info("POST /admin/ 401 42234") + log.warning("password was rejected for admin site.") + + def divide() -> None: + number = 1 + divisor = 0 + foos = ["foo"] * 100 + log.debug("in divide") + try: + number / divisor + except: + log.exception("An error of some kind occurred!") + + divide() + sleep(1) + 
log.critical("Out of memory!") + log.info("Server exited with code=-1") + log.info("[bold]EXITING...[/bold]", extra=dict(markup=True)) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/markup.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/markup.py new file mode 100644 index 0000000000000000000000000000000000000000..f6171878f823183ee8f77195b3e944be222006dc --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/markup.py @@ -0,0 +1,251 @@ +import re +from ast import literal_eval +from operator import attrgetter +from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union + +from ._emoji_replace import _emoji_replace +from .emoji import EmojiVariant +from .errors import MarkupError +from .style import Style +from .text import Span, Text + +RE_TAGS = re.compile( + r"""((\\*)\[([a-z#/@][^[]*?)])""", + re.VERBOSE, +) + +RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$") + + +class Tag(NamedTuple): + """A tag in console markup.""" + + name: str + """The tag name. e.g. 'bold'.""" + parameters: Optional[str] + """Any additional parameters after the name.""" + + def __str__(self) -> str: + return ( + self.name if self.parameters is None else f"{self.name} {self.parameters}" + ) + + @property + def markup(self) -> str: + """Get the string representation of this tag.""" + return ( + f"[{self.name}]" + if self.parameters is None + else f"[{self.name}={self.parameters}]" + ) + + +_ReStringMatch = Match[str] # regex match object +_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub +_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re + + +def escape( + markup: str, + _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub, +) -> str: + """Escapes text so that it won't be interpreted as markup. + + Args: + markup (str): Content to be inserted in to markup. + + Returns: + str: Markup with square brackets escaped. 
+ """ + + def escape_backslashes(match: Match[str]) -> str: + """Called by re.sub replace matches.""" + backslashes, text = match.groups() + return f"{backslashes}{backslashes}\\{text}" + + markup = _escape(escape_backslashes, markup) + if markup.endswith("\\") and not markup.endswith("\\\\"): + return markup + "\\" + + return markup + + +def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: + """Parse markup in to an iterable of tuples of (position, text, tag). + + Args: + markup (str): A string containing console markup + + """ + position = 0 + _divmod = divmod + _Tag = Tag + for match in RE_TAGS.finditer(markup): + full_text, escapes, tag_text = match.groups() + start, end = match.span() + if start > position: + yield start, markup[position:start], None + if escapes: + backslashes, escaped = _divmod(len(escapes), 2) + if backslashes: + # Literal backslashes + yield start, "\\" * backslashes, None + start += backslashes * 2 + if escaped: + # Escape of tag + yield start, full_text[len(escapes) :], None + position = end + continue + text, equals, parameters = tag_text.partition("=") + yield start, None, _Tag(text, parameters if equals else None) + position = end + if position < len(markup): + yield position, markup[position:], None + + +def render( + markup: str, + style: Union[str, Style] = "", + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, +) -> Text: + """Render console markup in to a Text instance. + + Args: + markup (str): A string containing console markup. + style: (Union[str, Style]): The style to use. + emoji (bool, optional): Also render emoji code. Defaults to True. + emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. + + + Raises: + MarkupError: If there is a syntax error in the markup. + + Returns: + Text: A test instance. 
+ """ + emoji_replace = _emoji_replace + if "[" not in markup: + return Text( + emoji_replace(markup, default_variant=emoji_variant) if emoji else markup, + style=style, + ) + text = Text(style=style) + append = text.append + normalize = Style.normalize + + style_stack: List[Tuple[int, Tag]] = [] + pop = style_stack.pop + + spans: List[Span] = [] + append_span = spans.append + + _Span = Span + _Tag = Tag + + def pop_style(style_name: str) -> Tuple[int, Tag]: + """Pop tag matching given style name.""" + for index, (_, tag) in enumerate(reversed(style_stack), 1): + if tag.name == style_name: + return pop(-index) + raise KeyError(style_name) + + for position, plain_text, tag in _parse(markup): + if plain_text is not None: + # Handle open brace escapes, where the brace is not part of a tag. + plain_text = plain_text.replace("\\[", "[") + append(emoji_replace(plain_text) if emoji else plain_text) + elif tag is not None: + if tag.name.startswith("/"): # Closing tag + style_name = tag.name[1:].strip() + + if style_name: # explicit close + style_name = normalize(style_name) + try: + start, open_tag = pop_style(style_name) + except KeyError: + raise MarkupError( + f"closing tag '{tag.markup}' at position {position} doesn't match any open tag" + ) from None + else: # implicit close + try: + start, open_tag = pop() + except IndexError: + raise MarkupError( + f"closing tag '[/]' at position {position} has nothing to close" + ) from None + + if open_tag.name.startswith("@"): + if open_tag.parameters: + handler_name = "" + parameters = open_tag.parameters.strip() + handler_match = RE_HANDLER.match(parameters) + if handler_match is not None: + handler_name, match_parameters = handler_match.groups() + parameters = ( + "()" if match_parameters is None else match_parameters + ) + + try: + meta_params = literal_eval(parameters) + except SyntaxError as error: + raise MarkupError( + f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}" + ) + except Exception as error: 
+ raise MarkupError( + f"error parsing {open_tag.parameters!r}; {error}" + ) from None + + if handler_name: + meta_params = ( + handler_name, + meta_params + if isinstance(meta_params, tuple) + else (meta_params,), + ) + + else: + meta_params = () + + append_span( + _Span( + start, len(text), Style(meta={open_tag.name: meta_params}) + ) + ) + else: + append_span(_Span(start, len(text), str(open_tag))) + + else: # Opening tag + normalized_tag = _Tag(normalize(tag.name), tag.parameters) + style_stack.append((len(text), normalized_tag)) + + text_length = len(text) + while style_stack: + start, tag = style_stack.pop() + style = str(tag) + if style: + append_span(_Span(start, text_length, style)) + + text.spans = sorted(spans[::-1], key=attrgetter("start")) + return text + + +if __name__ == "__main__": # pragma: no cover + MARKUP = [ + "[red]Hello World[/red]", + "[magenta]Hello [b]World[/b]", + "[bold]Bold[italic] bold and italic [/bold]italic[/italic]", + "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog", + ":warning-emoji: [bold red blink] DANGER![/]", + ] + + from pip._vendor.rich import print + from pip._vendor.rich.table import Table + + grid = Table("Markup", "Result", padding=(0, 1)) + + for markup in MARKUP: + grid.add_row(Text(markup), markup) + + print(grid) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/progress.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/progress.py new file mode 100644 index 0000000000000000000000000000000000000000..ec086d9885d6aec8ae736b2cdc6c98849aa1dbdb --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/progress.py @@ -0,0 +1,1715 @@ +import io +import sys +import typing +import warnings +from abc import ABC, abstractmethod +from collections import deque +from dataclasses import dataclass, field +from datetime import timedelta +from io import RawIOBase, UnsupportedOperation +from math import ceil +from mmap import mmap +from operator import length_hint +from os 
import PathLike, stat +from threading import Event, RLock, Thread +from types import TracebackType +from typing import ( + Any, + BinaryIO, + Callable, + ContextManager, + Deque, + Dict, + Generic, + Iterable, + List, + NamedTuple, + NewType, + Optional, + Sequence, + TextIO, + Tuple, + Type, + TypeVar, + Union, +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from pip._vendor.typing_extensions import Literal # pragma: no cover + +if sys.version_info >= (3, 11): + from typing import Self +else: + from pip._vendor.typing_extensions import Self # pragma: no cover + +from . import filesize, get_console +from .console import Console, Group, JustifyMethod, RenderableType +from .highlighter import Highlighter +from .jupyter import JupyterMixin +from .live import Live +from .progress_bar import ProgressBar +from .spinner import Spinner +from .style import StyleType +from .table import Column, Table +from .text import Text, TextType + +TaskID = NewType("TaskID", int) + +ProgressType = TypeVar("ProgressType") + +GetTimeCallable = Callable[[], float] + + +_I = typing.TypeVar("_I", TextIO, BinaryIO) + + +class _TrackThread(Thread): + """A thread to periodically update progress.""" + + def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float): + self.progress = progress + self.task_id = task_id + self.update_period = update_period + self.done = Event() + + self.completed = 0 + super().__init__(daemon=True) + + def run(self) -> None: + task_id = self.task_id + advance = self.progress.advance + update_period = self.update_period + last_completed = 0 + wait = self.done.wait + while not wait(update_period) and self.progress.live.is_started: + completed = self.completed + if last_completed != completed: + advance(task_id, completed - last_completed) + last_completed = completed + + self.progress.update(self.task_id, completed=self.completed, refresh=True) + + def __enter__(self) -> "_TrackThread": + self.start() + return self + + 
def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.done.set() + self.join() + + +def track( + sequence: Union[Sequence[ProgressType], Iterable[ProgressType]], + description: str = "Working...", + total: Optional[float] = None, + completed: int = 0, + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + update_period: float = 0.1, + disable: bool = False, + show_speed: bool = True, +) -> Iterable[ProgressType]: + """Track progress by iterating over a sequence. + + Args: + sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over. + description (str, optional): Description of task show next to progress bar. Defaults to "Working". + total: (float, optional): Total number of steps. Default is len(sequence). + completed (int, optional): Number of steps completed so far. Defaults to 0. + auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. + transient: (bool, optional): Clear the progress on exit. Defaults to False. + console (Console, optional): Console to write to. Default creates internal Console instance. + refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. + style (StyleType, optional): Style for the bar background. Defaults to "bar.back". + complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". + finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". + pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". 
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. + disable (bool, optional): Disable display of progress. + show_speed (bool, optional): Show speed if total isn't known. Defaults to True. + Returns: + Iterable[ProgressType]: An iterable of the values in the sequence. + + """ + + columns: List["ProgressColumn"] = ( + [TextColumn("[progress.description]{task.description}")] if description else [] + ) + columns.extend( + ( + BarColumn( + style=style, + complete_style=complete_style, + finished_style=finished_style, + pulse_style=pulse_style, + ), + TaskProgressColumn(show_speed=show_speed), + TimeRemainingColumn(elapsed_when_finished=True), + ) + ) + progress = Progress( + *columns, + auto_refresh=auto_refresh, + console=console, + transient=transient, + get_time=get_time, + refresh_per_second=refresh_per_second or 10, + disable=disable, + ) + + with progress: + yield from progress.track( + sequence, + total=total, + completed=completed, + description=description, + update_period=update_period, + ) + + +class _Reader(RawIOBase, BinaryIO): + """A reader that tracks progress while it's being read from.""" + + def __init__( + self, + handle: BinaryIO, + progress: "Progress", + task: TaskID, + close_handle: bool = True, + ) -> None: + self.handle = handle + self.progress = progress + self.task = task + self.close_handle = close_handle + self._closed = False + + def __enter__(self) -> "_Reader": + self.handle.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.close() + + def __iter__(self) -> BinaryIO: + return self + + def __next__(self) -> bytes: + line = next(self.handle) + self.progress.advance(self.task, advance=len(line)) + return line + + @property + def closed(self) -> bool: + return self._closed + + def fileno(self) -> int: + return self.handle.fileno() + + def isatty(self) -> 
bool: + return self.handle.isatty() + + @property + def mode(self) -> str: + return self.handle.mode + + @property + def name(self) -> str: + return self.handle.name + + def readable(self) -> bool: + return self.handle.readable() + + def seekable(self) -> bool: + return self.handle.seekable() + + def writable(self) -> bool: + return False + + def read(self, size: int = -1) -> bytes: + block = self.handle.read(size) + self.progress.advance(self.task, advance=len(block)) + return block + + def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override] + n = self.handle.readinto(b) # type: ignore[attr-defined] + self.progress.advance(self.task, advance=n) + return n + + def readline(self, size: int = -1) -> bytes: # type: ignore[override] + line = self.handle.readline(size) + self.progress.advance(self.task, advance=len(line)) + return line + + def readlines(self, hint: int = -1) -> List[bytes]: + lines = self.handle.readlines(hint) + self.progress.advance(self.task, advance=sum(map(len, lines))) + return lines + + def close(self) -> None: + if self.close_handle: + self.handle.close() + self._closed = True + + def seek(self, offset: int, whence: int = 0) -> int: + pos = self.handle.seek(offset, whence) + self.progress.update(self.task, completed=pos) + return pos + + def tell(self) -> int: + return self.handle.tell() + + def write(self, s: Any) -> int: + raise UnsupportedOperation("write") + + def writelines(self, lines: Iterable[Any]) -> None: + raise UnsupportedOperation("writelines") + + +class _ReadContext(ContextManager[_I], Generic[_I]): + """A utility class to handle a context for both a reader and a progress.""" + + def __init__(self, progress: "Progress", reader: _I) -> None: + self.progress = progress + self.reader: _I = reader + + def __enter__(self) -> _I: + self.progress.start() + return self.reader.__enter__() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + 
exc_tb: Optional[TracebackType], + ) -> None: + self.progress.stop() + self.reader.__exit__(exc_type, exc_val, exc_tb) + + +def wrap_file( + file: BinaryIO, + total: int, + *, + description: str = "Reading...", + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + disable: bool = False, +) -> ContextManager[BinaryIO]: + """Read bytes from a file while tracking progress. + + Args: + file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. + total (int): Total number of bytes to read. + description (str, optional): Description of task show next to progress bar. Defaults to "Reading". + auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. + transient: (bool, optional): Clear the progress on exit. Defaults to False. + console (Console, optional): Console to write to. Default creates internal Console instance. + refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. + style (StyleType, optional): Style for the bar background. Defaults to "bar.back". + complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". + finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". + pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". + disable (bool, optional): Disable display of progress. + Returns: + ContextManager[BinaryIO]: A context manager yielding a progress reader. 
+ + """ + + columns: List["ProgressColumn"] = ( + [TextColumn("[progress.description]{task.description}")] if description else [] + ) + columns.extend( + ( + BarColumn( + style=style, + complete_style=complete_style, + finished_style=finished_style, + pulse_style=pulse_style, + ), + DownloadColumn(), + TimeRemainingColumn(), + ) + ) + progress = Progress( + *columns, + auto_refresh=auto_refresh, + console=console, + transient=transient, + get_time=get_time, + refresh_per_second=refresh_per_second or 10, + disable=disable, + ) + + reader = progress.wrap_file(file, total=total, description=description) + return _ReadContext(progress, reader) + + +@typing.overload +def open( + file: Union[str, "PathLike[str]", bytes], + mode: Union[Literal["rt"], Literal["r"]], + buffering: int = -1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + *, + total: Optional[int] = None, + description: str = "Reading...", + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + disable: bool = False, +) -> ContextManager[TextIO]: + pass + + +@typing.overload +def open( + file: Union[str, "PathLike[str]", bytes], + mode: Literal["rb"], + buffering: int = -1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + *, + total: Optional[int] = None, + description: str = "Reading...", + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = 
"bar.pulse", + disable: bool = False, +) -> ContextManager[BinaryIO]: + pass + + +def open( + file: Union[str, "PathLike[str]", bytes], + mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", + buffering: int = -1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + newline: Optional[str] = None, + *, + total: Optional[int] = None, + description: str = "Reading...", + auto_refresh: bool = True, + console: Optional[Console] = None, + transient: bool = False, + get_time: Optional[Callable[[], float]] = None, + refresh_per_second: float = 10, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + disable: bool = False, +) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]: + """Read bytes from a file while tracking progress. + + Args: + path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. + mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt". + buffering (int): The buffering strategy to use, see :func:`io.open`. + encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`. + errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`. + newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open` + total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size. + description (str, optional): Description of task show next to progress bar. Defaults to "Reading". + auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. + transient: (bool, optional): Clear the progress on exit. Defaults to False. + console (Console, optional): Console to write to. Default creates internal Console instance. 
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. + style (StyleType, optional): Style for the bar background. Defaults to "bar.back". + complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". + finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". + pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". + disable (bool, optional): Disable display of progress. + encoding (str, optional): The encoding to use when reading in text mode. + + Returns: + ContextManager[BinaryIO]: A context manager yielding a progress reader. + + """ + + columns: List["ProgressColumn"] = ( + [TextColumn("[progress.description]{task.description}")] if description else [] + ) + columns.extend( + ( + BarColumn( + style=style, + complete_style=complete_style, + finished_style=finished_style, + pulse_style=pulse_style, + ), + DownloadColumn(), + TimeRemainingColumn(), + ) + ) + progress = Progress( + *columns, + auto_refresh=auto_refresh, + console=console, + transient=transient, + get_time=get_time, + refresh_per_second=refresh_per_second or 10, + disable=disable, + ) + + reader = progress.open( + file, + mode=mode, + buffering=buffering, + encoding=encoding, + errors=errors, + newline=newline, + total=total, + description=description, + ) + return _ReadContext(progress, reader) # type: ignore[return-value, type-var] + + +class ProgressColumn(ABC): + """Base class for a widget to use in progress display.""" + + max_refresh: Optional[float] = None + + def __init__(self, table_column: Optional[Column] = None) -> None: + self._table_column = table_column + self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {} + self._update_time: Optional[float] = None + + def get_table_column(self) -> Column: + """Get a table column, used to build tasks table.""" + return self._table_column or Column() + + def __call__(self, task: 
"Task") -> RenderableType: + """Called by the Progress object to return a renderable for the given task. + + Args: + task (Task): An object containing information regarding the task. + + Returns: + RenderableType: Anything renderable (including str). + """ + current_time = task.get_time() + if self.max_refresh is not None and not task.completed: + try: + timestamp, renderable = self._renderable_cache[task.id] + except KeyError: + pass + else: + if timestamp + self.max_refresh > current_time: + return renderable + + renderable = self.render(task) + self._renderable_cache[task.id] = (current_time, renderable) + return renderable + + @abstractmethod + def render(self, task: "Task") -> RenderableType: + """Should return a renderable object.""" + + +class RenderableColumn(ProgressColumn): + """A column to insert an arbitrary column. + + Args: + renderable (RenderableType, optional): Any renderable. Defaults to empty string. + """ + + def __init__( + self, renderable: RenderableType = "", *, table_column: Optional[Column] = None + ): + self.renderable = renderable + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> RenderableType: + return self.renderable + + +class SpinnerColumn(ProgressColumn): + """A column with a 'spinner' animation. + + Args: + spinner_name (str, optional): Name of spinner animation. Defaults to "dots". + style (StyleType, optional): Style of spinner. Defaults to "progress.spinner". + speed (float, optional): Speed factor of spinner. Defaults to 1.0. + finished_text (TextType, optional): Text used when task is finished. Defaults to " ". 
+ """ + + def __init__( + self, + spinner_name: str = "dots", + style: Optional[StyleType] = "progress.spinner", + speed: float = 1.0, + finished_text: TextType = " ", + table_column: Optional[Column] = None, + ): + self.spinner = Spinner(spinner_name, style=style, speed=speed) + self.finished_text = ( + Text.from_markup(finished_text) + if isinstance(finished_text, str) + else finished_text + ) + super().__init__(table_column=table_column) + + def set_spinner( + self, + spinner_name: str, + spinner_style: Optional[StyleType] = "progress.spinner", + speed: float = 1.0, + ) -> None: + """Set a new spinner. + + Args: + spinner_name (str): Spinner name, see python -m rich.spinner. + spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner". + speed (float, optional): Speed factor of spinner. Defaults to 1.0. + """ + self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed) + + def render(self, task: "Task") -> RenderableType: + text = ( + self.finished_text + if task.finished + else self.spinner.render(task.get_time()) + ) + return text + + +class TextColumn(ProgressColumn): + """A column containing text.""" + + def __init__( + self, + text_format: str, + style: StyleType = "none", + justify: JustifyMethod = "left", + markup: bool = True, + highlighter: Optional[Highlighter] = None, + table_column: Optional[Column] = None, + ) -> None: + self.text_format = text_format + self.justify: JustifyMethod = justify + self.style = style + self.markup = markup + self.highlighter = highlighter + super().__init__(table_column=table_column or Column(no_wrap=True)) + + def render(self, task: "Task") -> Text: + _text = self.text_format.format(task=task) + if self.markup: + text = Text.from_markup(_text, style=self.style, justify=self.justify) + else: + text = Text(_text, style=self.style, justify=self.justify) + if self.highlighter: + self.highlighter.highlight(text) + return text + + +class BarColumn(ProgressColumn): + """Renders a 
visual progress bar. + + Args: + bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40. + style (StyleType, optional): Style for the bar background. Defaults to "bar.back". + complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". + finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". + pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". + """ + + def __init__( + self, + bar_width: Optional[int] = 40, + style: StyleType = "bar.back", + complete_style: StyleType = "bar.complete", + finished_style: StyleType = "bar.finished", + pulse_style: StyleType = "bar.pulse", + table_column: Optional[Column] = None, + ) -> None: + self.bar_width = bar_width + self.style = style + self.complete_style = complete_style + self.finished_style = finished_style + self.pulse_style = pulse_style + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> ProgressBar: + """Gets a progress bar widget for a task.""" + return ProgressBar( + total=max(0, task.total) if task.total is not None else None, + completed=max(0, task.completed), + width=None if self.bar_width is None else max(1, self.bar_width), + pulse=not task.started, + animation_time=task.get_time(), + style=self.style, + complete_style=self.complete_style, + finished_style=self.finished_style, + pulse_style=self.pulse_style, + ) + + +class TimeElapsedColumn(ProgressColumn): + """Renders time elapsed.""" + + def render(self, task: "Task") -> Text: + """Show time elapsed.""" + elapsed = task.finished_time if task.finished else task.elapsed + if elapsed is None: + return Text("-:--:--", style="progress.elapsed") + delta = timedelta(seconds=max(0, int(elapsed))) + return Text(str(delta), style="progress.elapsed") + + +class TaskProgressColumn(TextColumn): + """Show task progress as a percentage. + + Args: + text_format (str, optional): Format for percentage display. 
class TaskProgressColumn(TextColumn):
    """Show task progress as a percentage.

    Args:
        text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%".
        text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "".
        style (StyleType, optional): Style of output. Defaults to "none".
        justify (JustifyMethod, optional): Text justification. Defaults to "left".
        markup (bool, optional): Enable markup. Defaults to True.
        highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None.
        table_column (Optional[Column], optional): Table Column to use. Defaults to None.
        show_speed (bool, optional): Show speed if total is unknown. Defaults to False.
    """

    def __init__(
        self,
        text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
        text_format_no_percentage: str = "",
        style: StyleType = "none",
        justify: JustifyMethod = "left",
        markup: bool = True,
        highlighter: Optional[Highlighter] = None,
        table_column: Optional[Column] = None,
        show_speed: bool = False,
    ) -> None:
        self.text_format_no_percentage = text_format_no_percentage
        self.show_speed = show_speed
        super().__init__(
            text_format=text_format,
            style=style,
            justify=justify,
            markup=markup,
            highlighter=highlighter,
            table_column=table_column,
        )

    @classmethod
    def render_speed(cls, speed: Optional[float]) -> Text:
        """Render the speed in iterations per second.

        Args:
            speed (Optional[float]): Speed in steps per second, or None if unknown.

        Returns:
            Text: Text object containing the task speed (empty if *speed* is None).
        """
        if speed is None:
            return Text("", style="progress.percentage")
        # Scale into thousands (×10³, ×10⁶, ...) so large rates stay readable.
        unit, suffix = filesize.pick_unit_and_suffix(
            int(speed),
            ["", "×10³", "×10⁶", "×10⁹", "×10¹²"],
            1000,
        )
        data_speed = speed / unit
        return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage")

    def render(self, task: "Task") -> Text:
        # With no total, a percentage is meaningless; optionally show speed instead.
        if task.total is None and self.show_speed:
            return self.render_speed(task.finished_speed or task.speed)
        text_format = (
            self.text_format_no_percentage if task.total is None else self.text_format
        )
        _text = text_format.format(task=task)
        if self.markup:
            text = Text.from_markup(_text, style=self.style, justify=self.justify)
        else:
            text = Text(_text, style=self.style, justify=self.justify)
        if self.highlighter:
            self.highlighter.highlight(text)
        return text
+ """ + + # Only refresh twice a second to prevent jitter + max_refresh = 0.5 + + def __init__( + self, + compact: bool = False, + elapsed_when_finished: bool = False, + table_column: Optional[Column] = None, + ): + self.compact = compact + self.elapsed_when_finished = elapsed_when_finished + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> Text: + """Show time remaining.""" + if self.elapsed_when_finished and task.finished: + task_time = task.finished_time + style = "progress.elapsed" + else: + task_time = task.time_remaining + style = "progress.remaining" + + if task.total is None: + return Text("", style=style) + + if task_time is None: + return Text("--:--" if self.compact else "-:--:--", style=style) + + # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py + minutes, seconds = divmod(int(task_time), 60) + hours, minutes = divmod(minutes, 60) + + if self.compact and not hours: + formatted = f"{minutes:02d}:{seconds:02d}" + else: + formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}" + + return Text(formatted, style=style) + + +class FileSizeColumn(ProgressColumn): + """Renders completed filesize.""" + + def render(self, task: "Task") -> Text: + """Show data completed.""" + data_size = filesize.decimal(int(task.completed)) + return Text(data_size, style="progress.filesize") + + +class TotalFileSizeColumn(ProgressColumn): + """Renders total filesize.""" + + def render(self, task: "Task") -> Text: + """Show data completed.""" + data_size = filesize.decimal(int(task.total)) if task.total is not None else "" + return Text(data_size, style="progress.filesize.total") + + +class MofNCompleteColumn(ProgressColumn): + """Renders completed count/total, e.g. ' 10/1000'. + + Best for bounded tasks with int quantities. + + Space pads the completed count so that progress length does not change as task progresses + past powers of 10. + + Args: + separator (str, optional): Text to separate completed and total values. Defaults to "/". 
+ """ + + def __init__(self, separator: str = "/", table_column: Optional[Column] = None): + self.separator = separator + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> Text: + """Show completed/total.""" + completed = int(task.completed) + total = int(task.total) if task.total is not None else "?" + total_width = len(str(total)) + return Text( + f"{completed:{total_width}d}{self.separator}{total}", + style="progress.download", + ) + + +class DownloadColumn(ProgressColumn): + """Renders file size downloaded and total, e.g. '0.5/2.3 GB'. + + Args: + binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False. + """ + + def __init__( + self, binary_units: bool = False, table_column: Optional[Column] = None + ) -> None: + self.binary_units = binary_units + super().__init__(table_column=table_column) + + def render(self, task: "Task") -> Text: + """Calculate common unit for completed and total.""" + completed = int(task.completed) + + unit_and_suffix_calculation_base = ( + int(task.total) if task.total is not None else completed + ) + if self.binary_units: + unit, suffix = filesize.pick_unit_and_suffix( + unit_and_suffix_calculation_base, + ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"], + 1024, + ) + else: + unit, suffix = filesize.pick_unit_and_suffix( + unit_and_suffix_calculation_base, + ["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], + 1000, + ) + precision = 0 if unit == 1 else 1 + + completed_ratio = completed / unit + completed_str = f"{completed_ratio:,.{precision}f}" + + if task.total is not None: + total = int(task.total) + total_ratio = total / unit + total_str = f"{total_ratio:,.{precision}f}" + else: + total_str = "?" 
class TransferSpeedColumn(ProgressColumn):
    """Renders human readable transfer speed."""

    def render(self, task: "Task") -> Text:
        """Show data transfer speed."""
        # Use the frozen speed for finished tasks, the live estimate otherwise.
        speed = task.finished_speed or task.speed
        if speed is None:
            return Text("?", style="progress.data.speed")
        data_speed = filesize.decimal(int(speed))
        return Text(f"{data_speed}/s", style="progress.data.speed")


class ProgressSample(NamedTuple):
    """Sample of progress for a given time."""

    timestamp: float
    """Timestamp of sample."""
    completed: float
    """Number of steps completed."""


@dataclass
class Task:
    """Information regarding a progress task.

    This object should be considered read-only outside of the :class:`~Progress` class.

    """

    id: TaskID
    """Task ID associated with this task (used in Progress methods)."""

    description: str
    """str: Description of the task."""

    total: Optional[float]
    """Optional[float]: Total number of steps in this task."""

    completed: float
    """float: Number of steps completed"""

    _get_time: GetTimeCallable
    """Callable to get the current time."""

    finished_time: Optional[float] = None
    """float: Time task was finished."""

    visible: bool = True
    """bool: Indicates if this task is visible in the progress display."""

    fields: Dict[str, Any] = field(default_factory=dict)
    """dict: Arbitrary fields passed in via Progress.update."""

    start_time: Optional[float] = field(default=None, init=False, repr=False)
    """Optional[float]: Time this task was started, or None if not started."""

    stop_time: Optional[float] = field(default=None, init=False, repr=False)
    """Optional[float]: Time this task was stopped, or None if not stopped."""

    finished_speed: Optional[float] = None
    """Optional[float]: The last speed for a finished task."""

    # Rolling window of (timestamp, steps-completed) samples used by the
    # `speed` property; bounded to the most recent 1000 samples.
    _progress: Deque[ProgressSample] = field(
        default_factory=lambda: deque(maxlen=1000), init=False, repr=False
    )

    _lock: RLock = field(repr=False, default_factory=RLock)
    """Thread lock."""

    def get_time(self) -> float:
        """float: Get the current time, in seconds."""
        return self._get_time()

    @property
    def started(self) -> bool:
        """bool: Check if the task as started."""
        return self.start_time is not None

    @property
    def remaining(self) -> Optional[float]:
        """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
        if self.total is None:
            return None
        return self.total - self.completed

    @property
    def elapsed(self) -> Optional[float]:
        """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
        if self.start_time is None:
            return None
        # A set stop_time freezes the elapsed clock.
        if self.stop_time is not None:
            return self.stop_time - self.start_time
        return self.get_time() - self.start_time

    @property
    def finished(self) -> bool:
        """Check if the task has finished."""
        return self.finished_time is not None

    @property
    def percentage(self) -> float:
        """float: Get progress of task as a percentage.
        If a None total was set, returns 0"""
        # Note: `not self.total` also treats total == 0 as "no percentage".
        if not self.total:
            return 0.0
        completed = (self.completed / self.total) * 100.0
        completed = min(100.0, max(0.0, completed))
        return completed

    @property
    def speed(self) -> Optional[float]:
        """Optional[float]: Get the estimated speed in steps per second."""
        if self.start_time is None:
            return None
        with self._lock:
            progress = self._progress
            if not progress:
                return None
            total_time = progress[-1].timestamp - progress[0].timestamp
            if total_time == 0:
                return None
            # Skip the first sample: its steps predate the measured time window.
            iter_progress = iter(progress)
            next(iter_progress)
            total_completed = sum(sample.completed for sample in iter_progress)
            speed = total_completed / total_time
            return speed

    @property
    def time_remaining(self) -> Optional[float]:
        """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
        if self.finished:
            return 0.0
        speed = self.speed
        if not speed:
            return None
        remaining = self.remaining
        if remaining is None:
            return None
        # Round up so the estimate never undershoots to zero early.
        estimate = ceil(remaining / speed)
        return estimate

    def _reset(self) -> None:
        """Reset progress."""
        self._progress.clear()
        self.finished_time = None
        self.finished_speed = None
class Progress(JupyterMixin):
    """Renders an auto-updating progress bar(s).

    Args:
        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
        refresh_per_second (Optional[float], optional): Number of times per second to refresh the progress information or None to use default (10). Defaults to None.
        speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
        transient: (bool, optional): Clear the progress on exit. Defaults to False.
        redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
        redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
        get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
        disable (bool, optional): Disable progress display. Defaults to False
        expand (bool, optional): Expand tasks table to fit width. Defaults to False.
    """

    def __init__(
        self,
        *columns: Union[str, ProgressColumn],
        console: Optional[Console] = None,
        auto_refresh: bool = True,
        refresh_per_second: float = 10,
        speed_estimate_period: float = 30.0,
        transient: bool = False,
        redirect_stdout: bool = True,
        redirect_stderr: bool = True,
        get_time: Optional[GetTimeCallable] = None,
        disable: bool = False,
        expand: bool = False,
    ) -> None:
        assert refresh_per_second > 0, "refresh_per_second must be > 0"
        self._lock = RLock()
        self.columns = columns or self.get_default_columns()
        self.speed_estimate_period = speed_estimate_period

        self.disable = disable
        self.expand = expand
        self._tasks: Dict[TaskID, Task] = {}
        self._task_index: TaskID = TaskID(0)
        self.live = Live(
            console=console or get_console(),
            auto_refresh=auto_refresh,
            refresh_per_second=refresh_per_second,
            transient=transient,
            redirect_stdout=redirect_stdout,
            redirect_stderr=redirect_stderr,
            get_renderable=self.get_renderable,
        )
        self.get_time = get_time or self.console.get_time
        self.print = self.console.print
        self.log = self.console.log

    @classmethod
    def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
        """Get the default columns used for a new Progress instance:
        - a text column for the description (TextColumn)
        - the bar itself (BarColumn)
        - a text column showing completion percentage (TextColumn)
        - an estimated-time-remaining column (TimeRemainingColumn)
        If the Progress instance is created without passing a columns argument,
        the default columns defined here will be used.

        You can also create a Progress instance using custom columns before
        and/or after the defaults, as in this example:

            progress = Progress(
                SpinnerColumn(),
                *Progress.get_default_columns(),
                "Elapsed:",
                TimeElapsedColumn(),
            )

        This code shows the creation of a Progress display, containing
        a spinner to the left, the default columns, and a labeled elapsed
        time column.
        """
        return (
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TaskProgressColumn(),
            TimeRemainingColumn(),
        )

    @property
    def console(self) -> Console:
        return self.live.console

    @property
    def tasks(self) -> List[Task]:
        """Get a list of Task instances."""
        with self._lock:
            return list(self._tasks.values())

    @property
    def task_ids(self) -> List[TaskID]:
        """A list of task IDs."""
        with self._lock:
            return list(self._tasks.keys())

    @property
    def finished(self) -> bool:
        """Check if all tasks have been completed."""
        with self._lock:
            if not self._tasks:
                return True
            return all(task.finished for task in self._tasks.values())

    def start(self) -> None:
        """Start the progress display."""
        if not self.disable:
            self.live.start(refresh=True)

    def stop(self) -> None:
        """Stop the progress display."""
        self.live.stop()
        # Emit a trailing newline so subsequent output isn't glued to the bar
        # when not in an interactive terminal or Jupyter.
        if not self.console.is_interactive and not self.console.is_jupyter:
            self.console.print()

    def __enter__(self) -> Self:
        self.start()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.stop()

    def track(
        self,
        sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
        total: Optional[float] = None,
        completed: int = 0,
        task_id: Optional[TaskID] = None,
        description: str = "Working...",
        update_period: float = 0.1,
    ) -> Iterable[ProgressType]:
        """Track progress by iterating over a sequence.

        Args:
            sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress.
            total: (float, optional): Total number of steps. Default is len(sequence).
            completed (int, optional): Number of steps completed so far. Defaults to 0.
            task_id: (TaskID): Task to track. Default is new task.
            description: (str, optional): Description of task, if new task is created.
            update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.

        Returns:
            Iterable[ProgressType]: An iterable of values taken from the provided sequence.
        """
        if total is None:
            total = float(length_hint(sequence)) or None

        if task_id is None:
            task_id = self.add_task(description, total=total, completed=completed)
        else:
            self.update(task_id, total=total, completed=completed)

        if self.live.auto_refresh:
            # Delegate throttled updates to a background thread so iteration
            # speed isn't bounded by render frequency.
            with _TrackThread(self, task_id, update_period) as track_thread:
                for value in sequence:
                    yield value
                    track_thread.completed += 1
        else:
            advance = self.advance
            refresh = self.refresh
            for value in sequence:
                yield value
                advance(task_id, 1)
                refresh()

    def wrap_file(
        self,
        file: BinaryIO,
        total: Optional[int] = None,
        *,
        task_id: Optional[TaskID] = None,
        description: str = "Reading...",
    ) -> BinaryIO:
        """Track progress file reading from a binary file.

        Args:
            file (BinaryIO): A file-like object opened in binary mode.
            total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
            task_id (TaskID): Task to track. Default is new task.
            description (str, optional): Description of task, if new task is created.

        Returns:
            BinaryIO: A readable file-like object in binary mode.

        Raises:
            ValueError: When no total value can be extracted from the arguments or the task.
        """
        # attempt to recover the total from the task
        total_bytes: Optional[float] = None
        if total is not None:
            total_bytes = total
        elif task_id is not None:
            with self._lock:
                total_bytes = self._tasks[task_id].total
        if total_bytes is None:
            # Fixed: message had a stray f-string prefix with no placeholders.
            raise ValueError(
                "unable to get the total number of bytes, please specify 'total'"
            )

        # update total of task or create new task
        if task_id is None:
            task_id = self.add_task(description, total=total_bytes)
        else:
            self.update(task_id, total=total_bytes)

        return _Reader(file, self, task_id, close_handle=False)

    @typing.overload
    def open(
        self,
        file: Union[str, "PathLike[str]", bytes],
        mode: Literal["rb"],
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
        *,
        total: Optional[int] = None,
        task_id: Optional[TaskID] = None,
        description: str = "Reading...",
    ) -> BinaryIO:
        pass

    @typing.overload
    def open(
        self,
        file: Union[str, "PathLike[str]", bytes],
        mode: Union[Literal["r"], Literal["rt"]],
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
        *,
        total: Optional[int] = None,
        task_id: Optional[TaskID] = None,
        description: str = "Reading...",
    ) -> TextIO:
        pass

    def open(
        self,
        file: Union[str, "PathLike[str]", bytes],
        mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
        *,
        total: Optional[int] = None,
        task_id: Optional[TaskID] = None,
        description: str = "Reading...",
    ) -> Union[BinaryIO, TextIO]:
        """Track progress while reading from a binary file.

        Args:
            path (Union[str, PathLike[str]]): The path to the file to read.
            mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
            buffering (int): The buffering strategy to use, see :func:`io.open`.
            encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
            errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
            newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
            total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
            task_id (TaskID): Task to track. Default is new task.
            description (str, optional): Description of task, if new task is created.

        Returns:
            BinaryIO: A readable file-like object in binary mode.

        Raises:
            ValueError: When an invalid mode is given.
        """
        # normalize the mode (always rb, rt)
        _mode = "".join(sorted(mode, reverse=False))
        if _mode not in ("br", "rt", "r"):
            raise ValueError(f"invalid mode {mode!r}")

        # patch buffering to provide the same behaviour as the builtin `open`
        line_buffering = buffering == 1
        if _mode == "br" and buffering == 1:
            warnings.warn(
                "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
                RuntimeWarning,
            )
            buffering = -1
        elif _mode in ("rt", "r"):
            if buffering == 0:
                raise ValueError("can't have unbuffered text I/O")
            elif buffering == 1:
                buffering = -1

        # attempt to get the total with `os.stat`
        if total is None:
            total = stat(file).st_size

        # update total of task or create new task
        if task_id is None:
            task_id = self.add_task(description, total=total)
        else:
            self.update(task_id, total=total)

        # open the file in binary mode,
        handle = io.open(file, "rb", buffering=buffering)
        reader = _Reader(handle, self, task_id, close_handle=True)

        # wrap the reader in a `TextIOWrapper` if text mode
        if mode in ("r", "rt"):
            return io.TextIOWrapper(
                reader,
                encoding=encoding,
                errors=errors,
                newline=newline,
                line_buffering=line_buffering,
            )

        return reader

    def start_task(self, task_id: TaskID) -> None:
        """Start a task.

        Starts a task (used when calculating elapsed time). You may need to call this manually,
        if you called ``add_task`` with ``start=False``.

        Args:
            task_id (TaskID): ID of task.
        """
        with self._lock:
            task = self._tasks[task_id]
            if task.start_time is None:
                task.start_time = self.get_time()

    def stop_task(self, task_id: TaskID) -> None:
        """Stop a task.

        This will freeze the elapsed time on the task.

        Args:
            task_id (TaskID): ID of task.
        """
        with self._lock:
            task = self._tasks[task_id]
            current_time = self.get_time()
            if task.start_time is None:
                task.start_time = current_time
            task.stop_time = current_time

    def update(
        self,
        task_id: TaskID,
        *,
        total: Optional[float] = None,
        completed: Optional[float] = None,
        advance: Optional[float] = None,
        description: Optional[str] = None,
        visible: Optional[bool] = None,
        refresh: bool = False,
        **fields: Any,
    ) -> None:
        """Update information associated with a task.

        Args:
            task_id (TaskID): Task id (returned by add_task).
            total (float, optional): Updates task.total if not None.
            completed (float, optional): Updates task.completed if not None.
            advance (float, optional): Add a value to task.completed if not None.
            description (str, optional): Change task description if not None.
            visible (bool, optional): Set visible flag if not None.
            refresh (bool): Force a refresh of progress information. Default is False.
            **fields (Any): Additional data fields required for rendering.
        """
        with self._lock:
            task = self._tasks[task_id]
            completed_start = task.completed

            if total is not None and total != task.total:
                task.total = total
                # Changing the total invalidates speed samples and finish state.
                task._reset()
            if advance is not None:
                task.completed += advance
            if completed is not None:
                task.completed = completed
            if description is not None:
                task.description = description
            if visible is not None:
                task.visible = visible
            task.fields.update(fields)
            update_completed = task.completed - completed_start

            current_time = self.get_time()
            old_sample_time = current_time - self.speed_estimate_period
            _progress = task._progress

            # Drop samples older than the speed-estimate window.
            popleft = _progress.popleft
            while _progress and _progress[0].timestamp < old_sample_time:
                popleft()
            if update_completed > 0:
                _progress.append(ProgressSample(current_time, update_completed))
            if (
                task.total is not None
                and task.completed >= task.total
                and task.finished_time is None
            ):
                task.finished_time = task.elapsed

        if refresh:
            self.refresh()

    def reset(
        self,
        task_id: TaskID,
        *,
        start: bool = True,
        total: Optional[float] = None,
        completed: int = 0,
        visible: Optional[bool] = None,
        description: Optional[str] = None,
        **fields: Any,
    ) -> None:
        """Reset a task so completed is 0 and the clock is reset.

        Args:
            task_id (TaskID): ID of task.
            start (bool, optional): Start the task after reset. Defaults to True.
            total (float, optional): New total steps in task, or None to use current total. Defaults to None.
            completed (int, optional): Number of steps completed. Defaults to 0.
            visible (bool, optional): Enable display of the task. Defaults to True.
            description (str, optional): Change task description if not None. Defaults to None.
            **fields (str): Additional data fields required for rendering.
        """
        current_time = self.get_time()
        with self._lock:
            task = self._tasks[task_id]
            task._reset()
            task.start_time = current_time if start else None
            if total is not None:
                task.total = total
            task.completed = completed
            if visible is not None:
                task.visible = visible
            if fields:
                task.fields = fields
            if description is not None:
                task.description = description
            task.finished_time = None
        self.refresh()

    def advance(self, task_id: TaskID, advance: float = 1) -> None:
        """Advance task by a number of steps.

        Args:
            task_id (TaskID): ID of task.
            advance (float): Number of steps to advance. Default is 1.
        """
        current_time = self.get_time()
        with self._lock:
            task = self._tasks[task_id]
            completed_start = task.completed
            task.completed += advance
            update_completed = task.completed - completed_start
            old_sample_time = current_time - self.speed_estimate_period
            _progress = task._progress

            # Drop samples older than the speed-estimate window. Note that
            # `_progress` is a deque(maxlen=1000), so the deque itself bounds
            # the sample count; no explicit length trimming is required here.
            popleft = _progress.popleft
            while _progress and _progress[0].timestamp < old_sample_time:
                popleft()
            _progress.append(ProgressSample(current_time, update_completed))
            if (
                task.total is not None
                and task.completed >= task.total
                and task.finished_time is None
            ):
                task.finished_time = task.elapsed
                task.finished_speed = task.speed

    def refresh(self) -> None:
        """Refresh (render) the progress information."""
        if not self.disable and self.live.is_started:
            self.live.refresh()

    def get_renderable(self) -> RenderableType:
        """Get a renderable for the progress display."""
        renderable = Group(*self.get_renderables())
        return renderable

    def get_renderables(self) -> Iterable[RenderableType]:
        """Get a number of renderables for the progress display."""
        table = self.make_tasks_table(self.tasks)
        yield table

    def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
        """Get a table to render the Progress display.

        Args:
            tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.

        Returns:
            Table: A table instance.
        """
        table_columns = (
            (
                Column(no_wrap=True)
                if isinstance(_column, str)
                else _column.get_table_column().copy()
            )
            for _column in self.columns
        )
        table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)

        for task in tasks:
            if task.visible:
                table.add_row(
                    *(
                        (
                            column.format(task=task)
                            if isinstance(column, str)
                            else column(task)
                        )
                        for column in self.columns
                    )
                )
        return table

    def __rich__(self) -> RenderableType:
        """Makes the Progress class itself renderable."""
        with self._lock:
            return self.get_renderable()

    def add_task(
        self,
        description: str,
        start: bool = True,
        total: Optional[float] = 100.0,
        completed: int = 0,
        visible: bool = True,
        **fields: Any,
    ) -> TaskID:
        """Add a new 'task' to the Progress display.

        Args:
            description (str): A description of the task.
            start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
                you will need to call `start` manually. Defaults to True.
            total (float, optional): Number of total steps in the progress if known.
                Set to None to render a pulsing animation. Defaults to 100.
            completed (int, optional): Number of steps completed so far. Defaults to 0.
            visible (bool, optional): Enable display of the task. Defaults to True.
            **fields (str): Additional data fields required for rendering.

        Returns:
            TaskID: An ID you can use when calling `update`.
        """
        with self._lock:
            task = Task(
                self._task_index,
                description,
                total,
                completed,
                visible=visible,
                fields=fields,
                _get_time=self.get_time,
                _lock=self._lock,
            )
            self._tasks[self._task_index] = task
            if start:
                self.start_task(self._task_index)
            new_task_index = self._task_index
            self._task_index = TaskID(int(self._task_index) + 1)
        self.refresh()
        return new_task_index

    def remove_task(self, task_id: TaskID) -> None:
        """Delete a task if it exists.

        Args:
            task_id (TaskID): A task ID.

        """
        with self._lock:
            del self._tasks[task_id]
progress.add_task("[yellow]Thinking", total=None) + + while not progress.finished: + progress.update(task1, advance=0.5) + progress.update(task2, advance=0.3) + time.sleep(0.01) + if random.randint(0, 100) < 1: + progress.log(next(examples)) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/py.typed b/llava/lib/python3.10/site-packages/pip/_vendor/rich/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/repr.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/repr.py new file mode 100644 index 0000000000000000000000000000000000000000..10efc427c35e9d7fe058809f8e7c278dff46db0f --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/repr.py @@ -0,0 +1,149 @@ +import inspect +from functools import partial +from typing import ( + Any, + Callable, + Iterable, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +T = TypeVar("T") + + +Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]] +RichReprResult = Result + + +class ReprError(Exception): + """An error occurred when attempting to build a repr.""" + + +@overload +def auto(cls: Optional[Type[T]]) -> Type[T]: + ... + + +@overload +def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]: + ... 
def auto(
    cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
    """Class decorator to create __repr__ from __rich_repr__"""

    def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
        def auto_repr(self: T) -> str:
            """Create repr string from __rich_repr__"""
            repr_str: List[str] = []
            append = repr_str.append

            angular: bool = getattr(self.__rich_repr__, "angular", False)  # type: ignore[attr-defined]
            for arg in self.__rich_repr__():  # type: ignore[attr-defined]
                if isinstance(arg, tuple):
                    # A tuple is (value,), (key, value) or (key, value, default).
                    if len(arg) == 1:
                        append(repr(arg[0]))
                    else:
                        key, value, *default = arg
                        if key is None:
                            append(repr(value))
                        else:
                            # Omit arguments still at their default value.
                            if default and default[0] == value:
                                continue
                            append(f"{key}={value!r}")
                else:
                    append(repr(arg))
            if angular:
                return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
            else:
                return f"{self.__class__.__name__}({', '.join(repr_str)})"

        def auto_rich_repr(self: Type[T]) -> Result:
            """Auto generate __rich_rep__ from signature of __init__"""
            try:
                signature = inspect.signature(self.__init__)
                for name, param in signature.parameters.items():
                    # Positional-only params are yielded as bare values;
                    # others as (name, value[, default]) tuples.
                    if param.kind == param.POSITIONAL_ONLY:
                        yield getattr(self, name)
                    elif param.kind in (
                        param.POSITIONAL_OR_KEYWORD,
                        param.KEYWORD_ONLY,
                    ):
                        if param.default is param.empty:
                            yield getattr(self, param.name)
                        else:
                            yield param.name, getattr(self, param.name), param.default
            except Exception as error:
                raise ReprError(
                    f"Failed to auto generate __rich_repr__; {error}"
                ) from None

        # Only synthesize __rich_repr__ when the class doesn't define one.
        if not hasattr(cls, "__rich_repr__"):
            auto_rich_repr.__doc__ = "Build a rich repr"
            cls.__rich_repr__ = auto_rich_repr  # type: ignore[attr-defined]

        auto_repr.__doc__ = "Return repr(self)"
        cls.__repr__ = auto_repr  # type: ignore[assignment]
        if angular is not None:
            cls.__rich_repr__.angular = angular  # type: ignore[attr-defined]
        return cls

    # Bare @auto receives the class directly; @auto(angular=...) must
    # return a decorator to be applied to the class.
    if cls is None:
        return partial(do_replace, angular=angular)
    else:
        return do_replace(cls, angular=angular)


@overload
def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
    ...


@overload
def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
    ...


def rich_repr(
    cls: Optional[Type[T]] = None, *, angular: bool = False
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
    # Thin alias over auto() supporting the same two calling conventions.
    if cls is None:
        return auto(angular=angular)
    else:
        return auto(cls)


if __name__ == "__main__":

    @auto
    class Foo:
        def __rich_repr__(self) -> Result:
            yield "foo"
            yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
            yield "buy", "hand sanitizer"

    foo = Foo()
    from pip._vendor.rich.console import Console

    console = Console()

    console.rule("Standard repr")
    console.print(foo)

    console.print(foo, width=60)
    console.print(foo, width=30)

    console.rule("Angular repr")
    Foo.__rich_repr__.angular = True  # type: ignore[attr-defined]

    console.print(foo)

    console.print(foo, width=60)
    console.print(foo, width=30)
diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/rule.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/rule.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd00ce6e4cea506f3ab08e6412d2eb6443ef582c
--- /dev/null
+++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/rule.py
@@ -0,0 +1,130 @@
from typing import Union

from .align import AlignMethod
from .cells import cell_len, set_cell_size
from .console import Console, ConsoleOptions, RenderResult
from .jupyter import JupyterMixin
from .measure import Measurement
from .style import Style
from .text import Text


class Rule(JupyterMixin):
    """A console renderable to draw a horizontal rule (line).

    Args:
        title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
        characters (str, optional): Character(s) used to draw the line. Defaults to "─".
        style (StyleType, optional): Style of Rule. Defaults to "rule.line".
        end (str, optional): Character at end of Rule. defaults to "\\n"
        align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
    """

    def __init__(
        self,
        title: Union[str, Text] = "",
        *,
        characters: str = "─",
        style: Union[str, Style] = "rule.line",
        end: str = "\n",
        align: AlignMethod = "center",
    ) -> None:
        if cell_len(characters) < 1:
            raise ValueError(
                "'characters' argument must have a cell width of at least 1"
            )
        if align not in ("left", "center", "right"):
            raise ValueError(
                f'invalid value for align, expected "left", "center", "right" (not {align!r})'
            )
        self.title = title
        self.characters = characters
        self.style = style
        self.end = end
        self.align = align

    def __repr__(self) -> str:
        return f"Rule({self.title!r}, {self.characters!r})"

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        width = options.max_width

        # Fall back to a plain dash on terminals that can't display the
        # configured non-ASCII rule character.
        characters = (
            "-"
            if (options.ascii_only and not self.characters.isascii())
            else self.characters
        )

        chars_len = cell_len(characters)
        if not self.title:
            yield self._rule_line(chars_len, width)
            return

        if isinstance(self.title, Text):
            title_text = self.title
        else:
            title_text = console.render_str(self.title, style="rule.text")

        # The title must fit on a single line.
        title_text.plain = title_text.plain.replace("\n", " ")
        title_text.expand_tabs()

        # Cells reserved for the separator space(s) around the title.
        required_space = 4 if self.align == "center" else 2
        truncate_width = max(0, width - required_space)
        if not truncate_width:
            yield self._rule_line(chars_len, width)
            return

        rule_text = Text(end=self.end)
        if self.align == "center":
            title_text.truncate(truncate_width, overflow="ellipsis")
            side_width = (width - cell_len(title_text.plain)) // 2
            # Overshoot with repeats, then truncate to the exact cell width.
            left = Text(characters * (side_width // chars_len + 1))
            left.truncate(side_width - 1)
            right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
            right = Text(characters * (side_width // chars_len + 1))
            right.truncate(right_length)
            rule_text.append(left.plain + " ", self.style)
            rule_text.append(title_text)
            rule_text.append(" " + right.plain, self.style)
        elif self.align == "left":
            title_text.truncate(truncate_width, overflow="ellipsis")
            rule_text.append(title_text)
            rule_text.append(" ")
            rule_text.append(characters * (width - rule_text.cell_len), self.style)
        elif self.align == "right":
            title_text.truncate(truncate_width, overflow="ellipsis")
            rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
            rule_text.append(" ")
            rule_text.append(title_text)

        rule_text.plain = set_cell_size(rule_text.plain, width)
        yield rule_text

    def _rule_line(self, chars_len: int, width: int) -> Text:
        # A title-less rule: repeat the character(s) past the width, then crop.
        rule_text = Text(self.characters * ((width // chars_len) + 1), self.style)
        rule_text.truncate(width)
        rule_text.plain = set_cell_size(rule_text.plain, width)
        return rule_text

    def __rich_measure__(
        self, console: Console, options: ConsoleOptions
    ) -> Measurement:
        return Measurement(1, 1)


if __name__ == "__main__":  # pragma: no cover
    import sys

    from pip._vendor.rich.console import Console

    try:
        text = sys.argv[1]
    except IndexError:
        text = "Hello, World"
    console = Console()
    console.print(Rule(title=text))

    console = Console()
    console.print(Rule("foo"), width=4)
diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/screen.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/screen.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f416e1e799abfbf62382456020cc8e59e5cf01f
--- /dev/null
+++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/screen.py
@@ -0,0 +1,54 @@
from typing import Optional, TYPE_CHECKING

from .segment import Segment
from .style import StyleType
from ._loop import loop_last


if TYPE_CHECKING:
    from .console import (
        Console,
        ConsoleOptions,
        RenderResult,
        RenderableType,
        Group,
    )


class Screen:
    """A renderable that
fills the terminal screen and crops excess.

    Args:
        renderable (RenderableType): Child renderable.
        style (StyleType, optional): Optional background style. Defaults to None.
    """

    renderable: "RenderableType"

    def __init__(
        self,
        *renderables: "RenderableType",
        style: Optional[StyleType] = None,
        application_mode: bool = False,
    ) -> None:
        from pip._vendor.rich.console import Group

        self.renderable = Group(*renderables)
        self.style = style
        self.application_mode = application_mode

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        width, height = options.size
        style = console.get_style(self.style) if self.style else None
        render_options = options.update(width=width, height=height)
        lines = console.render_lines(
            self.renderable or "", render_options, style=style, pad=True
        )
        # Pad/crop the rendered lines to exactly the terminal size.
        lines = Segment.set_shape(lines, width, height, style=style)
        # In application mode the terminal needs an explicit carriage return.
        new_line = Segment("\n\r") if self.application_mode else Segment.line()
        for last, line in loop_last(lines):
            yield from line
            if not last:
                yield new_line
diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py
new file mode 100644
index 0000000000000000000000000000000000000000..70570b6b0962e530e76d599d2f886d14d91bda33
--- /dev/null
+++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py
@@ -0,0 +1,138 @@
from typing import cast, List, Optional, TYPE_CHECKING, Union

from ._spinners import SPINNERS
from .measure import Measurement
from .table import Table
from .text import Text

if TYPE_CHECKING:
    from .console import Console, ConsoleOptions, RenderResult, RenderableType
    from .style import StyleType


class Spinner:
    """A spinner animation.

    Args:
        name (str): Name of spinner (run python -m rich.spinner).
        text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
        style (StyleType, optional): Style for spinner animation. Defaults to None.
        speed (float, optional): Speed factor for animation. Defaults to 1.0.

    Raises:
        KeyError: If name isn't one of the supported spinner animations.
    """

    def __init__(
        self,
        name: str,
        text: "RenderableType" = "",
        *,
        style: Optional["StyleType"] = None,
        speed: float = 1.0,
    ) -> None:
        try:
            spinner = SPINNERS[name]
        except KeyError:
            raise KeyError(f"no spinner called {name!r}")
        self.text: "Union[RenderableType, Text]" = (
            Text.from_markup(text) if isinstance(text, str) else text
        )
        self.name = name
        # Copy the frame list so later mutation can't affect shared SPINNERS data.
        self.frames = cast(List[str], spinner["frames"])[:]
        self.interval = cast(float, spinner["interval"])
        self.start_time: Optional[float] = None
        self.style = style
        self.speed = speed
        # Frame offset used to keep the animation continuous across speed changes.
        self.frame_no_offset: float = 0.0
        self._update_speed = 0.0

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        yield self.render(console.get_time())

    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> Measurement:
        text = self.render(0)
        return Measurement.get(console, options, text)

    def render(self, time: float) -> "RenderableType":
        """Render the spinner for a given time.

        Args:
            time (float): Time in seconds.

        Returns:
            RenderableType: A renderable containing animation frame.
        """
        if self.start_time is None:
            self.start_time = time

        # interval is in milliseconds; the frame index advances with elapsed time.
        frame_no = ((time - self.start_time) * self.speed) / (
            self.interval / 1000.0
        ) + self.frame_no_offset
        frame = Text(
            self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
        )

        if self._update_speed:
            # Re-anchor the animation so a pending speed change doesn't jump.
            self.frame_no_offset = frame_no
            self.start_time = time
            self.speed = self._update_speed
            self._update_speed = 0.0

        if not self.text:
            return frame
        elif isinstance(self.text, (str, Text)):
            return Text.assemble(frame, " ", self.text)
        else:
            # Arbitrary renderables sit beside the frame in a 2-column grid.
            table = Table.grid(padding=1)
            table.add_row(frame, self.text)
            return table

    def update(
        self,
        *,
        text: "RenderableType" = "",
        style: Optional["StyleType"] = None,
        speed: Optional[float] = None,
    ) -> None:
        """Updates attributes of a spinner after it has been started.

        Args:
            text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
            style (StyleType, optional): Style for spinner animation. Defaults to None.
            speed (float, optional): Speed factor for animation. Defaults to None.
+ """ + if text: + self.text = Text.from_markup(text) if isinstance(text, str) else text + if style: + self.style = style + if speed: + self._update_speed = speed + + +if __name__ == "__main__": # pragma: no cover + from time import sleep + + from .columns import Columns + from .panel import Panel + from .live import Live + + all_spinners = Columns( + [ + Spinner(spinner_name, text=Text(repr(spinner_name), style="green")) + for spinner_name in sorted(SPINNERS.keys()) + ], + column_first=True, + expand=True, + ) + + with Live( + Panel(all_spinners, title="Spinners", border_style="blue"), + refresh_per_second=20, + ) as live: + while True: + sleep(0.1) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d483c3d078762b72264a1f9f952195ec6a1d00 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py @@ -0,0 +1,966 @@ +import os.path +import re +import sys +import textwrap +from abc import ABC, abstractmethod +from pathlib import Path +from typing import ( + Any, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, +) + +from pip._vendor.pygments.lexer import Lexer +from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename +from pip._vendor.pygments.style import Style as PygmentsStyle +from pip._vendor.pygments.styles import get_style_by_name +from pip._vendor.pygments.token import ( + Comment, + Error, + Generic, + Keyword, + Name, + Number, + Operator, + String, + Token, + Whitespace, +) +from pip._vendor.pygments.util import ClassNotFound + +from pip._vendor.rich.containers import Lines +from pip._vendor.rich.padding import Padding, PaddingDimensions + +from ._loop import loop_first +from .cells import cell_len +from .color import Color, blend_rgb +from .console import Console, ConsoleOptions, JustifyMethod, 
RenderResult +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment, Segments +from .style import Style, StyleType +from .text import Text + +TokenType = Tuple[str, ...] + +WINDOWS = sys.platform == "win32" +DEFAULT_THEME = "monokai" + +# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py +# A few modifications were made + +ANSI_LIGHT: Dict[TokenType, Style] = { + Token: Style(), + Whitespace: Style(color="white"), + Comment: Style(dim=True), + Comment.Preproc: Style(color="cyan"), + Keyword: Style(color="blue"), + Keyword.Type: Style(color="cyan"), + Operator.Word: Style(color="magenta"), + Name.Builtin: Style(color="cyan"), + Name.Function: Style(color="green"), + Name.Namespace: Style(color="cyan", underline=True), + Name.Class: Style(color="green", underline=True), + Name.Exception: Style(color="cyan"), + Name.Decorator: Style(color="magenta", bold=True), + Name.Variable: Style(color="red"), + Name.Constant: Style(color="red"), + Name.Attribute: Style(color="cyan"), + Name.Tag: Style(color="bright_blue"), + String: Style(color="yellow"), + Number: Style(color="blue"), + Generic.Deleted: Style(color="bright_red"), + Generic.Inserted: Style(color="green"), + Generic.Heading: Style(bold=True), + Generic.Subheading: Style(color="magenta", bold=True), + Generic.Prompt: Style(bold=True), + Generic.Error: Style(color="bright_red"), + Error: Style(color="red", underline=True), +} + +ANSI_DARK: Dict[TokenType, Style] = { + Token: Style(), + Whitespace: Style(color="bright_black"), + Comment: Style(dim=True), + Comment.Preproc: Style(color="bright_cyan"), + Keyword: Style(color="bright_blue"), + Keyword.Type: Style(color="bright_cyan"), + Operator.Word: Style(color="bright_magenta"), + Name.Builtin: Style(color="bright_cyan"), + Name.Function: Style(color="bright_green"), + Name.Namespace: Style(color="bright_cyan", underline=True), + Name.Class: 
Style(color="bright_green", underline=True), + Name.Exception: Style(color="bright_cyan"), + Name.Decorator: Style(color="bright_magenta", bold=True), + Name.Variable: Style(color="bright_red"), + Name.Constant: Style(color="bright_red"), + Name.Attribute: Style(color="bright_cyan"), + Name.Tag: Style(color="bright_blue"), + String: Style(color="yellow"), + Number: Style(color="bright_blue"), + Generic.Deleted: Style(color="bright_red"), + Generic.Inserted: Style(color="bright_green"), + Generic.Heading: Style(bold=True), + Generic.Subheading: Style(color="bright_magenta", bold=True), + Generic.Prompt: Style(bold=True), + Generic.Error: Style(color="bright_red"), + Error: Style(color="red", underline=True), +} + +RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK} +NUMBERS_COLUMN_DEFAULT_PADDING = 2 + + +class SyntaxTheme(ABC): + """Base class for a syntax theme.""" + + @abstractmethod + def get_style_for_token(self, token_type: TokenType) -> Style: + """Get a style for a given Pygments token.""" + raise NotImplementedError # pragma: no cover + + @abstractmethod + def get_background_style(self) -> Style: + """Get the background color.""" + raise NotImplementedError # pragma: no cover + + +class PygmentsSyntaxTheme(SyntaxTheme): + """Syntax theme that delegates to Pygments theme.""" + + def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None: + self._style_cache: Dict[TokenType, Style] = {} + if isinstance(theme, str): + try: + self._pygments_style_class = get_style_by_name(theme) + except ClassNotFound: + self._pygments_style_class = get_style_by_name("default") + else: + self._pygments_style_class = theme + + self._background_color = self._pygments_style_class.background_color + self._background_style = Style(bgcolor=self._background_color) + + def get_style_for_token(self, token_type: TokenType) -> Style: + """Get a style from a Pygments class.""" + try: + return self._style_cache[token_type] + except KeyError: + try: + 
pygments_style = self._pygments_style_class.style_for_token(token_type) + except KeyError: + style = Style.null() + else: + color = pygments_style["color"] + bgcolor = pygments_style["bgcolor"] + style = Style( + color="#" + color if color else "#000000", + bgcolor="#" + bgcolor if bgcolor else self._background_color, + bold=pygments_style["bold"], + italic=pygments_style["italic"], + underline=pygments_style["underline"], + ) + self._style_cache[token_type] = style + return style + + def get_background_style(self) -> Style: + return self._background_style + + +class ANSISyntaxTheme(SyntaxTheme): + """Syntax theme to use standard colors.""" + + def __init__(self, style_map: Dict[TokenType, Style]) -> None: + self.style_map = style_map + self._missing_style = Style.null() + self._background_style = Style.null() + self._style_cache: Dict[TokenType, Style] = {} + + def get_style_for_token(self, token_type: TokenType) -> Style: + """Look up style in the style map.""" + try: + return self._style_cache[token_type] + except KeyError: + # Styles form a hierarchy + # We need to go from most to least specific + # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",) + get_style = self.style_map.get + token = tuple(token_type) + style = self._missing_style + while token: + _style = get_style(token) + if _style is not None: + style = _style + break + token = token[:-1] + self._style_cache[token_type] = style + return style + + def get_background_style(self) -> Style: + return self._background_style + + +SyntaxPosition = Tuple[int, int] + + +class _SyntaxHighlightRange(NamedTuple): + """ + A range to highlight in a Syntax object. + `start` and `end` are 2-integers tuples, where the first integer is the line number + (starting from 1) and the second integer is the column index (starting from 0). 
+ """ + + style: StyleType + start: SyntaxPosition + end: SyntaxPosition + style_before: bool = False + + +class Syntax(JupyterMixin): + """Construct a Syntax object to render syntax highlighted code. + + Args: + code (str): Code to highlight. + lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/) + theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai". + dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. + line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. + start_line (int, optional): Starting number for line numbers. Defaults to 1. + line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render. + A value of None in the tuple indicates the range is open in that direction. + highlight_lines (Set[int]): A set of line numbers to highlight. + code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. + tab_size (int, optional): Size of tabs. Defaults to 4. + word_wrap (bool, optional): Enable word wrapping. + background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. + indent_guides (bool, optional): Show indent guides. Defaults to False. + padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
+ """ + + _pygments_style_class: Type[PygmentsStyle] + _theme: SyntaxTheme + + @classmethod + def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme: + """Get a syntax theme instance.""" + if isinstance(name, SyntaxTheme): + return name + theme: SyntaxTheme + if name in RICH_SYNTAX_THEMES: + theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name]) + else: + theme = PygmentsSyntaxTheme(name) + return theme + + def __init__( + self, + code: str, + lexer: Union[Lexer, str], + *, + theme: Union[str, SyntaxTheme] = DEFAULT_THEME, + dedent: bool = False, + line_numbers: bool = False, + start_line: int = 1, + line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, + highlight_lines: Optional[Set[int]] = None, + code_width: Optional[int] = None, + tab_size: int = 4, + word_wrap: bool = False, + background_color: Optional[str] = None, + indent_guides: bool = False, + padding: PaddingDimensions = 0, + ) -> None: + self.code = code + self._lexer = lexer + self.dedent = dedent + self.line_numbers = line_numbers + self.start_line = start_line + self.line_range = line_range + self.highlight_lines = highlight_lines or set() + self.code_width = code_width + self.tab_size = tab_size + self.word_wrap = word_wrap + self.background_color = background_color + self.background_style = ( + Style(bgcolor=background_color) if background_color else Style() + ) + self.indent_guides = indent_guides + self.padding = padding + + self._theme = self.get_theme(theme) + self._stylized_ranges: List[_SyntaxHighlightRange] = [] + + @classmethod + def from_path( + cls, + path: str, + encoding: str = "utf-8", + lexer: Optional[Union[Lexer, str]] = None, + theme: Union[str, SyntaxTheme] = DEFAULT_THEME, + dedent: bool = False, + line_numbers: bool = False, + line_range: Optional[Tuple[int, int]] = None, + start_line: int = 1, + highlight_lines: Optional[Set[int]] = None, + code_width: Optional[int] = None, + tab_size: int = 4, + word_wrap: bool = False, + background_color: Optional[str] = 
None, + indent_guides: bool = False, + padding: PaddingDimensions = 0, + ) -> "Syntax": + """Construct a Syntax object from a file. + + Args: + path (str): Path to file to highlight. + encoding (str): Encoding of file. + lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content. + theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". + dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. + line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. + start_line (int, optional): Starting number for line numbers. Defaults to 1. + line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. + highlight_lines (Set[int]): A set of line numbers to highlight. + code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. + tab_size (int, optional): Size of tabs. Defaults to 4. + word_wrap (bool, optional): Enable word wrapping of code. + background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. + indent_guides (bool, optional): Show indent guides. Defaults to False. + padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
+ + Returns: + [Syntax]: A Syntax object that may be printed to the console + """ + code = Path(path).read_text(encoding=encoding) + + if not lexer: + lexer = cls.guess_lexer(path, code=code) + + return cls( + code, + lexer, + theme=theme, + dedent=dedent, + line_numbers=line_numbers, + line_range=line_range, + start_line=start_line, + highlight_lines=highlight_lines, + code_width=code_width, + tab_size=tab_size, + word_wrap=word_wrap, + background_color=background_color, + indent_guides=indent_guides, + padding=padding, + ) + + @classmethod + def guess_lexer(cls, path: str, code: Optional[str] = None) -> str: + """Guess the alias of the Pygments lexer to use based on a path and an optional string of code. + If code is supplied, it will use a combination of the code and the filename to determine the + best lexer to use. For example, if the file is ``index.html`` and the file contains Django + templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no + templating language is used, the "html" lexer will be used. If no string of code + is supplied, the lexer will be chosen based on the file extension.. + + Args: + path (AnyStr): The path to the file containing the code you wish to know the lexer for. + code (str, optional): Optional string of code that will be used as a fallback if no lexer + is found for the supplied path. + + Returns: + str: The name of the Pygments lexer that best matches the supplied path/code. 
+ """ + lexer: Optional[Lexer] = None + lexer_name = "default" + if code: + try: + lexer = guess_lexer_for_filename(path, code) + except ClassNotFound: + pass + + if not lexer: + try: + _, ext = os.path.splitext(path) + if ext: + extension = ext.lstrip(".").lower() + lexer = get_lexer_by_name(extension) + except ClassNotFound: + pass + + if lexer: + if lexer.aliases: + lexer_name = lexer.aliases[0] + else: + lexer_name = lexer.name + + return lexer_name + + def _get_base_style(self) -> Style: + """Get the base style.""" + default_style = self._theme.get_background_style() + self.background_style + return default_style + + def _get_token_color(self, token_type: TokenType) -> Optional[Color]: + """Get a color (if any) for the given token. + + Args: + token_type (TokenType): A token type tuple from Pygments. + + Returns: + Optional[Color]: Color from theme, or None for no color. + """ + style = self._theme.get_style_for_token(token_type) + return style.color + + @property + def lexer(self) -> Optional[Lexer]: + """The lexer for this syntax, or None if no lexer was found. + + Tries to find the lexer by name if a string was passed to the constructor. + """ + + if isinstance(self._lexer, Lexer): + return self._lexer + try: + return get_lexer_by_name( + self._lexer, + stripnl=False, + ensurenl=True, + tabsize=self.tab_size, + ) + except ClassNotFound: + return None + + @property + def default_lexer(self) -> Lexer: + """A Pygments Lexer to use if one is not specified or invalid.""" + return get_lexer_by_name( + "text", + stripnl=False, + ensurenl=True, + tabsize=self.tab_size, + ) + + def highlight( + self, + code: str, + line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, + ) -> Text: + """Highlight code and return a Text instance. + + Args: + code (str): Code to highlight. + line_range(Tuple[int, int], optional): Optional line range to highlight. + + Returns: + Text: A text instance containing highlighted syntax. 
+ """ + + base_style = self._get_base_style() + justify: JustifyMethod = ( + "default" if base_style.transparent_background else "left" + ) + + text = Text( + justify=justify, + style=base_style, + tab_size=self.tab_size, + no_wrap=not self.word_wrap, + ) + _get_theme_style = self._theme.get_style_for_token + + lexer = self.lexer or self.default_lexer + + if lexer is None: + text.append(code) + else: + if line_range: + # More complicated path to only stylize a portion of the code + # This speeds up further operations as there are less spans to process + line_start, line_end = line_range + + def line_tokenize() -> Iterable[Tuple[Any, str]]: + """Split tokens to one per line.""" + assert lexer # required to make MyPy happy - we know lexer is not None at this point + + for token_type, token in lexer.get_tokens(code): + while token: + line_token, new_line, token = token.partition("\n") + yield token_type, line_token + new_line + + def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: + """Convert tokens to spans.""" + tokens = iter(line_tokenize()) + line_no = 0 + _line_start = line_start - 1 if line_start else 0 + + # Skip over tokens until line start + while line_no < _line_start: + try: + _token_type, token = next(tokens) + except StopIteration: + break + yield (token, None) + if token.endswith("\n"): + line_no += 1 + # Generate spans until line end + for token_type, token in tokens: + yield (token, _get_theme_style(token_type)) + if token.endswith("\n"): + line_no += 1 + if line_end and line_no >= line_end: + break + + text.append_tokens(tokens_to_spans()) + + else: + text.append_tokens( + (token, _get_theme_style(token_type)) + for token_type, token in lexer.get_tokens(code) + ) + if self.background_color is not None: + text.stylize(f"on {self.background_color}") + + if self._stylized_ranges: + self._apply_stylized_ranges(text) + + return text + + def stylize_range( + self, + style: StyleType, + start: SyntaxPosition, + end: SyntaxPosition, + 
style_before: bool = False, + ) -> None: + """ + Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered. + Line numbers are 1-based, while column indexes are 0-based. + + Args: + style (StyleType): The style to apply. + start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`. + end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`. + style_before (bool): Apply the style before any existing styles. + """ + self._stylized_ranges.append( + _SyntaxHighlightRange(style, start, end, style_before) + ) + + def _get_line_numbers_color(self, blend: float = 0.3) -> Color: + background_style = self._theme.get_background_style() + self.background_style + background_color = background_style.bgcolor + if background_color is None or background_color.is_system_defined: + return Color.default() + foreground_color = self._get_token_color(Token.Text) + if foreground_color is None or foreground_color.is_system_defined: + return foreground_color or Color.default() + new_color = blend_rgb( + background_color.get_truecolor(), + foreground_color.get_truecolor(), + cross_fade=blend, + ) + return Color.from_triplet(new_color) + + @property + def _numbers_column_width(self) -> int: + """Get the number of characters used to render the numbers column.""" + column_width = 0 + if self.line_numbers: + column_width = ( + len(str(self.start_line + self.code.count("\n"))) + + NUMBERS_COLUMN_DEFAULT_PADDING + ) + return column_width + + def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: + """Get background, number, and highlight styles for line numbers.""" + background_style = self._get_base_style() + if background_style.transparent_background: + return Style.null(), Style(dim=True), Style.null() + if console.color_system in ("256", "truecolor"): + number_style = Style.chain( + background_style, + self._theme.get_style_for_token(Token.Text), + 
Style(color=self._get_line_numbers_color()), + self.background_style, + ) + highlight_number_style = Style.chain( + background_style, + self._theme.get_style_for_token(Token.Text), + Style(bold=True, color=self._get_line_numbers_color(0.9)), + self.background_style, + ) + else: + number_style = background_style + Style(dim=True) + highlight_number_style = background_style + Style(dim=False) + return background_style, number_style, highlight_number_style + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + _, right, _, left = Padding.unpack(self.padding) + padding = left + right + if self.code_width is not None: + width = self.code_width + self._numbers_column_width + padding + 1 + return Measurement(self._numbers_column_width, width) + lines = self.code.splitlines() + width = ( + self._numbers_column_width + + padding + + (max(cell_len(line) for line in lines) if lines else 0) + ) + if self.line_numbers: + width += 1 + return Measurement(self._numbers_column_width, width) + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + segments = Segments(self._get_syntax(console, options)) + if self.padding: + yield Padding(segments, style=self._get_base_style(), pad=self.padding) + else: + yield segments + + def _get_syntax( + self, + console: Console, + options: ConsoleOptions, + ) -> Iterable[Segment]: + """ + Get the Segments for the Syntax object, excluding any vertical/horizontal padding + """ + transparent_background = self._get_base_style().transparent_background + code_width = ( + ( + (options.max_width - self._numbers_column_width - 1) + if self.line_numbers + else options.max_width + ) + if self.code_width is None + else self.code_width + ) + + ends_on_nl, processed_code = self._process_code(self.code) + text = self.highlight(processed_code, self.line_range) + + if not self.line_numbers and not self.word_wrap and not self.line_range: + if not ends_on_nl: + 
text.remove_suffix("\n") + # Simple case of just rendering text + style = ( + self._get_base_style() + + self._theme.get_style_for_token(Comment) + + Style(dim=True) + + self.background_style + ) + if self.indent_guides and not options.ascii_only: + text = text.with_indent_guides(self.tab_size, style=style) + text.overflow = "crop" + if style.transparent_background: + yield from console.render( + text, options=options.update(width=code_width) + ) + else: + syntax_lines = console.render_lines( + text, + options.update(width=code_width, height=None, justify="left"), + style=self.background_style, + pad=True, + new_lines=True, + ) + for syntax_line in syntax_lines: + yield from syntax_line + return + + start_line, end_line = self.line_range or (None, None) + line_offset = 0 + if start_line: + line_offset = max(0, start_line - 1) + lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) + if self.line_range: + if line_offset > len(lines): + return + lines = lines[line_offset:end_line] + + if self.indent_guides and not options.ascii_only: + style = ( + self._get_base_style() + + self._theme.get_style_for_token(Comment) + + Style(dim=True) + + self.background_style + ) + lines = ( + Text("\n") + .join(lines) + .with_indent_guides(self.tab_size, style=style + Style(italic=False)) + .split("\n", allow_blank=True) + ) + + numbers_column_width = self._numbers_column_width + render_options = options.update(width=code_width) + + highlight_line = self.highlight_lines.__contains__ + _Segment = Segment + new_line = _Segment("\n") + + line_pointer = "> " if options.legacy_windows else "❱ " + + ( + background_style, + number_style, + highlight_number_style, + ) = self._get_number_styles(console) + + for line_no, line in enumerate(lines, self.start_line + line_offset): + if self.word_wrap: + wrapped_lines = console.render_lines( + line, + render_options.update(height=None, justify="left"), + style=background_style, + pad=not transparent_background, + ) + else: + 
segments = list(line.render(console, end="")) + if options.no_wrap: + wrapped_lines = [segments] + else: + wrapped_lines = [ + _Segment.adjust_line_length( + segments, + render_options.max_width, + style=background_style, + pad=not transparent_background, + ) + ] + + if self.line_numbers: + wrapped_line_left_pad = _Segment( + " " * numbers_column_width + " ", background_style + ) + for first, wrapped_line in loop_first(wrapped_lines): + if first: + line_column = str(line_no).rjust(numbers_column_width - 2) + " " + if highlight_line(line_no): + yield _Segment(line_pointer, Style(color="red")) + yield _Segment(line_column, highlight_number_style) + else: + yield _Segment(" ", highlight_number_style) + yield _Segment(line_column, number_style) + else: + yield wrapped_line_left_pad + yield from wrapped_line + yield new_line + else: + for wrapped_line in wrapped_lines: + yield from wrapped_line + yield new_line + + def _apply_stylized_ranges(self, text: Text) -> None: + """ + Apply stylized ranges to a text instance, + using the given code to determine the right portion to apply the style to. + + Args: + text (Text): Text instance to apply the style to. + """ + code = text.plain + newlines_offsets = [ + # Let's add outer boundaries at each side of the list: + 0, + # N.B. 
using "\n" here is much faster than using metacharacters such as "^" or "\Z":
            *[
                match.start() + 1
                for match in re.finditer("\n", code, flags=re.MULTILINE)
            ],
            # ...plus one final boundary just past the end of the code:
            len(code) + 1,
        ]

        for stylized_range in self._stylized_ranges:
            # Translate the 1-based (line, column) endpoints into flat string
            # indexes into the plain code.
            start = _get_code_index_for_syntax_position(
                newlines_offsets, stylized_range.start
            )
            end = _get_code_index_for_syntax_position(
                newlines_offsets, stylized_range.end
            )
            # Ranges whose line number is out of range are silently dropped.
            if start is not None and end is not None:
                if stylized_range.style_before:
                    text.stylize_before(stylized_range.style, start, end)
                else:
                    text.stylize(stylized_range.style, start, end)

    def _process_code(self, code: str) -> Tuple[bool, str]:
        """
        Applies various processing to a raw code string
        (normalises it so it always ends with a line return, dedents it if necessary, etc.)

        Args:
            code (str): The raw code string to process

        Returns:
            Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return,
                while the string is the processed code.
        """
        ends_on_nl = code.endswith("\n")
        # Normalise: guarantee a trailing newline so downstream line splitting
        # behaves uniformly.
        processed_code = code if ends_on_nl else code + "\n"
        processed_code = (
            textwrap.dedent(processed_code) if self.dedent else processed_code
        )
        # Expand tabs eagerly so cell-width calculations only ever see spaces.
        processed_code = processed_code.expandtabs(self.tab_size)
        return ends_on_nl, processed_code


def _get_code_index_for_syntax_position(
    newlines_offsets: Sequence[int], position: SyntaxPosition
) -> Optional[int]:
    """
    Returns the index of the code string for the given positions.

    Args:
        newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet.
        position (SyntaxPosition): The position to search for.

    Returns:
        Optional[int]: The index of the code string for this position, or `None`
            if the given position's line number is out of range (if it's the column that is out of range
            we silently clamp its value so that it reaches the end of the line)
    """
    lines_count = len(newlines_offsets)

    # `position` is 1-based for lines, 0-based for columns.
    line_number, column_index = position
    if line_number > lines_count or len(newlines_offsets) < (line_number + 1):
        return None  # `line_number` is out of range
    line_index = line_number - 1
    line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1
    # If `column_index` is out of range: let's silently clamp it:
    column_index = min(line_length, column_index)
    return newlines_offsets[line_index] + column_index


if __name__ == "__main__":  # pragma: no cover
    # Small CLI for rendering a source file (or stdin) with syntax
    # highlighting; mirrors the options of the Syntax constructor.
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description="Render syntax to the console with Rich"
    )
    parser.add_argument(
        "path",
        metavar="PATH",
        help="path to file, or - for stdin",
    )
    parser.add_argument(
        "-c",
        "--force-color",
        dest="force_color",
        action="store_true",
        default=None,
        help="force color for non-terminals",
    )
    parser.add_argument(
        "-i",
        "--indent-guides",
        dest="indent_guides",
        action="store_true",
        default=False,
        help="display indent guides",
    )
    parser.add_argument(
        "-l",
        "--line-numbers",
        dest="line_numbers",
        action="store_true",
        help="render line numbers",
    )
    parser.add_argument(
        "-w",
        "--width",
        type=int,
        dest="width",
        default=None,
        help="width of output (default will auto-detect)",
    )
    parser.add_argument(
        "-r",
        "--wrap",
        dest="word_wrap",
        action="store_true",
        default=False,
        help="word wrap long lines",
    )
    parser.add_argument(
        "-s",
        "--soft-wrap",
        action="store_true",
        dest="soft_wrap",
        default=False,
        help="enable soft wrapping mode",
    )
    parser.add_argument(
        "-t", "--theme", dest="theme", default="monokai", help="pygments theme"
    )
parser.add_argument( + "-b", + "--background-color", + dest="background_color", + default=None, + help="Override background color", + ) + parser.add_argument( + "-x", + "--lexer", + default=None, + dest="lexer_name", + help="Lexer name", + ) + parser.add_argument( + "-p", "--padding", type=int, default=0, dest="padding", help="Padding" + ) + parser.add_argument( + "--highlight-line", + type=int, + default=None, + dest="highlight_line", + help="The line number (not index!) to highlight", + ) + args = parser.parse_args() + + from pip._vendor.rich.console import Console + + console = Console(force_terminal=args.force_color, width=args.width) + + if args.path == "-": + code = sys.stdin.read() + syntax = Syntax( + code=code, + lexer=args.lexer_name, + line_numbers=args.line_numbers, + word_wrap=args.word_wrap, + theme=args.theme, + background_color=args.background_color, + indent_guides=args.indent_guides, + padding=args.padding, + highlight_lines={args.highlight_line}, + ) + else: + syntax = Syntax.from_path( + args.path, + lexer=args.lexer_name, + line_numbers=args.line_numbers, + word_wrap=args.word_wrap, + theme=args.theme, + background_color=args.background_color, + indent_guides=args.indent_guides, + padding=args.padding, + highlight_lines={args.highlight_line}, + ) + console.print(syntax, soft_wrap=args.soft_wrap) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py new file mode 100644 index 0000000000000000000000000000000000000000..565e9d960f8604c487e063ad9ed3f6f63027f3b4 --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py @@ -0,0 +1,153 @@ +from typing import List, Optional, Tuple + +from .color_triplet import ColorTriplet +from .palette import Palette + +_ColorTuple = Tuple[int, int, int] + + +class TerminalTheme: + """A color theme used when exporting console content. 

    Args:
        background (Tuple[int, int, int]): The background color.
        foreground (Tuple[int, int, int]): The foreground (text) color.
        normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
        bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
            to repeat normal intensity. Defaults to None.
    """

    def __init__(
        self,
        background: _ColorTuple,
        foreground: _ColorTuple,
        normal: List[_ColorTuple],
        bright: Optional[List[_ColorTuple]] = None,
    ) -> None:
        self.background_color = ColorTriplet(*background)
        self.foreground_color = ColorTriplet(*foreground)
        # 16-color ANSI palette: the normal colors followed by the bright
        # variants (normal repeated when no bright list is supplied).
        self.ansi_colors = Palette(normal + (bright or normal))


# Plain white-background theme built from the canonical VGA 16-color values.
DEFAULT_TERMINAL_THEME = TerminalTheme(
    (255, 255, 255),
    (0, 0, 0),
    [
        (0, 0, 0),
        (128, 0, 0),
        (0, 128, 0),
        (128, 128, 0),
        (0, 0, 128),
        (128, 0, 128),
        (0, 128, 128),
        (192, 192, 192),
    ],
    [
        (128, 128, 128),
        (255, 0, 0),
        (0, 255, 0),
        (255, 255, 0),
        (0, 0, 255),
        (255, 0, 255),
        (0, 255, 255),
        (255, 255, 255),
    ],
)

# NOTE(review): the Monokai-derived themes below carry 9 "normal" and
# 7 "bright" entries (16 colors total) — this matches upstream rich;
# confirm it is intentional before "fixing" the counts.
MONOKAI = TerminalTheme(
    (12, 12, 12),
    (217, 217, 217),
    [
        (26, 26, 26),
        (244, 0, 95),
        (152, 224, 36),
        (253, 151, 31),
        (157, 101, 255),
        (244, 0, 95),
        (88, 209, 235),
        (196, 197, 181),
        (98, 94, 76),
    ],
    [
        (244, 0, 95),
        (152, 224, 36),
        (224, 213, 97),
        (157, 101, 255),
        (244, 0, 95),
        (88, 209, 235),
        (246, 246, 239),
    ],
)
DIMMED_MONOKAI = TerminalTheme(
    (25, 25, 25),
    (185, 188, 186),
    [
        (58, 61, 67),
        (190, 63, 72),
        (135, 154, 59),
        (197, 166, 53),
        (79, 118, 161),
        (133, 92, 141),
        (87, 143, 164),
        (185, 188, 186),
        (136, 137, 135),
    ],
    [
        (251, 0, 31),
        (15, 114, 47),
        (196, 112, 51),
        (24, 109, 227),
        (251, 0, 103),
        (46, 112, 109),
        (253, 255, 185),
    ],
)
NIGHT_OWLISH = TerminalTheme(
    (255, 255, 255),
    (64, 63, 83),
    [
        (1, 22, 39),
        (211, 66, 62),
        (42, 162, 152),
        (218, 170, 1),
        (72, 118, 214),
        (64, 63, 83),
        (8, 145, 106),
        (122, 129, 129),
        (122, 129,
129), + ], + [ + (247, 110, 110), + (73, 208, 197), + (218, 194, 107), + (92, 167, 228), + (105, 112, 152), + (0, 201, 144), + (152, 159, 177), + ], +) + +SVG_EXPORT_THEME = TerminalTheme( + (41, 41, 41), + (197, 200, 198), + [ + (75, 78, 85), + (204, 85, 90), + (152, 168, 75), + (208, 179, 68), + (96, 138, 177), + (152, 114, 159), + (104, 160, 179), + (197, 200, 198), + (154, 155, 153), + ], + [ + (255, 38, 39), + (0, 130, 61), + (208, 132, 66), + (25, 132, 233), + (255, 44, 122), + (57, 130, 128), + (253, 253, 197), + ], +) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/text.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/text.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0c6b142b82ea440b37bc4253063a45e19512bf --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/text.py @@ -0,0 +1,1361 @@ +import re +from functools import partial, reduce +from math import gcd +from operator import itemgetter +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Pattern, + Tuple, + Union, +) + +from ._loop import loop_last +from ._pick import pick_bool +from ._wrap import divide_line +from .align import AlignMethod +from .cells import cell_len, set_cell_size +from .containers import Lines +from .control import strip_control_codes +from .emoji import EmojiVariant +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style, StyleType + +if TYPE_CHECKING: # pragma: no cover + from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod + +DEFAULT_JUSTIFY: "JustifyMethod" = "default" +DEFAULT_OVERFLOW: "OverflowMethod" = "fold" + + +_re_whitespace = re.compile(r"\s+$") + +TextType = Union[str, "Text"] +"""A plain string or a :class:`Text` instance.""" + +GetStyleCallable = Callable[[str], Optional[StyleType]] + + +class Span(NamedTuple): + """A marked up region in some 
text.""" + + start: int + """Span start index.""" + end: int + """Span end index.""" + style: Union[str, Style] + """Style associated with the span.""" + + def __repr__(self) -> str: + return f"Span({self.start}, {self.end}, {self.style!r})" + + def __bool__(self) -> bool: + return self.end > self.start + + def split(self, offset: int) -> Tuple["Span", Optional["Span"]]: + """Split a span in to 2 from a given offset.""" + + if offset < self.start: + return self, None + if offset >= self.end: + return self, None + + start, end, style = self + span1 = Span(start, min(end, offset), style) + span2 = Span(span1.end, end, style) + return span1, span2 + + def move(self, offset: int) -> "Span": + """Move start and end by a given offset. + + Args: + offset (int): Number of characters to add to start and end. + + Returns: + TextSpan: A new TextSpan with adjusted position. + """ + start, end, style = self + return Span(start + offset, end + offset, style) + + def right_crop(self, offset: int) -> "Span": + """Crop the span at the given offset. + + Args: + offset (int): A value between start and end. + + Returns: + Span: A new (possibly smaller) span. + """ + start, end, style = self + if offset >= end: + return self + return Span(start, min(offset, end), style) + + def extend(self, cells: int) -> "Span": + """Extend the span by the given number of cells. + + Args: + cells (int): Additional space to add to end of span. + + Returns: + Span: A span. + """ + if cells: + start, end, style = self + return Span(start, end + cells, style) + else: + return self + + +class Text(JupyterMixin): + """Text with color / style. + + Args: + text (str, optional): Default unstyled text. Defaults to "". + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. 
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None. + spans (List[Span], optional). A list of predefined style spans. Defaults to None. + """ + + __slots__ = [ + "_text", + "style", + "justify", + "overflow", + "no_wrap", + "end", + "tab_size", + "_spans", + "_length", + ] + + def __init__( + self, + text: str = "", + style: Union[str, Style] = "", + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: Optional[int] = None, + spans: Optional[List[Span]] = None, + ) -> None: + sanitized_text = strip_control_codes(text) + self._text = [sanitized_text] + self.style = style + self.justify: Optional["JustifyMethod"] = justify + self.overflow: Optional["OverflowMethod"] = overflow + self.no_wrap = no_wrap + self.end = end + self.tab_size = tab_size + self._spans: List[Span] = spans or [] + self._length: int = len(sanitized_text) + + def __len__(self) -> int: + return self._length + + def __bool__(self) -> bool: + return bool(self._length) + + def __str__(self) -> str: + return self.plain + + def __repr__(self) -> str: + return f"" + + def __add__(self, other: Any) -> "Text": + if isinstance(other, (str, Text)): + result = self.copy() + result.append(other) + return result + return NotImplemented + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Text): + return NotImplemented + return self.plain == other.plain and self._spans == other._spans + + def __contains__(self, other: object) -> bool: + if isinstance(other, str): + return other in self.plain + elif isinstance(other, Text): + return other.plain in self.plain + return False + + def __getitem__(self, slice: Union[int, slice]) -> "Text": + def get_text_at(offset: int) -> "Text": + 
_Span = Span + text = Text( + self.plain[offset], + spans=[ + _Span(0, 1, style) + for start, end, style in self._spans + if end > offset >= start + ], + end="", + ) + return text + + if isinstance(slice, int): + return get_text_at(slice) + else: + start, stop, step = slice.indices(len(self.plain)) + if step == 1: + lines = self.divide([start, stop]) + return lines[1] + else: + # This would be a bit of work to implement efficiently + # For now, its not required + raise TypeError("slices with step!=1 are not supported") + + @property + def cell_len(self) -> int: + """Get the number of cells required to render this text.""" + return cell_len(self.plain) + + @property + def markup(self) -> str: + """Get console markup to render this Text. + + Returns: + str: A string potentially creating markup tags. + """ + from .markup import escape + + output: List[str] = [] + + plain = self.plain + markup_spans = [ + (0, False, self.style), + *((span.start, False, span.style) for span in self._spans), + *((span.end, True, span.style) for span in self._spans), + (len(plain), True, self.style), + ] + markup_spans.sort(key=itemgetter(0, 1)) + position = 0 + append = output.append + for offset, closing, style in markup_spans: + if offset > position: + append(escape(plain[position:offset])) + position = offset + if style: + append(f"[/{style}]" if closing else f"[{style}]") + markup = "".join(output) + return markup + + @classmethod + def from_markup( + cls, + text: str, + *, + style: Union[str, Style] = "", + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + end: str = "\n", + ) -> "Text": + """Create Text instance from markup. + + Args: + text (str): A string containing console markup. + style (Union[str, Style], optional): Base style for text. Defaults to "". + emoji (bool, optional): Also render emoji code. Defaults to True. 
+ emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + + Returns: + Text: A Text instance with markup rendered. + """ + from .markup import render + + rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant) + rendered_text.justify = justify + rendered_text.overflow = overflow + rendered_text.end = end + return rendered_text + + @classmethod + def from_ansi( + cls, + text: str, + *, + style: Union[str, Style] = "", + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: Optional[int] = 8, + ) -> "Text": + """Create a Text object from a string containing ANSI escape codes. + + Args: + text (str): A string containing escape codes. + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None. 
+ """ + from .ansi import AnsiDecoder + + joiner = Text( + "\n", + justify=justify, + overflow=overflow, + no_wrap=no_wrap, + end=end, + tab_size=tab_size, + style=style, + ) + decoder = AnsiDecoder() + result = joiner.join(line for line in decoder.decode(text)) + return result + + @classmethod + def styled( + cls, + text: str, + style: StyleType = "", + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + ) -> "Text": + """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used + to pad the text when it is justified. + + Args: + text (str): A string containing console markup. + style (Union[str, Style]): Style to apply to the text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + + Returns: + Text: A text instance with a style applied to the entire string. + """ + styled_text = cls(text, justify=justify, overflow=overflow) + styled_text.stylize(style) + return styled_text + + @classmethod + def assemble( + cls, + *parts: Union[str, "Text", Tuple[str, StyleType]], + style: Union[str, Style] = "", + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + no_wrap: Optional[bool] = None, + end: str = "\n", + tab_size: int = 8, + meta: Optional[Dict[str, Any]] = None, + ) -> "Text": + """Construct a text instance by combining a sequence of strings with optional styles. + The positional arguments should be either strings, or a tuple of string + style. + + Args: + style (Union[str, Style], optional): Base style for text. Defaults to "". + justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. + overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. + no_wrap (bool, optional): Disable text wrapping, or None for default. 
Defaults to None. + end (str, optional): Character to end text with. Defaults to "\\\\n". + tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None. + meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None + + Returns: + Text: A new text instance. + """ + text = cls( + style=style, + justify=justify, + overflow=overflow, + no_wrap=no_wrap, + end=end, + tab_size=tab_size, + ) + append = text.append + _Text = Text + for part in parts: + if isinstance(part, (_Text, str)): + append(part) + else: + append(*part) + if meta: + text.apply_meta(meta) + return text + + @property + def plain(self) -> str: + """Get the text as a single string.""" + if len(self._text) != 1: + self._text[:] = ["".join(self._text)] + return self._text[0] + + @plain.setter + def plain(self, new_text: str) -> None: + """Set the text to a new value.""" + if new_text != self.plain: + sanitized_text = strip_control_codes(new_text) + self._text[:] = [sanitized_text] + old_length = self._length + self._length = len(sanitized_text) + if old_length > self._length: + self._trim_spans() + + @property + def spans(self) -> List[Span]: + """Get a reference to the internal list of spans.""" + return self._spans + + @spans.setter + def spans(self, spans: List[Span]) -> None: + """Set spans.""" + self._spans = spans[:] + + def blank_copy(self, plain: str = "") -> "Text": + """Return a new Text instance with copied metadata (but not the string or spans).""" + copy_self = Text( + plain, + style=self.style, + justify=self.justify, + overflow=self.overflow, + no_wrap=self.no_wrap, + end=self.end, + tab_size=self.tab_size, + ) + return copy_self + + def copy(self) -> "Text": + """Return a copy of this instance.""" + copy_self = Text( + self.plain, + style=self.style, + justify=self.justify, + overflow=self.overflow, + no_wrap=self.no_wrap, + end=self.end, + tab_size=self.tab_size, + ) + copy_self._spans[:] = self._spans + 
return copy_self + + def stylize( + self, + style: Union[str, Style], + start: int = 0, + end: Optional[int] = None, + ) -> None: + """Apply a style to the text, or a portion of the text. + + Args: + style (Union[str, Style]): Style instance or style definition to apply. + start (int): Start offset (negative indexing is supported). Defaults to 0. + end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None. + """ + if style: + length = len(self) + if start < 0: + start = length + start + if end is None: + end = length + if end < 0: + end = length + end + if start >= length or end <= start: + # Span not in text or not valid + return + self._spans.append(Span(start, min(length, end), style)) + + def stylize_before( + self, + style: Union[str, Style], + start: int = 0, + end: Optional[int] = None, + ) -> None: + """Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present. + + Args: + style (Union[str, Style]): Style instance or style definition to apply. + start (int): Start offset (negative indexing is supported). Defaults to 0. + end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None. + """ + if style: + length = len(self) + if start < 0: + start = length + start + if end is None: + end = length + if end < 0: + end = length + end + if start >= length or end <= start: + # Span not in text or not valid + return + self._spans.insert(0, Span(start, min(length, end), style)) + + def apply_meta( + self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None + ) -> None: + """Apply metadata to the text, or a portion of the text. + + Args: + meta (Dict[str, Any]): A dict of meta information. + start (int): Start offset (negative indexing is supported). Defaults to 0. + end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None. 
+ + """ + style = Style.from_meta(meta) + self.stylize(style, start=start, end=end) + + def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text": + """Apply event handlers (used by Textual project). + + Example: + >>> from rich.text import Text + >>> text = Text("hello world") + >>> text.on(click="view.toggle('world')") + + Args: + meta (Dict[str, Any]): Mapping of meta information. + **handlers: Keyword args are prefixed with "@" to defined handlers. + + Returns: + Text: Self is returned to method may be chained. + """ + meta = {} if meta is None else meta + meta.update({f"@{key}": value for key, value in handlers.items()}) + self.stylize(Style.from_meta(meta)) + return self + + def remove_suffix(self, suffix: str) -> None: + """Remove a suffix if it exists. + + Args: + suffix (str): Suffix to remove. + """ + if self.plain.endswith(suffix): + self.right_crop(len(suffix)) + + def get_style_at_offset(self, console: "Console", offset: int) -> Style: + """Get the style of a character at give offset. + + Args: + console (~Console): Console where text will be rendered. + offset (int): Offset in to text (negative indexing supported) + + Returns: + Style: A Style instance. + """ + # TODO: This is a little inefficient, it is only used by full justify + if offset < 0: + offset = len(self) + offset + get_style = console.get_style + style = get_style(self.style).copy() + for start, end, span_style in self._spans: + if end > offset >= start: + style += get_style(span_style, default="") + return style + + def extend_style(self, spaces: int) -> None: + """Extend the Text given number of spaces where the spaces have the same style as the last character. + + Args: + spaces (int): Number of spaces to add to the Text. 
+ """ + if spaces <= 0: + return + spans = self.spans + new_spaces = " " * spaces + if spans: + end_offset = len(self) + self._spans[:] = [ + span.extend(spaces) if span.end >= end_offset else span + for span in spans + ] + self._text.append(new_spaces) + self._length += spaces + else: + self.plain += new_spaces + + def highlight_regex( + self, + re_highlight: Union[Pattern[str], str], + style: Optional[Union[GetStyleCallable, StyleType]] = None, + *, + style_prefix: str = "", + ) -> int: + """Highlight text with a regular expression, where group names are + translated to styles. + + Args: + re_highlight (Union[re.Pattern, str]): A regular expression object or string. + style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable + which accepts the matched text and returns a style. Defaults to None. + style_prefix (str, optional): Optional prefix to add to style group names. + + Returns: + int: Number of regex matches + """ + count = 0 + append_span = self._spans.append + _Span = Span + plain = self.plain + if isinstance(re_highlight, str): + re_highlight = re.compile(re_highlight) + for match in re_highlight.finditer(plain): + get_span = match.span + if style: + start, end = get_span() + match_style = style(plain[start:end]) if callable(style) else style + if match_style is not None and end > start: + append_span(_Span(start, end, match_style)) + + count += 1 + for name in match.groupdict().keys(): + start, end = get_span(name) + if start != -1 and end > start: + append_span(_Span(start, end, f"{style_prefix}{name}")) + return count + + def highlight_words( + self, + words: Iterable[str], + style: Union[str, Style], + *, + case_sensitive: bool = True, + ) -> int: + """Highlight words with a style. + + Args: + words (Iterable[str]): Words to highlight. + style (Union[str, Style]): Style to apply. + case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True. + + Returns: + int: Number of words highlighted. 
+ """ + re_words = "|".join(re.escape(word) for word in words) + add_span = self._spans.append + count = 0 + _Span = Span + for match in re.finditer( + re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE + ): + start, end = match.span(0) + add_span(_Span(start, end, style)) + count += 1 + return count + + def rstrip(self) -> None: + """Strip whitespace from end of text.""" + self.plain = self.plain.rstrip() + + def rstrip_end(self, size: int) -> None: + """Remove whitespace beyond a certain width at the end of the text. + + Args: + size (int): The desired size of the text. + """ + text_length = len(self) + if text_length > size: + excess = text_length - size + whitespace_match = _re_whitespace.search(self.plain) + if whitespace_match is not None: + whitespace_count = len(whitespace_match.group(0)) + self.right_crop(min(whitespace_count, excess)) + + def set_length(self, new_length: int) -> None: + """Set new length of the text, clipping or padding is required.""" + length = len(self) + if length != new_length: + if length < new_length: + self.pad_right(new_length - length) + else: + self.right_crop(length - new_length) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> Iterable[Segment]: + tab_size: int = console.tab_size if self.tab_size is None else self.tab_size + justify = self.justify or options.justify or DEFAULT_JUSTIFY + + overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW + + lines = self.wrap( + console, + options.max_width, + justify=justify, + overflow=overflow, + tab_size=tab_size or 8, + no_wrap=pick_bool(self.no_wrap, options.no_wrap, False), + ) + all_lines = Text("\n").join(lines) + yield from all_lines.render(console, end=self.end) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + text = self.plain + lines = text.splitlines() + max_text_width = max(cell_len(line) for line in lines) if lines else 0 + words = text.split() + 
min_text_width = ( + max(cell_len(word) for word in words) if words else max_text_width + ) + return Measurement(min_text_width, max_text_width) + + def render(self, console: "Console", end: str = "") -> Iterable["Segment"]: + """Render the text as Segments. + + Args: + console (Console): Console instance. + end (Optional[str], optional): Optional end character. + + Returns: + Iterable[Segment]: Result of render that may be written to the console. + """ + _Segment = Segment + text = self.plain + if not self._spans: + yield Segment(text) + if end: + yield _Segment(end) + return + get_style = partial(console.get_style, default=Style.null()) + + enumerated_spans = list(enumerate(self._spans, 1)) + style_map = {index: get_style(span.style) for index, span in enumerated_spans} + style_map[0] = get_style(self.style) + + spans = [ + (0, False, 0), + *((span.start, False, index) for index, span in enumerated_spans), + *((span.end, True, index) for index, span in enumerated_spans), + (len(text), True, 0), + ] + spans.sort(key=itemgetter(0, 1)) + + stack: List[int] = [] + stack_append = stack.append + stack_pop = stack.remove + + style_cache: Dict[Tuple[Style, ...], Style] = {} + style_cache_get = style_cache.get + combine = Style.combine + + def get_current_style() -> Style: + """Construct current style from stack.""" + styles = tuple(style_map[_style_id] for _style_id in sorted(stack)) + cached_style = style_cache_get(styles) + if cached_style is not None: + return cached_style + current_style = combine(styles) + style_cache[styles] = current_style + return current_style + + for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]): + if leaving: + stack_pop(style_id) + else: + stack_append(style_id) + if next_offset > offset: + yield _Segment(text[offset:next_offset], get_current_style()) + if end: + yield _Segment(end) + + def join(self, lines: Iterable["Text"]) -> "Text": + """Join text together with this instance as the separator. 
+ + Args: + lines (Iterable[Text]): An iterable of Text instances to join. + + Returns: + Text: A new text instance containing join text. + """ + + new_text = self.blank_copy() + + def iter_text() -> Iterable["Text"]: + if self.plain: + for last, line in loop_last(lines): + yield line + if not last: + yield self + else: + yield from lines + + extend_text = new_text._text.extend + append_span = new_text._spans.append + extend_spans = new_text._spans.extend + offset = 0 + _Span = Span + + for text in iter_text(): + extend_text(text._text) + if text.style: + append_span(_Span(offset, offset + len(text), text.style)) + extend_spans( + _Span(offset + start, offset + end, style) + for start, end, style in text._spans + ) + offset += len(text) + new_text._length = offset + return new_text + + def expand_tabs(self, tab_size: Optional[int] = None) -> None: + """Converts tabs to spaces. + + Args: + tab_size (int, optional): Size of tabs. Defaults to 8. + + """ + if "\t" not in self.plain: + return + if tab_size is None: + tab_size = self.tab_size + if tab_size is None: + tab_size = 8 + + new_text: List[Text] = [] + append = new_text.append + + for line in self.split("\n", include_separator=True): + if "\t" not in line.plain: + append(line) + else: + cell_position = 0 + parts = line.split("\t", include_separator=True) + for part in parts: + if part.plain.endswith("\t"): + part._text[-1] = part._text[-1][:-1] + " " + cell_position += part.cell_len + tab_remainder = cell_position % tab_size + if tab_remainder: + spaces = tab_size - tab_remainder + part.extend_style(spaces) + cell_position += spaces + else: + cell_position += part.cell_len + append(part) + + result = Text("").join(new_text) + + self._text = [result.plain] + self._length = len(self.plain) + self._spans[:] = result._spans + + def truncate( + self, + max_width: int, + *, + overflow: Optional["OverflowMethod"] = None, + pad: bool = False, + ) -> None: + """Truncate text if it is longer that a given width. 
+ + Args: + max_width (int): Maximum number of characters in text. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow. + pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False. + """ + _overflow = overflow or self.overflow or DEFAULT_OVERFLOW + if _overflow != "ignore": + length = cell_len(self.plain) + if length > max_width: + if _overflow == "ellipsis": + self.plain = set_cell_size(self.plain, max_width - 1) + "…" + else: + self.plain = set_cell_size(self.plain, max_width) + if pad and length < max_width: + spaces = max_width - length + self._text = [f"{self.plain}{' ' * spaces}"] + self._length = len(self.plain) + + def _trim_spans(self) -> None: + """Remove or modify any spans that are over the end of the text.""" + max_offset = len(self.plain) + _Span = Span + self._spans[:] = [ + ( + span + if span.end < max_offset + else _Span(span.start, min(max_offset, span.end), span.style) + ) + for span in self._spans + if span.start < max_offset + ] + + def pad(self, count: int, character: str = " ") -> None: + """Pad left and right with a given number of characters. + + Args: + count (int): Width of padding. + character (str): The character to pad with. Must be a string of length 1. + """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + pad_characters = character * count + self.plain = f"{pad_characters}{self.plain}{pad_characters}" + _Span = Span + self._spans[:] = [ + _Span(start + count, end + count, style) + for start, end, style in self._spans + ] + + def pad_left(self, count: int, character: str = " ") -> None: + """Pad the left with a given character. + + Args: + count (int): Number of characters to pad. + character (str, optional): Character to pad with. Defaults to " ". 
+ """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + self.plain = f"{character * count}{self.plain}" + _Span = Span + self._spans[:] = [ + _Span(start + count, end + count, style) + for start, end, style in self._spans + ] + + def pad_right(self, count: int, character: str = " ") -> None: + """Pad the right with a given character. + + Args: + count (int): Number of characters to pad. + character (str, optional): Character to pad with. Defaults to " ". + """ + assert len(character) == 1, "Character must be a string of length 1" + if count: + self.plain = f"{self.plain}{character * count}" + + def align(self, align: AlignMethod, width: int, character: str = " ") -> None: + """Align text to a given width. + + Args: + align (AlignMethod): One of "left", "center", or "right". + width (int): Desired width. + character (str, optional): Character to pad with. Defaults to " ". + """ + self.truncate(width) + excess_space = width - cell_len(self.plain) + if excess_space: + if align == "left": + self.pad_right(excess_space, character) + elif align == "center": + left = excess_space // 2 + self.pad_left(left, character) + self.pad_right(excess_space - left, character) + else: + self.pad_left(excess_space, character) + + def append( + self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None + ) -> "Text": + """Add text with an optional style. + + Args: + text (Union[Text, str]): A str or Text to append. + style (str, optional): A style name. Defaults to None. + + Returns: + Text: Returns self for chaining. 
+ """ + + if not isinstance(text, (str, Text)): + raise TypeError("Only str or Text can be appended to Text") + + if len(text): + if isinstance(text, str): + sanitized_text = strip_control_codes(text) + self._text.append(sanitized_text) + offset = len(self) + text_length = len(sanitized_text) + if style: + self._spans.append(Span(offset, offset + text_length, style)) + self._length += text_length + elif isinstance(text, Text): + _Span = Span + if style is not None: + raise ValueError( + "style must not be set when appending Text instance" + ) + text_length = self._length + if text.style: + self._spans.append( + _Span(text_length, text_length + len(text), text.style) + ) + self._text.append(text.plain) + self._spans.extend( + _Span(start + text_length, end + text_length, style) + for start, end, style in text._spans.copy() + ) + self._length += len(text) + return self + + def append_text(self, text: "Text") -> "Text": + """Append another Text instance. This method is more performant that Text.append, but + only works for Text. + + Args: + text (Text): The Text instance to append to this instance. + + Returns: + Text: Returns self for chaining. + """ + _Span = Span + text_length = self._length + if text.style: + self._spans.append(_Span(text_length, text_length + len(text), text.style)) + self._text.append(text.plain) + self._spans.extend( + _Span(start + text_length, end + text_length, style) + for start, end, style in text._spans.copy() + ) + self._length += len(text) + return self + + def append_tokens( + self, tokens: Iterable[Tuple[str, Optional[StyleType]]] + ) -> "Text": + """Append iterable of str and style. Style may be a Style instance or a str style definition. + + Args: + tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style. + + Returns: + Text: Returns self for chaining. 
+ """ + append_text = self._text.append + append_span = self._spans.append + _Span = Span + offset = len(self) + for content, style in tokens: + content = strip_control_codes(content) + append_text(content) + if style: + append_span(_Span(offset, offset + len(content), style)) + offset += len(content) + self._length = offset + return self + + def copy_styles(self, text: "Text") -> None: + """Copy styles from another Text instance. + + Args: + text (Text): A Text instance to copy styles from, must be the same length. + """ + self._spans.extend(text._spans) + + def split( + self, + separator: str = "\n", + *, + include_separator: bool = False, + allow_blank: bool = False, + ) -> Lines: + """Split rich text in to lines, preserving styles. + + Args: + separator (str, optional): String to split on. Defaults to "\\\\n". + include_separator (bool, optional): Include the separator in the lines. Defaults to False. + allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False. + + Returns: + List[RichText]: A list of rich text, one per line of the original. + """ + assert separator, "separator must not be empty" + + text = self.plain + if separator not in text: + return Lines([self.copy()]) + + if include_separator: + lines = self.divide( + match.end() for match in re.finditer(re.escape(separator), text) + ) + else: + + def flatten_spans() -> Iterable[int]: + for match in re.finditer(re.escape(separator), text): + start, end = match.span() + yield start + yield end + + lines = Lines( + line for line in self.divide(flatten_spans()) if line.plain != separator + ) + + if not allow_blank and text.endswith(separator): + lines.pop() + + return lines + + def divide(self, offsets: Iterable[int]) -> Lines: + """Divide text in to a number of lines at given offsets. + + Args: + offsets (Iterable[int]): Offsets used to divide text. + + Returns: + Lines: New RichText instances between offsets. 
+ """ + _offsets = list(offsets) + + if not _offsets: + return Lines([self.copy()]) + + text = self.plain + text_length = len(text) + divide_offsets = [0, *_offsets, text_length] + line_ranges = list(zip(divide_offsets, divide_offsets[1:])) + + style = self.style + justify = self.justify + overflow = self.overflow + _Text = Text + new_lines = Lines( + _Text( + text[start:end], + style=style, + justify=justify, + overflow=overflow, + ) + for start, end in line_ranges + ) + if not self._spans: + return new_lines + + _line_appends = [line._spans.append for line in new_lines._lines] + line_count = len(line_ranges) + _Span = Span + + for span_start, span_end, style in self._spans: + lower_bound = 0 + upper_bound = line_count + start_line_no = (lower_bound + upper_bound) // 2 + + while True: + line_start, line_end = line_ranges[start_line_no] + if span_start < line_start: + upper_bound = start_line_no - 1 + elif span_start > line_end: + lower_bound = start_line_no + 1 + else: + break + start_line_no = (lower_bound + upper_bound) // 2 + + if span_end < line_end: + end_line_no = start_line_no + else: + end_line_no = lower_bound = start_line_no + upper_bound = line_count + + while True: + line_start, line_end = line_ranges[end_line_no] + if span_end < line_start: + upper_bound = end_line_no - 1 + elif span_end > line_end: + lower_bound = end_line_no + 1 + else: + break + end_line_no = (lower_bound + upper_bound) // 2 + + for line_no in range(start_line_no, end_line_no + 1): + line_start, line_end = line_ranges[line_no] + new_start = max(0, span_start - line_start) + new_end = min(span_end - line_start, line_end - line_start) + if new_end > new_start: + _line_appends[line_no](_Span(new_start, new_end, style)) + + return new_lines + + def right_crop(self, amount: int = 1) -> None: + """Remove a number of characters from the end of the text.""" + max_offset = len(self.plain) - amount + _Span = Span + self._spans[:] = [ + ( + span + if span.end < max_offset + else 
_Span(span.start, min(max_offset, span.end), span.style) + ) + for span in self._spans + if span.start < max_offset + ] + self._text = [self.plain[:-amount]] + self._length -= amount + + def wrap( + self, + console: "Console", + width: int, + *, + justify: Optional["JustifyMethod"] = None, + overflow: Optional["OverflowMethod"] = None, + tab_size: int = 8, + no_wrap: Optional[bool] = None, + ) -> Lines: + """Word wrap the text. + + Args: + console (Console): Console instance. + width (int): Number of cells available per line. + justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default". + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None. + tab_size (int, optional): Default tab size. Defaults to 8. + no_wrap (bool, optional): Disable wrapping, Defaults to False. + + Returns: + Lines: Number of lines. + """ + wrap_justify = justify or self.justify or DEFAULT_JUSTIFY + wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW + + no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore" + + lines = Lines() + for line in self.split(allow_blank=True): + if "\t" in line: + line.expand_tabs(tab_size) + if no_wrap: + new_lines = Lines([line]) + else: + offsets = divide_line(str(line), width, fold=wrap_overflow == "fold") + new_lines = line.divide(offsets) + for line in new_lines: + line.rstrip_end(width) + if wrap_justify: + new_lines.justify( + console, width, justify=wrap_justify, overflow=wrap_overflow + ) + for line in new_lines: + line.truncate(width, overflow=wrap_overflow) + lines.extend(new_lines) + return lines + + def fit(self, width: int) -> Lines: + """Fit the text in to given width by chopping in to lines. + + Args: + width (int): Maximum characters in a line. + + Returns: + Lines: Lines container. 
+ """ + lines: Lines = Lines() + append = lines.append + for line in self.split(): + line.set_length(width) + append(line) + return lines + + def detect_indentation(self) -> int: + """Auto-detect indentation of code. + + Returns: + int: Number of spaces used to indent code. + """ + + _indentations = { + len(match.group(1)) + for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE) + } + + try: + indentation = ( + reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1 + ) + except TypeError: + indentation = 1 + + return indentation + + def with_indent_guides( + self, + indent_size: Optional[int] = None, + *, + character: str = "│", + style: StyleType = "dim green", + ) -> "Text": + """Adds indent guide lines to text. + + Args: + indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None. + character (str, optional): Character to use for indentation. Defaults to "│". + style (Union[Style, str], optional): Style of indent guides. + + Returns: + Text: New text with indentation guides. 
+ """ + + _indent_size = self.detect_indentation() if indent_size is None else indent_size + + text = self.copy() + text.expand_tabs() + indent_line = f"{character}{' ' * (_indent_size - 1)}" + + re_indent = re.compile(r"^( *)(.*)$") + new_lines: List[Text] = [] + add_line = new_lines.append + blank_lines = 0 + for line in text.split(allow_blank=True): + match = re_indent.match(line.plain) + if not match or not match.group(2): + blank_lines += 1 + continue + indent = match.group(1) + full_indents, remaining_space = divmod(len(indent), _indent_size) + new_indent = f"{indent_line * full_indents}{' ' * remaining_space}" + line.plain = new_indent + line.plain[len(new_indent) :] + line.stylize(style, 0, len(new_indent)) + if blank_lines: + new_lines.extend([Text(new_indent, style=style)] * blank_lines) + blank_lines = 0 + add_line(line) + if blank_lines: + new_lines.extend([Text("", style=style)] * blank_lines) + + new_text = text.blank_copy("\n").join(new_lines) + return new_text + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + + text = Text( + """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n""" + ) + text.highlight_words(["Lorem"], "bold") + text.highlight_words(["ipsum"], "italic") + + console = Console() + + console.rule("justify='left'") + console.print(text, style="red") + console.print() + + console.rule("justify='center'") + console.print(text, style="green", justify="center") + console.print() + + console.rule("justify='right'") + console.print(text, style="blue", justify="right") + console.print() + + console.rule("justify='full'") + console.print(text, style="magenta", justify="full") + console.print() diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/theme.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/theme.py new file mode 100644 index 0000000000000000000000000000000000000000..227f1d8635f8ba915153b21a6b925643a11d286e --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/theme.py @@ -0,0 +1,115 @@ +import configparser +from typing import IO, Dict, List, Mapping, Optional + +from .default_styles import DEFAULT_STYLES +from .style import Style, StyleType + + +class Theme: + """A container for style information, used by :class:`~rich.console.Console`. + + Args: + styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles. + inherit (bool, optional): Inherit default styles. Defaults to True. 
+ """ + + styles: Dict[str, Style] + + def __init__( + self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True + ): + self.styles = DEFAULT_STYLES.copy() if inherit else {} + if styles is not None: + self.styles.update( + { + name: style if isinstance(style, Style) else Style.parse(style) + for name, style in styles.items() + } + ) + + @property + def config(self) -> str: + """Get contents of a config file for this theme.""" + config = "[styles]\n" + "\n".join( + f"{name} = {style}" for name, style in sorted(self.styles.items()) + ) + return config + + @classmethod + def from_file( + cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True + ) -> "Theme": + """Load a theme from a text mode file. + + Args: + config_file (IO[str]): An open conf file. + source (str, optional): The filename of the open file. Defaults to None. + inherit (bool, optional): Inherit default styles. Defaults to True. + + Returns: + Theme: A New theme instance. + """ + config = configparser.ConfigParser() + config.read_file(config_file, source=source) + styles = {name: Style.parse(value) for name, value in config.items("styles")} + theme = Theme(styles, inherit=inherit) + return theme + + @classmethod + def read( + cls, path: str, inherit: bool = True, encoding: Optional[str] = None + ) -> "Theme": + """Read a theme from a path. + + Args: + path (str): Path to a config file readable by Python configparser module. + inherit (bool, optional): Inherit default styles. Defaults to True. + encoding (str, optional): Encoding of the config file. Defaults to None. + + Returns: + Theme: A new theme instance. + """ + with open(path, encoding=encoding) as config_file: + return cls.from_file(config_file, source=path, inherit=inherit) + + +class ThemeStackError(Exception): + """Base exception for errors related to the theme stack.""" + + +class ThemeStack: + """A stack of themes. 
+ + Args: + theme (Theme): A theme instance + """ + + def __init__(self, theme: Theme) -> None: + self._entries: List[Dict[str, Style]] = [theme.styles] + self.get = self._entries[-1].get + + def push_theme(self, theme: Theme, inherit: bool = True) -> None: + """Push a theme on the top of the stack. + + Args: + theme (Theme): A Theme instance. + inherit (boolean, optional): Inherit styles from current top of stack. + """ + styles: Dict[str, Style] + styles = ( + {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy() + ) + self._entries.append(styles) + self.get = self._entries[-1].get + + def pop_theme(self) -> None: + """Pop (and discard) the top-most theme.""" + if len(self._entries) == 1: + raise ThemeStackError("Unable to pop base theme") + self._entries.pop() + self.get = self._entries[-1].get + + +if __name__ == "__main__": # pragma: no cover + theme = Theme() + print(theme.config) diff --git a/llava/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py b/llava/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py new file mode 100644 index 0000000000000000000000000000000000000000..28d742b4fd0c558b176f8db6a7613a0745b86bfc --- /dev/null +++ b/llava/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py @@ -0,0 +1,797 @@ +import inspect +import linecache +import os +import sys +from dataclasses import dataclass, field +from itertools import islice +from traceback import walk_tb +from types import ModuleType, TracebackType +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from pip._vendor.pygments.lexers import guess_lexer_for_filename +from pip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String +from pip._vendor.pygments.token import Text as TextToken +from pip._vendor.pygments.token import Token +from pip._vendor.pygments.util import ClassNotFound + +from . 
import pretty +from ._loop import loop_last +from .columns import Columns +from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group +from .constrain import Constrain +from .highlighter import RegexHighlighter, ReprHighlighter +from .panel import Panel +from .scope import render_scope +from .style import Style +from .syntax import Syntax +from .text import Text +from .theme import Theme + +WINDOWS = sys.platform == "win32" + +LOCALS_MAX_LENGTH = 10 +LOCALS_MAX_STRING = 80 + + +def install( + *, + console: Optional[Console] = None, + width: Optional[int] = 100, + code_width: Optional[int] = 88, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + locals_hide_dunder: bool = True, + locals_hide_sunder: Optional[bool] = None, + indent_guides: bool = True, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, +) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]: + """Install a rich traceback handler. + + Once installed, any tracebacks will be printed with syntax highlighting and rich formatting. + + + Args: + console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance. + width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100. + code_width (Optional[int], optional): Code width (in characters) of traceback. Defaults to 88. + extra_lines (int, optional): Extra lines of code. Defaults to 3. + theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick + a theme appropriate for the platform. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. 
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True. + locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + + Returns: + Callable: The previous exception handler that was replaced. + + """ + traceback_console = Console(stderr=True) if console is None else console + + locals_hide_sunder = ( + True + if (traceback_console.is_jupyter and locals_hide_sunder is None) + else locals_hide_sunder + ) + + def excepthook( + type_: Type[BaseException], + value: BaseException, + traceback: Optional[TracebackType], + ) -> None: + traceback_console.print( + Traceback.from_exception( + type_, + value, + traceback, + width=width, + code_width=code_width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + locals_max_length=locals_max_length, + locals_max_string=locals_max_string, + locals_hide_dunder=locals_hide_dunder, + locals_hide_sunder=bool(locals_hide_sunder), + indent_guides=indent_guides, + suppress=suppress, + max_frames=max_frames, + ) + ) + + def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover + tb_data = {} # store information about showtraceback call + default_showtraceback = ip.showtraceback # keep reference of default traceback + + def ipy_show_traceback(*args: Any, **kwargs: Any) -> None: + """wrap the default ip.showtraceback to store info for ip._showtraceback""" + nonlocal tb_data + tb_data = kwargs + default_showtraceback(*args, **kwargs) + + def 
ipy_display_traceback( + *args: Any, is_syntax: bool = False, **kwargs: Any + ) -> None: + """Internally called traceback from ip._showtraceback""" + nonlocal tb_data + exc_tuple = ip._get_exc_info() + + # do not display trace on syntax error + tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2] + + # determine correct tb_offset + compiled = tb_data.get("running_compiled_code", False) + tb_offset = tb_data.get("tb_offset", 1 if compiled else 0) + # remove ipython internal frames from trace with tb_offset + for _ in range(tb_offset): + if tb is None: + break + tb = tb.tb_next + + excepthook(exc_tuple[0], exc_tuple[1], tb) + tb_data = {} # clear data upon usage + + # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work + # this is also what the ipython docs recommends to modify when subclassing InteractiveShell + ip._showtraceback = ipy_display_traceback + # add wrapper to capture tb_data + ip.showtraceback = ipy_show_traceback + ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback( + *args, is_syntax=True, **kwargs + ) + + try: # pragma: no cover + # if within ipython, use customized traceback + ip = get_ipython() # type: ignore[name-defined] + ipy_excepthook_closure(ip) + return sys.excepthook + except Exception: + # otherwise use default system hook + old_excepthook = sys.excepthook + sys.excepthook = excepthook + return old_excepthook + + +@dataclass +class Frame: + filename: str + lineno: int + name: str + line: str = "" + locals: Optional[Dict[str, pretty.Node]] = None + last_instruction: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None + + +@dataclass +class _SyntaxError: + offset: int + filename: str + line: str + lineno: int + msg: str + + +@dataclass +class Stack: + exc_type: str + exc_value: str + syntax_error: Optional[_SyntaxError] = None + is_cause: bool = False + frames: List[Frame] = field(default_factory=list) + + +@dataclass +class Trace: + stacks: List[Stack] + + 
+class PathHighlighter(RegexHighlighter): + highlights = [r"(?P.*/)(?P.+)"] + + +class Traceback: + """A Console renderable that renders a traceback. + + Args: + trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses + the last exception. + width (Optional[int], optional): Number of characters used to traceback. Defaults to 100. + code_width (Optional[int], optional): Number of code characters used to traceback. Defaults to 88. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True. + locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False. + suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. 
+ + """ + + LEXERS = { + "": "text", + ".py": "python", + ".pxd": "cython", + ".pyx": "cython", + ".pxi": "pyrex", + } + + def __init__( + self, + trace: Optional[Trace] = None, + *, + width: Optional[int] = 100, + code_width: Optional[int] = 88, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + locals_hide_dunder: bool = True, + locals_hide_sunder: bool = False, + indent_guides: bool = True, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ): + if trace is None: + exc_type, exc_value, traceback = sys.exc_info() + if exc_type is None or exc_value is None or traceback is None: + raise ValueError( + "Value for 'trace' required if not called in except: block" + ) + trace = self.extract( + exc_type, exc_value, traceback, show_locals=show_locals + ) + self.trace = trace + self.width = width + self.code_width = code_width + self.extra_lines = extra_lines + self.theme = Syntax.get_theme(theme or "ansi_dark") + self.word_wrap = word_wrap + self.show_locals = show_locals + self.indent_guides = indent_guides + self.locals_max_length = locals_max_length + self.locals_max_string = locals_max_string + self.locals_hide_dunder = locals_hide_dunder + self.locals_hide_sunder = locals_hide_sunder + + self.suppress: Sequence[str] = [] + for suppress_entity in suppress: + if not isinstance(suppress_entity, str): + assert ( + suppress_entity.__file__ is not None + ), f"{suppress_entity!r} must be a module with '__file__' attribute" + path = os.path.dirname(suppress_entity.__file__) + else: + path = suppress_entity + path = os.path.normpath(os.path.abspath(path)) + self.suppress.append(path) + self.max_frames = max(4, max_frames) if max_frames > 0 else 0 + + @classmethod + def from_exception( + cls, + exc_type: Type[Any], + exc_value: BaseException, + traceback: Optional[TracebackType], + *, + width: 
Optional[int] = 100, + code_width: Optional[int] = 88, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + locals_hide_dunder: bool = True, + locals_hide_sunder: bool = False, + indent_guides: bool = True, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ) -> "Traceback": + """Create a traceback from exception info + + Args: + exc_type (Type[BaseException]): Exception type. + exc_value (BaseException): Exception value. + traceback (TracebackType): Python Traceback object. + width (Optional[int], optional): Number of characters used to traceback. Defaults to 100. + code_width (Optional[int], optional): Number of code characters used to traceback. Defaults to 88. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback. + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True. + locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False. + suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. 
+ + Returns: + Traceback: A Traceback instance that may be printed. + """ + rich_traceback = cls.extract( + exc_type, + exc_value, + traceback, + show_locals=show_locals, + locals_max_length=locals_max_length, + locals_max_string=locals_max_string, + locals_hide_dunder=locals_hide_dunder, + locals_hide_sunder=locals_hide_sunder, + ) + + return cls( + rich_traceback, + width=width, + code_width=code_width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + indent_guides=indent_guides, + locals_max_length=locals_max_length, + locals_max_string=locals_max_string, + locals_hide_dunder=locals_hide_dunder, + locals_hide_sunder=locals_hide_sunder, + suppress=suppress, + max_frames=max_frames, + ) + + @classmethod + def extract( + cls, + exc_type: Type[BaseException], + exc_value: BaseException, + traceback: Optional[TracebackType], + *, + show_locals: bool = False, + locals_max_length: int = LOCALS_MAX_LENGTH, + locals_max_string: int = LOCALS_MAX_STRING, + locals_hide_dunder: bool = True, + locals_hide_sunder: bool = False, + ) -> Trace: + """Extract traceback information. + + Args: + exc_type (Type[BaseException]): Exception type. + exc_value (BaseException): Exception value. + traceback (TracebackType): Python Traceback object. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. + Defaults to 10. + locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. + locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True. + locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False. + + Returns: + Trace: A Trace instance which you can use to construct a `Traceback`. 
+ """ + + stacks: List[Stack] = [] + is_cause = False + + from pip._vendor.rich import _IMPORT_CWD + + def safe_str(_object: Any) -> str: + """Don't allow exceptions from __str__ to propagate.""" + try: + return str(_object) + except Exception: + return "" + + while True: + stack = Stack( + exc_type=safe_str(exc_type.__name__), + exc_value=safe_str(exc_value), + is_cause=is_cause, + ) + + if isinstance(exc_value, SyntaxError): + stack.syntax_error = _SyntaxError( + offset=exc_value.offset or 0, + filename=exc_value.filename or "?", + lineno=exc_value.lineno or 0, + line=exc_value.text or "", + msg=exc_value.msg, + ) + + stacks.append(stack) + append = stack.frames.append + + def get_locals( + iter_locals: Iterable[Tuple[str, object]] + ) -> Iterable[Tuple[str, object]]: + """Extract locals from an iterator of key pairs.""" + if not (locals_hide_dunder or locals_hide_sunder): + yield from iter_locals + return + for key, value in iter_locals: + if locals_hide_dunder and key.startswith("__"): + continue + if locals_hide_sunder and key.startswith("_"): + continue + yield key, value + + for frame_summary, line_no in walk_tb(traceback): + filename = frame_summary.f_code.co_filename + + last_instruction: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] + last_instruction = None + if sys.version_info >= (3, 11): + instruction_index = frame_summary.f_lasti // 2 + instruction_position = next( + islice( + frame_summary.f_code.co_positions(), + instruction_index, + instruction_index + 1, + ) + ) + ( + start_line, + end_line, + start_column, + end_column, + ) = instruction_position + if ( + start_line is not None + and end_line is not None + and start_column is not None + and end_column is not None + ): + last_instruction = ( + (start_line, start_column), + (end_line, end_column), + ) + + if filename and not filename.startswith("<"): + if not os.path.isabs(filename): + filename = os.path.join(_IMPORT_CWD, filename) + if frame_summary.f_locals.get("_rich_traceback_omit", 
False): + continue + + frame = Frame( + filename=filename or "?", + lineno=line_no, + name=frame_summary.f_code.co_name, + locals=( + { + key: pretty.traverse( + value, + max_length=locals_max_length, + max_string=locals_max_string, + ) + for key, value in get_locals(frame_summary.f_locals.items()) + if not (inspect.isfunction(value) or inspect.isclass(value)) + } + if show_locals + else None + ), + last_instruction=last_instruction, + ) + append(frame) + if frame_summary.f_locals.get("_rich_traceback_guard", False): + del stack.frames[:] + + cause = getattr(exc_value, "__cause__", None) + if cause: + exc_type = cause.__class__ + exc_value = cause + # __traceback__ can be None, e.g. for exceptions raised by the + # 'multiprocessing' module + traceback = cause.__traceback__ + is_cause = True + continue + + cause = exc_value.__context__ + if cause and not getattr(exc_value, "__suppress_context__", False): + exc_type = cause.__class__ + exc_value = cause + traceback = cause.__traceback__ + is_cause = False + continue + # No cover, code is reached but coverage doesn't recognize it. 
+ break # pragma: no cover + + trace = Trace(stacks=stacks) + return trace + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + theme = self.theme + background_style = theme.get_background_style() + token_style = theme.get_style_for_token + + traceback_theme = Theme( + { + "pretty": token_style(TextToken), + "pygments.text": token_style(Token), + "pygments.string": token_style(String), + "pygments.function": token_style(Name.Function), + "pygments.number": token_style(Number), + "repr.indent": token_style(Comment) + Style(dim=True), + "repr.str": token_style(String), + "repr.brace": token_style(TextToken) + Style(bold=True), + "repr.number": token_style(Number), + "repr.bool_true": token_style(Keyword.Constant), + "repr.bool_false": token_style(Keyword.Constant), + "repr.none": token_style(Keyword.Constant), + "scope.border": token_style(String.Delimiter), + "scope.equals": token_style(Operator), + "scope.key": token_style(Name), + "scope.key.special": token_style(Name.Constant) + Style(dim=True), + }, + inherit=False, + ) + + highlighter = ReprHighlighter() + for last, stack in loop_last(reversed(self.trace.stacks)): + if stack.frames: + stack_renderable: ConsoleRenderable = Panel( + self._render_stack(stack), + title="[traceback.title]Traceback [dim](most recent call last)", + style=background_style, + border_style="traceback.border", + expand=True, + padding=(0, 1), + ) + stack_renderable = Constrain(stack_renderable, self.width) + with console.use_theme(traceback_theme): + yield stack_renderable + if stack.syntax_error is not None: + with console.use_theme(traceback_theme): + yield Constrain( + Panel( + self._render_syntax_error(stack.syntax_error), + style=background_style, + border_style="traceback.border.syntax_error", + expand=True, + padding=(0, 1), + width=self.width, + ), + self.width, + ) + yield Text.assemble( + (f"{stack.exc_type}: ", "traceback.exc_type"), + highlighter(stack.syntax_error.msg), + ) + elif 
stack.exc_value: + yield Text.assemble( + (f"{stack.exc_type}: ", "traceback.exc_type"), + highlighter(stack.exc_value), + ) + else: + yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type")) + + if not last: + if stack.is_cause: + yield Text.from_markup( + "\n[i]The above exception was the direct cause of the following exception:\n", + ) + else: + yield Text.from_markup( + "\n[i]During handling of the above exception, another exception occurred:\n", + ) + + @group() + def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult: + highlighter = ReprHighlighter() + path_highlighter = PathHighlighter() + if syntax_error.filename != "": + if os.path.exists(syntax_error.filename): + text = Text.assemble( + (f" {syntax_error.filename}", "pygments.string"), + (":", "pygments.text"), + (str(syntax_error.lineno), "pygments.number"), + style="pygments.text", + ) + yield path_highlighter(text) + syntax_error_text = highlighter(syntax_error.line.rstrip()) + syntax_error_text.no_wrap = True + offset = min(syntax_error.offset - 1, len(syntax_error_text)) + syntax_error_text.stylize("bold underline", offset, offset) + syntax_error_text += Text.from_markup( + "\n" + " " * offset + "[traceback.offset]▲[/]", + style="pygments.text", + ) + yield syntax_error_text + + @classmethod + def _guess_lexer(cls, filename: str, code: str) -> str: + ext = os.path.splitext(filename)[-1] + if not ext: + # No extension, look at first line to see if it is a hashbang + # Note, this is an educated guess and not a guarantee + # If it fails, the only downside is that the code is highlighted strangely + new_line_index = code.index("\n") + first_line = code[:new_line_index] if new_line_index != -1 else code + if first_line.startswith("#!") and "python" in first_line.lower(): + return "python" + try: + return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name + except ClassNotFound: + return "text" + + @group() + def _render_stack(self, stack: Stack) -> 
RenderResult: + path_highlighter = PathHighlighter() + theme = self.theme + + def read_code(filename: str) -> str: + """Read files, and cache results on filename. + + Args: + filename (str): Filename to read + + Returns: + str: Contents of file + """ + return "".join(linecache.getlines(filename)) + + def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]: + if frame.locals: + yield render_scope( + frame.locals, + title="locals", + indent_guides=self.indent_guides, + max_length=self.locals_max_length, + max_string=self.locals_max_string, + ) + + exclude_frames: Optional[range] = None + if self.max_frames != 0: + exclude_frames = range( + self.max_frames // 2, + len(stack.frames) - self.max_frames // 2, + ) + + excluded = False + for frame_index, frame in enumerate(stack.frames): + if exclude_frames and frame_index in exclude_frames: + excluded = True + continue + + if excluded: + assert exclude_frames is not None + yield Text( + f"\n... {len(exclude_frames)} frames hidden ...", + justify="center", + style="traceback.error", + ) + excluded = False + + first = frame_index == 0 + frame_filename = frame.filename + suppressed = any(frame_filename.startswith(path) for path in self.suppress) + + if os.path.exists(frame.filename): + text = Text.assemble( + path_highlighter(Text(frame.filename, style="pygments.string")), + (":", "pygments.text"), + (str(frame.lineno), "pygments.number"), + " in ", + (frame.name, "pygments.function"), + style="pygments.text", + ) + else: + text = Text.assemble( + "in ", + (frame.name, "pygments.function"), + (":", "pygments.text"), + (str(frame.lineno), "pygments.number"), + style="pygments.text", + ) + if not frame.filename.startswith("<") and not first: + yield "" + yield text + if frame.filename.startswith("<"): + yield from render_locals(frame) + continue + if not suppressed: + try: + code = read_code(frame.filename) + if not code: + # code may be an empty string if the file doesn't exist, OR + # if the traceback filename is 
generated dynamically + continue + lexer_name = self._guess_lexer(frame.filename, code) + syntax = Syntax( + code, + lexer_name, + theme=theme, + line_numbers=True, + line_range=( + frame.lineno - self.extra_lines, + frame.lineno + self.extra_lines, + ), + highlight_lines={frame.lineno}, + word_wrap=self.word_wrap, + code_width=self.code_width, + indent_guides=self.indent_guides, + dedent=False, + ) + yield "" + except Exception as error: + yield Text.assemble( + (f"\n{error}", "traceback.error"), + ) + else: + if frame.last_instruction is not None: + start, end = frame.last_instruction + syntax.stylize_range( + style="traceback.error_range", + start=start, + end=end, + style_before=True, + ) + yield ( + Columns( + [ + syntax, + *render_locals(frame), + ], + padding=1, + ) + if frame.locals + else syntax + ) + + +if __name__ == "__main__": # pragma: no cover + install(show_locals=True) + import sys + + def bar( + a: Any, + ) -> None: # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑 + one = 1 + print(one / a) + + def foo(a: Any) -> None: + _rich_traceback_guard = True + zed = { + "characters": { + "Paul Atreides", + "Vladimir Harkonnen", + "Thufir Hawat", + "Duncan Idaho", + }, + "atomic_types": (None, False, True), + } + bar(a) + + def error() -> None: + foo(0) + + error() diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/__init__.py b/minigpt2/lib/python3.10/site-packages/imageio/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca78dd22f2e690bc5115565e9e0c11b67929031c --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/config/__init__.py @@ -0,0 +1,16 @@ +from .extensions import ( + extension_list, + known_extensions, + FileExtension, + video_extensions, +) +from .plugins import known_plugins, PluginConfig + +__all__ = [ + "known_plugins", + "PluginConfig", + "extension_list", + "known_extensions", + "FileExtension", + "video_extensions", +] diff --git 
a/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c09665f63b2a8e3076fc85ca2fce0379bf31b80 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547a68221079ac392da555a0e3caacf71779349a Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..955f11bd0f4a2d64b0e6d998c47358e4473a0bcf Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.py b/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..00c716246ad5683dfbf505c9347561ab5c13bd1c --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.py @@ -0,0 +1,2002 @@ +""" +A set of objects representing each file extension recognized by ImageIO. If an +extension is not listed here it is still supported, as long as there exists a +supporting backend. + +""" + + +class FileExtension: + """File Extension Metadata + + This class holds information about a image file format associated with a + given extension. 
This information is used to track plugins that are known to + be able to handle a particular format. It also contains additional + information about a format, which is used when creating the supported format + docs. + + Plugins known to be able to handle this format are ordered by a ``priority`` + list. This list is used to determine the ideal plugin to use when choosing a + plugin based on file extension. + + Parameters + ---------- + extension : str + The name of the extension including the initial dot, e.g. ".png". + priority : List + A list of plugin names (entries in config.known_plugins) that can handle + this format. The position of a plugin expresses a preference, e.g. + ["plugin1", "plugin2"] indicates that, if available, plugin1 should be + preferred over plugin2 when handling a request related to this format. + name : str + The full name of the format. + description : str + A description of the format. + external_link : str + A link to further information about the format. Typically, the format's + specification. + volume_support : str + If True, the format/extension supports volumetric image data. 
+ + Examples + -------- + >>> FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ) + + """ + + def __init__( + self, + *, + extension, + priority, + name=None, + description=None, + external_link=None, + volume_support=False + ): + self.extension = extension + self.priority = priority + self.name = name + self.description = description + self.external_link = external_link + self.default_priority = priority.copy() + self.volume_support = volume_support + + def reset(self): + self.priority = self.default_priority.copy() + + +extension_list = [ + FileExtension( + name="Hasselblad raw", + extension=".3fr", + priority=["RAW-FI"], + ), + FileExtension( + name="Sony alpha", + extension=".arw", + priority=["RAW-FI"], + ), + FileExtension( + name="Animated Portable Network Graphics", + external_link="https://en.wikipedia.org/wiki/APNG", + extension=".apng", + priority=["pillow", "pyav"], + ), + FileExtension( + name="Audio Video Interleave", + extension=".avi", + priority=["FFMPEG"], + ), + FileExtension( + name="Casio raw format", + extension=".bay", + priority=["RAW-FI"], + ), + FileExtension( + extension=".blp", + priority=["pillow"], + ), + FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + name="Device-Independent Bitmap", + extension=".dip", + priority=["opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + name="Re-Volt mipmap", + extension=".bmq", + priority=["RAW-FI"], + ), + FileExtension( + name="Binary Structured Data Format", + extension=".bsdf", + priority=["BSDF"], + external_link="http://bsdf.io/", + ), + FileExtension( + name="Binary Universal Form for the Representation of meteorological data", + extension=".bufr", + priority=["pillow", 
"BUFR-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".bw", + priority=["pillow", "SGI-PIL", "SGI-FI"], + ), + FileExtension( + name="Scirra Construct", + extension=".cap", + priority=["RAW-FI"], + ), + FileExtension( + name="AMETEK High Speed Camera Format", + extension=".cine", + priority=["RAW-FI"], + external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution", + ), + FileExtension(extension=".cr2", priority=["RAW-FI"]), + FileExtension( + extension=".crw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".cs1", + priority=["RAW-FI"], + ), + FileExtension( + name="Computerized Tomography", + extension=".ct", + priority=["DICOM"], + ), + FileExtension( + name="Windows Cursor Icons", + extension=".cur", + priority=["pillow", "CUR-PIL"], + ), + FileExtension( + name="Dr. Halo", + extension=".cut", + priority=["CUT-FI"], + ), + FileExtension( + extension=".dc2", + priority=["RAW-FI"], + ), + FileExtension( + name="DICOM file format", + extension=".dcm", + priority=["DICOM", "ITK"], + ), + FileExtension( + extension=".dcr", + priority=["RAW-FI"], + ), + FileExtension( + name="Intel DCX", + extension=".dcx", + priority=["pillow", "DCX-PIL"], + ), + FileExtension( + name="DirectX Texture Container", + extension=".dds", + priority=["pillow", "DDS-FI", "DDS-PIL"], + ), + FileExtension( + name="Windows Bitmap", + extension=".dib", + priority=["pillow", "DIB-PIL"], + ), + FileExtension( + name="DICOM file format", + extension=".dicom", + priority=["ITK"], + ), + FileExtension( + extension=".dng", + priority=["RAW-FI"], + ), + FileExtension( + extension=".drf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".dsc", + priority=["RAW-FI"], + ), + FileExtension( + name="Enhanced Compression Wavelet", + extension=".ecw", + priority=["GDAL"], + ), + FileExtension( + name="Windows Metafile", + extension=".emf", + 
priority=["pillow", "WMF-PIL"], + ), + FileExtension( + name="Encapsulated Postscript", + extension=".eps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + extension=".erf", + priority=["RAW-FI"], + ), + FileExtension( + name="OpenEXR", + extension=".exr", + external_link="https://openexr.readthedocs.io/en/latest/", + priority=["EXR-FI", "pyav", "opencv"], + ), + FileExtension( + extension=".fff", + priority=["RAW-FI"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fit", + priority=["pillow", "FITS-PIL", "FITS"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fits", + priority=["pillow", "FITS-PIL", "FITS", "pyav"], + ), + FileExtension( + name="Autodesk FLC Animation", + extension=".flc", + priority=["pillow", "FLI-PIL"], + ), + FileExtension( + name="Autodesk FLI Animation", + extension=".fli", + priority=["pillow", "FLI-PIL"], + ), + FileExtension( + name="Kodak FlashPix", + extension=".fpx", + priority=["pillow", "FPX-PIL"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftc", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fts", + priority=["FITS"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftu", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fz", + priority=["FITS"], + ), + FileExtension( + name="Raw fax format CCITT G.3", + extension=".g3", + priority=["G3-FI"], + ), + FileExtension( + name="GIMP brush file", + extension=".gbr", + priority=["pillow", "GBR-PIL"], + ), + FileExtension( + name="Grassroots DICOM", + extension=".gdcm", + priority=["ITK"], + ), + FileExtension( + name="Graphics Interchange Format", + extension=".gif", + priority=["pillow", "GIF-PIL", "pyav"], + ), + FileExtension( + name="UMDS GIPL", + extension=".gipl", + 
priority=["ITK"], + ), + FileExtension( + name="gridded meteorological data", + extension=".grib", + priority=["pillow", "GRIB-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".h5", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf5", + priority=["ITK"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".hdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="High Dynamic Range Image", + extension=".hdr", + priority=["HDR-FI", "ITK", "opencv"], + ), + FileExtension( + extension=".ia", + priority=["RAW-FI"], + ), + FileExtension( + extension=".icb", + priority=["pillow"], + ), + FileExtension( + name="Mac OS Icon File", + extension=".icns", + priority=["pillow", "ICNS-PIL"], + ), + FileExtension( + name="Windows Icon File", + extension=".ico", + priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".iff", + priority=["IFF-FI"], + ), + FileExtension( + name="IPTC/NAA", + extension=".iim", + priority=["pillow", "IPTC-PIL"], + ), + FileExtension( + extension=".iiq", + priority=["RAW-FI"], + ), + FileExtension( + name="IFUNC Image Memory", + extension=".im", + priority=["pillow", "IM-PIL"], + ), + FileExtension( + extension=".img", + priority=["ITK", "GDAL"], + ), + FileExtension( + extension=".img.gz", + priority=["ITK"], + ), + FileExtension( + name="IM Tools", + extension=".IMT", + priority=["pillow", "IMT-PIL"], + ), + FileExtension( + name="Image Processing Lab", + extension=".ipl", + priority=["ITK"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2c", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2k", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG", + 
extension=".jfif", + priority=["pillow", "JPEG-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jif", + priority=["JPEG-FI"], + ), + FileExtension( + name="JPEG Network Graphics", + extension=".jng", + priority=["JNG-FI"], + ), + FileExtension( + name="JPEG 2000", + extension=".jp2", + priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpc", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jpe", + priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpeg", + priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpf", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpg", + priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpx", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".jxr", + priority=["JPEG-XR-FI"], + ), + FileExtension( + extension=".k25", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kc2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kdc", + priority=["RAW-FI"], + ), + FileExtension( + name="C64 Koala Graphics", + extension=".koa", + priority=["KOALA-FI"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".lbm", + priority=["IFF-FI"], + ), + FileExtension( + name="Lytro F01", + extension=".lfp", + priority=["LYTRO-LFP"], + ), + FileExtension( + name="Lytro Illum", + extension=".lfr", + priority=["LYTRO-LFR"], + ), + FileExtension( + name="ZEISS LSM", + extension=".lsm", + priority=["tifffile", "ITK", "TIFF"], + ), + FileExtension( + name="McIdas area file", + extension=".MCIDAS", + priority=["pillow", "MCIDAS-PIL"], + 
external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html", + ), + FileExtension( + extension=".mdc", + priority=["RAW-FI"], + ), + FileExtension( + extension=".mef", + priority=["RAW-FI"], + ), + FileExtension( + name="FreeSurfer File Format", + extension=".mgh", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage", + extension=".mha", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage Header", + extension=".mhd", + priority=["ITK"], + ), + FileExtension( + name="Microsoft Image Composer", + extension=".mic", + priority=["pillow", "MIC-PIL"], + ), + FileExtension( + name="Matroska Multimedia Container", + extension=".mkv", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Medical Imaging NetCDF", + extension=".mnc", + priority=["ITK"], + ), + FileExtension( + name="Medical Imaging NetCDF 2", + extension=".mnc2", + priority=["ITK"], + ), + FileExtension( + name="Leaf Raw Image Format", + extension=".mos", + priority=["RAW-FI"], + ), + FileExtension( + name="QuickTime File Format", + extension=".mov", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-4 Part 14", + extension=".mp4", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-1 Moving Picture Experts Group", + extension=".mpeg", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Moving Picture Experts Group", + extension=".mpg", + priority=["pillow", "FFMPEG", "pyav"], + ), + FileExtension( + name="JPEG Multi-Picture Format", + extension=".mpo", + priority=["pillow", "MPO-PIL"], + ), + FileExtension( + name="Magnetic resonance imaging", + extension=".mri", + priority=["DICOM"], + ), + FileExtension( + extension=".mrw", + priority=["RAW-FI"], + ), + FileExtension( + name="Windows Paint", + extension=".msp", + priority=["pillow", "MSP-PIL"], + ), + FileExtension( + extension=".nef", + priority=["RAW-FI", "rawpy"], + ), + FileExtension( + extension=".nhdr", + priority=["ITK"], + ), + FileExtension( + 
extension=".nia", + priority=["ITK"], + ), + FileExtension( + extension=".nii", + priority=["ITK"], + ), + FileExtension( + name="nii.gz", + extension=".nii.gz", + priority=["ITK"], + ), + FileExtension( + name="Numpy Array", + extension=".npz", + priority=["NPZ"], + volume_support=True, + ), + FileExtension( + extension=".nrrd", + priority=["ITK"], + ), + FileExtension( + extension=".nrw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".orf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".palm", + priority=["pillow"], + ), + FileExtension( + name="Portable Bitmap", + extension=".pbm", + priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Kodak PhotoCD", + extension=".pcd", + priority=["pillow", "PCD-FI", "PCD-PIL"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pct", + priority=["PICT-FI"], + ), + FileExtension( + name="Zsoft Paintbrush", + extension=".PCX", + priority=["pillow", "PCX-FI", "PCX-PIL"], + ), + FileExtension( + extension=".pdf", + priority=["pillow"], + ), + FileExtension( + extension=".pef", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pfm", + priority=["PFM-FI", "pyav", "opencv"], + ), + FileExtension( + name="Portable Greymap", + extension=".pgm", + priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pic", + priority=["PICT-FI", "ITK", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pict", + priority=["PICT-FI"], + ), + FileExtension( + name="Portable Network Graphics", + extension=".png", + priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"], + ), + FileExtension( + name="Portable Image Format", + extension=".pnm", + priority=["pillow", "opencv"], + ), + FileExtension( + name="Pbmplus image", + extension=".ppm", + priority=["pillow", "PPM-PIL", "pyav"], + ), + FileExtension( + name="Pbmplus image", + extension=".pbm", + priority=["pillow", "PPM-PIL", 
"PPM-FI"], + ), + FileExtension( + name="Portable image format", + extension=".pxm", + priority=["opencv"], + ), + FileExtension( + name="Portable Pixelmap (ASCII)", + extension=".ppm", + priority=["PPM-FI", "opencv"], + ), + FileExtension( + name="Portable Pixelmap (Raw)", + extension=".ppm", + priority=["PPMRAW-FI"], + ), + FileExtension( + name="Ghostscript", + extension=".ps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + name="Adope Photoshop 2.5 and 3.0", + extension=".psd", + priority=["pillow", "PSD-PIL", "PSD-FI"], + ), + FileExtension( + extension=".ptx", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pxn", + priority=["RAW-FI"], + ), + FileExtension( + name="PIXAR raster image", + extension=".pxr", + priority=["pillow", "PIXAR-PIL"], + ), + FileExtension( + extension=".qtk", + priority=["RAW-FI"], + ), + FileExtension( + extension=".raf", + priority=["RAW-FI"], + ), + FileExtension( + name="Sun Raster File", + extension=".ras", + priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"], + ), + FileExtension( + name="Sun Raster File", + extension=".sr", + priority=["opencv"], + ), + FileExtension( + extension=".raw", + priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"], + ), + FileExtension( + extension=".rdc", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgb", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgba", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + extension=".rw2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwl", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwz", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".sgi", + priority=["pillow", "SGI-PIL", "pyav"], + ), + FileExtension( + name="SPE File Format", + extension=".spe", + priority=["SPE"], + ), + FileExtension( + extension=".SPIDER", + priority=["pillow", 
"SPIDER-PIL"], + ), + FileExtension( + extension=".sr2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".sti", + priority=["RAW-FI"], + ), + FileExtension( + extension=".stk", + priority=["tifffile", "TIFF"], + ), + FileExtension( + name="ShockWave Flash", + extension=".swf", + priority=["SWF", "pyav"], + ), + FileExtension( + name="Truevision TGA", + extension=".targa", + priority=["pillow", "TARGA-FI"], + ), + FileExtension( + name="Truevision TGA", + extension=".tga", + priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"], + ), + FileExtension( + name="Tagged Image File", + extension=".tif", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + name="Tagged Image File Format", + extension=".tiff", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + extension=".vda", + priority=["pillow"], + ), + FileExtension( + extension=".vst", + priority=["pillow"], + ), + FileExtension( + extension=".vtk", + priority=["ITK"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wap", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbm", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbmp", + priority=["WBMP-FI"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".wdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="Matroska", + extension=".webm", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Google WebP", + extension=".webp", + priority=["pillow", "WEBP-FI", "pyav", "opencv"], + ), + FileExtension( + name="Windows Meta File", + extension=".wmf", + priority=["pillow", 
"WMF-PIL"], + ), + FileExtension( + name="Windows Media Video", + extension=".wmv", + priority=["FFMPEG"], + ), + FileExtension( + name="X11 Bitmap", + extension=".xbm", + priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"], + ), + FileExtension( + name="X11 Pixel Map", + extension=".xpm", + priority=["pillow", "XPM-PIL", "XPM-FI"], + ), + FileExtension( + name="Thumbnail Image", + extension=".XVTHUMB", + priority=["pillow", "XVTHUMB-PIL"], + ), + FileExtension( + extension=".dpx", + priority=["pyav"], + ), + FileExtension( + extension=".im1", + priority=["pyav"], + ), + FileExtension( + extension=".im24", + priority=["pyav"], + ), + FileExtension( + extension=".im8", + priority=["pyav"], + ), + FileExtension( + extension=".jls", + priority=["pyav"], + ), + FileExtension( + extension=".ljpg", + priority=["pyav"], + ), + FileExtension( + extension=".pam", + priority=["pyav"], + ), + FileExtension( + extension=".pcx", + priority=["pyav"], + ), + FileExtension( + extension=".pgmyuv", + priority=["pyav"], + ), + FileExtension( + extension=".pix", + priority=["pyav"], + ), + FileExtension( + extension=".ppm", + priority=["pyav"], + ), + FileExtension( + extension=".rs", + priority=["pyav"], + ), + FileExtension( + extension=".sun", + priority=["pyav"], + ), + FileExtension( + extension=".sunras", + priority=["pyav"], + ), + FileExtension( + extension=".xface", + priority=["pyav"], + ), + FileExtension( + extension=".xwd", + priority=["pyav"], + ), + FileExtension( + extension=".y", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".isma", + 
priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GPP AMR", + extension=".amr", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".A64", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".a64", + priority=["pyav"], + ), + FileExtension( + name="Adobe Filmstrip", + extension=".flm", + priority=["pyav"], + ), + FileExtension( + name="AMV", + extension=".amv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming 
Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="AV1 Annex B", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AV1 low overhead OBU", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AVI (Audio Video Interleaved)", + extension=".avi", + priority=["pyav"], + ), + FileExtension( + name="AVR (Audio Visual Research)", + extension=".avr", + priority=["pyav"], + ), + FileExtension( + name="Beam Software SIFF", + extension=".vb", + priority=["pyav"], + ), + FileExtension( + name="CD Graphics", + extension=".cdg", + priority=["pyav"], + ), + FileExtension( + name="Commodore CDXL video", + extension=".cdxl", + priority=["pyav"], + ), + FileExtension( + name="Commodore CDXL video", + extension=".xl", + priority=["pyav"], + ), + FileExtension( + name="DASH Muxer", + extension=".mpd", + priority=["pyav"], + ), + FileExtension( + name="Digital Pictures SGA", + extension=".sga", + priority=["pyav"], + ), + FileExtension( + name="Discworld II BMV", + extension=".bmv", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dif", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dv", + priority=["pyav"], + ), + FileExtension( + name="F4V Adobe Flash Video", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="GXF (General eXchange Format)", + extension=".gxf", + priority=["pyav"], + ), + FileExtension( + name="iCE Draw File", + extension=".idf", + priority=["pyav"], + ), + FileExtension( + 
name="IFV CCTV DVR", + extension=".ifv", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="IVR (Internet Video Recording)", + extension=".ivr", + priority=["pyav"], + ), + FileExtension( + name="Konami PS2 SVAG", + extension=".svag", + priority=["pyav"], + ), + FileExtension( + name="KUX (YouKu)", + extension=".kux", + priority=["pyav"], + ), + FileExtension( + name="live RTMP FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="Loki SDL MJPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="LVF", + extension=".lvf", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mks", + priority=["pyav"], + ), + FileExtension( + name="Microsoft XMV", + extension=".xmv", + priority=["pyav"], + ), + FileExtension( + name="MIME multipart JPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MODS", + extension=".mods", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MOFLEX", + extension=".moflex", + priority=["pyav"], + ), + FileExtension( + name="Motion Pixels MVI", + extension=".mvi", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ism", + 
priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (DVD VOB)", + extension=".dvd", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (SVCD)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (VOB)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2t", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2ts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".mts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".ts", + priority=["pyav"], + ), + FileExtension( + name="Musepack", + extension=".mpc", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format) Operational Pattern Atom", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format)", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MxPEG clip", + extension=".mxg", + priority=["pyav"], + ), + FileExtension( + name="NC camera feed", + extension=".v", + priority=["pyav"], + ), + FileExtension( + name="NUT", + extension=".nut", + priority=["pyav"], + ), + FileExtension( + name="Ogg Video", + extension=".ogv", + priority=["pyav"], + ), + FileExtension( + name="Ogg", + extension=".ogg", + 
priority=["pyav"], + ), + FileExtension( + name="On2 IVF", + extension=".ivf", + priority=["pyav"], + ), + FileExtension( + name="PSP MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="Psygnosis YOP", + extension=".yop", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs2", + priority=["pyav"], + ), + FileExtension( + name="raw AVS3-P2/IEEE1857.10", + extension=".avs3", + priority=["pyav"], + ), + FileExtension( + name="raw Chinese AVS (Audio Video Standard) video", + extension=".cavs", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".drc", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".vc2", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhd", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhr", + priority=["pyav"], + ), + FileExtension( + name="raw 
GSM", + extension=".gsm", + priority=["pyav"], + ), + FileExtension( + name="raw H.261", + extension=".h261", + priority=["pyav"], + ), + FileExtension( + name="raw H.263", + extension=".h263", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".264", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".avc", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".h264", + priority=["pyav", "FFMPEG"], + ), + FileExtension( + name="raw H.264 video", + extension=".h26l", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".h265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".hevc", + priority=["pyav"], + ), + FileExtension( + name="raw id RoQ", + extension=".roq", + priority=["pyav"], + ), + FileExtension( + name="raw Ingenient MJPEG", + extension=".cgi", + priority=["pyav"], + ), + FileExtension( + name="raw IPU Video", + extension=".ipu", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG 2000 video", + extension=".j2k", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mpo", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".m1v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-2 video", + extension=".m2v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-4 video", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="raw VC-1 video", + extension=".vc1", + 
priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".cif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".qcif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".rgb", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".yuv", + priority=["pyav"], + ), + FileExtension( + name="RealMedia", + extension=".rm", + priority=["pyav"], + ), + FileExtension( + name="SDR2", + extension=".sdr2", + priority=["pyav"], + ), + FileExtension( + name="Sega FILM / CPK", + extension=".cpk", + priority=["pyav"], + ), + FileExtension( + name="SER (Simple uncompressed video format for astronomical capturing)", + extension=".ser", + priority=["pyav"], + ), + FileExtension( + name="Simbiosis Interactive IMX", + extension=".imx", + priority=["pyav"], + ), + FileExtension( + name="Square SVS", + extension=".svs", + priority=["tifffile", "pyav"], + ), + FileExtension( + name="TiVo TY Stream", + extension=".ty", + priority=["pyav"], + ), + FileExtension( + name="TiVo TY Stream", + extension=".ty+", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".v210", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".yuv10", + priority=["pyav"], + ), + FileExtension( + name="VC-1 test bitstream", + extension=".rcv", + priority=["pyav"], + ), + FileExtension( + name="Video CCTV DAT", + extension=".dat", + priority=["pyav"], + ), + FileExtension( + name="Video DAV", + extension=".dav", + priority=["pyav"], + ), + FileExtension( + name="Vivo", + extension=".viv", + priority=["pyav"], + ), + FileExtension( + name="WebM Chunk Muxer", + extension=".chk", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mks", + priority=["pyav"], + ), + 
FileExtension( + name="Windows Television (WTV)", + extension=".wtv", + priority=["pyav"], + ), + FileExtension( + name="Xilam DERF", + extension=".adp", + priority=["pyav"], + ), + FileExtension( + name="YUV4MPEG pipe", + extension=".y4m", + priority=["pyav"], + ), + FileExtension( + extension=".qpi", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".pcoraw", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".rec", + priority=["tifffile"], + ), + FileExtension( + name="Perkin Elmer Vectra", + extension=".qptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptif", + priority=["tifffile"], + ), + FileExtension( + name="Opticks Gel", + extension=".gel", + priority=["tifffile"], + ), + FileExtension( + name="Zoomify Image Format", + extension=".zif", + priority=["tifffile"], + ), + FileExtension( + name="Hamamatsu Slide Scanner", + extension=".ndpi", + priority=["tifffile"], + ), + FileExtension( + name="Roche Digital Pathology", + extension=".bif", + priority=["tifffile"], + ), + FileExtension( + extension=".tf8", + priority=["tifffile"], + ), + FileExtension( + extension=".btf", + priority=["tifffile"], + ), + FileExtension( + name="High Efficiency Image File Format", + extension=".heic", + priority=["pillow"], + ), + FileExtension( + name="AV1 Image File Format", + extension=".avif", + priority=["pillow"], + ), +] +extension_list.sort(key=lambda x: x.extension) + + +known_extensions = dict() +for ext in extension_list: + if ext.extension not in known_extensions: + known_extensions[ext.extension] = list() + known_extensions[ext.extension].append(ext) + +extension_list = [ext for ext_list in known_extensions.values() for ext in ext_list] + +_video_extension_strings = [ + ".264", + ".265", + ".3g2", + ".3gp", + ".a64", + ".A64", + ".adp", + ".amr", + ".amv", + 
".asf", + ".avc", + ".avi", + ".avr", + ".avs", + ".avs2", + ".avs3", + ".bmv", + ".cavs", + ".cdg", + ".cdxl", + ".cgi", + ".chk", + ".cif", + ".cpk", + ".dat", + ".dav", + ".dif", + ".dnxhd", + ".dnxhr", + ".drc", + ".dv", + ".dvd", + ".f4v", + ".flm", + ".flv", + ".gsm", + ".gxf", + ".h261", + ".h263", + ".h264", + ".h265", + ".h26l", + ".hevc", + ".idf", + ".ifv", + ".imx", + ".ipu", + ".ism", + ".isma", + ".ismv", + ".ivf", + ".ivr", + ".j2k", + ".kux", + ".lvf", + ".m1v", + ".m2t", + ".m2ts", + ".m2v", + ".m4a", + ".m4b", + ".m4v", + ".mj2", + ".mjpeg", + ".mjpg", + ".mk3d", + ".mka", + ".mks", + ".mkv", + ".mods", + ".moflex", + ".mov", + ".mp4", + ".mpc", + ".mpd", + ".mpeg", + ".mpg", + ".mpo", + ".mts", + ".mvi", + ".mxf", + ".mxg", + ".nut", + ".obu", + ".ogg", + ".ogv", + ".psp", + ".qcif", + ".rcv", + ".rgb", + ".rm", + ".roq", + ".sdr2", + ".ser", + ".sga", + ".svag", + ".svs", + ".ts", + ".ty", + ".ty+", + ".v", + ".v210", + ".vb", + ".vc1", + ".vc2", + ".viv", + ".vob", + ".webm", + ".wmv", + ".wtv", + ".xl", + ".xmv", + ".y4m", + ".yop", + ".yuv", + ".yuv10", +] +video_extensions = list() +for ext_string in _video_extension_strings: + formats = known_extensions[ext_string] + video_extensions.append(formats[0]) +video_extensions.sort(key=lambda x: x.extension) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.pyi b/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..266d0632748d528b6e730a25624919cb7805de6e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/config/extensions.pyi @@ -0,0 +1,24 @@ +from typing import List, Dict, Optional + +class FileExtension: + extension: str + priority: List[str] + name: Optional[str] = None + description: Optional[str] = None + external_link: Optional[str] = None + volume_support: bool + + def __init__( + self, + *, + extension: str, + priority: List[str], + name: str = None, + description: 
str = None, + external_link: str = None + ) -> None: ... + def reset(self) -> None: ... + +extension_list: List[FileExtension] +known_extensions: Dict[str, List[FileExtension]] +video_extensions: List[FileExtension] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.py b/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..261dcfb17794fa0695f3e4393dfe9f8ebc72d9bd --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.py @@ -0,0 +1,782 @@ +import importlib + +from ..core.legacy_plugin_wrapper import LegacyPlugin + + +class PluginConfig: + """Plugin Configuration Metadata + + This class holds the information needed to lazy-import plugins. + + Parameters + ---------- + name : str + The name of the plugin. + class_name : str + The name of the plugin class inside the plugin module. + module_name : str + The name of the module/package from which to import the plugin. + is_legacy : bool + If True, this plugin is a v2 plugin and will be wrapped in a + LegacyPlugin. Default: False. + package_name : str + If the given module name points to a relative module, then the package + name determines the package it is relative to. + install_name : str + The name of the optional dependency that can be used to install this + plugin if it is missing. + legacy_args : Dict + A dictionary of kwargs to pass to the v2 plugin (Format) upon construction. 
+ + Examples + -------- + >>> PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, + ) + >>> PluginConfig( + name="pillow", + class_name="PillowPlugin", + module_name="imageio.plugins.pillow" + ) + + """ + + def __init__( + self, + name, + class_name, + module_name, + *, + is_legacy=False, + package_name=None, + install_name=None, + legacy_args=None, + ): + legacy_args = legacy_args or dict() + + self.name = name + self.class_name = class_name + self.module_name = module_name + self.package_name = package_name + + self.is_legacy = is_legacy + self.install_name = install_name or self.name + self.legacy_args = {"name": name, "description": "A legacy plugin"} + self.legacy_args.update(legacy_args) + + @property + def format(self): + """For backwards compatibility with FormatManager + + Delete when migrating to v3 + """ + if not self.is_legacy: + raise RuntimeError("Can only get format for legacy plugins.") + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + return clazz(**self.legacy_args) + + @property + def plugin_class(self): + """Get the plugin class (import if needed) + + Returns + ------- + plugin_class : Any + The class that can be used to instantiate plugins. 
+ + """ + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + + if self.is_legacy: + legacy_plugin = clazz(**self.legacy_args) + + def partial_legacy_plugin(request): + return LegacyPlugin(request, legacy_plugin) + + clazz = partial_legacy_plugin + + return clazz + + +known_plugins = dict() +known_plugins["pillow"] = PluginConfig( + name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow" +) +known_plugins["pyav"] = PluginConfig( + name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav" +) +known_plugins["opencv"] = PluginConfig( + name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv" +) +known_plugins["tifffile"] = PluginConfig( + name="tifffile", + class_name="TifffilePlugin", + module_name="imageio.plugins.tifffile_v3", +) +known_plugins["SPE"] = PluginConfig( + name="spe", class_name="SpePlugin", module_name="imageio.plugins.spe" +) +known_plugins["rawpy"] = PluginConfig( + name="rawpy", class_name="RawPyPlugin", module_name="imageio.plugins.rawpy" +) + +# Legacy plugins +# ============== +# +# Which are partly registered by format, partly by plugin, and partly by a mix +# of both. We keep the naming here for backwards compatibility. +# In v3 this should become a single entry per plugin named after the plugin +# We can choose extension-specific priority in ``config.extensions``. +# +# Note: Since python 3.7 order of insertion determines the order of dict().keys() +# This means that the order here determines the order by which plugins are +# checked during the full fallback search. We don't advertise this downstream, +# but it could be a useful thing to keep in mind to choose a sensible default +# search order. 
+ +known_plugins["TIFF"] = PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, +) + +# PILLOW plugin formats (legacy) +PILLOW_FORMATS = [ + ("BMP", "Windows Bitmap", ".bmp", "PillowFormat"), + ("BUFR", "BUFR", ".bufr", "PillowFormat"), + ("CUR", "Windows Cursor", ".cur", "PillowFormat"), + ("DCX", "Intel DCX", ".dcx", "PillowFormat"), + ("DDS", "DirectDraw Surface", ".dds", "PillowFormat"), + ("DIB", "Windows Bitmap", "", "PillowFormat"), + ("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"), + ("FITS", "FITS", ".fit .fits", "PillowFormat"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"), + ("FPX", "FlashPix", ".fpx", "PillowFormat"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"), + ("GBR", "GIMP brush file", ".gbr", "PillowFormat"), + ("GIF", "Compuserve GIF", ".gif", "GIFFormat"), + ("GRIB", "GRIB", ".grib", "PillowFormat"), + ("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"), + ("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"), + ("ICO", "Windows Icon", ".ico", "PillowFormat"), + ("IM", "IFUNC Image Memory", ".im", "PillowFormat"), + ("IMT", "IM Tools", "", "PillowFormat"), + ("IPTC", "IPTC/NAA", ".iim", "PillowFormat"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"), + ( + "JPEG2000", + "JPEG 2000 (ISO 15444)", + ".jp2 .j2k .jpc .jpf .jpx .j2c", + "JPEG2000Format", + ), + ("MCIDAS", "McIdas area file", "", "PillowFormat"), + ("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"), + # skipped in legacy pillow + # ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"), + ("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"), + ("MSP", "Windows Paint", ".msp", "PillowFormat"), + ("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"), + ("PCX", "Paintbrush", ".pcx", "PillowFormat"), + 
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"), + ("PNG", "Portable network graphics", ".png", "PNGFormat"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"), + ("PSD", "Adobe Photoshop", ".psd", "PillowFormat"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"), + ("SPIDER", "Spider 2D image", "", "PillowFormat"), + ("SUN", "Sun Raster File", ".ras", "PillowFormat"), + ("TGA", "Targa", ".tga", "PillowFormat"), + ("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"), + ("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"), + ("XBM", "X11 Bitmap", ".xbm", "PillowFormat"), + ("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"), + ("XVTHUMB", "XV thumbnail image", "", "PillowFormat"), +] +for id, summary, ext, class_name in PILLOW_FORMATS: + config = PluginConfig( + name=id.upper() + "-PIL", + class_name=class_name, + module_name="imageio.plugins.pillow_legacy", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": summary + " via Pillow", + "extensions": ext, + "modes": "iI" if class_name == "GIFFormat" else "i", + "plugin_id": id, + }, + ) + known_plugins[config.name] = config + +known_plugins["FFMPEG"] = PluginConfig( + name="FFMPEG", + class_name="FfmpegFormat", + module_name="imageio.plugins.ffmpeg", + is_legacy=True, + install_name="ffmpeg", + legacy_args={ + "description": "Many video formats and cameras (via ffmpeg)", + "extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264", + "modes": "I", + }, +) + +known_plugins["BSDF"] = PluginConfig( + name="BSDF", + class_name="BsdfFormat", + module_name="imageio.plugins.bsdf", + is_legacy=True, + install_name="bsdf", + legacy_args={ + "description": "Format based on the Binary Structured Data Format", + "extensions": ".bsdf", + "modes": "iIvV", + }, +) + +known_plugins["DICOM"] = PluginConfig( + name="DICOM", + class_name="DicomFormat", + module_name="imageio.plugins.dicom", + is_legacy=True, + install_name="dicom", + legacy_args={ + "description": 
"Digital Imaging and Communications in Medicine", + "extensions": ".dcm .ct .mri", + "modes": "iIvV", + }, +) + +known_plugins["FEI"] = PluginConfig( + name="FEI", + class_name="FEISEMFormat", + module_name="imageio.plugins.feisem", + is_legacy=True, + install_name="feisem", + legacy_args={ + "description": "FEI-SEM TIFF format", + "extensions": [".tif", ".tiff"], + "modes": "iv", + }, +) + +known_plugins["FITS"] = PluginConfig( + name="FITS", + class_name="FitsFormat", + module_name="imageio.plugins.fits", + is_legacy=True, + install_name="fits", + legacy_args={ + "description": "Flexible Image Transport System (FITS) format", + "extensions": ".fits .fit .fts .fz", + "modes": "iIvV", + }, +) + +known_plugins["GDAL"] = PluginConfig( + name="GDAL", + class_name="GdalFormat", + module_name="imageio.plugins.gdal", + is_legacy=True, + install_name="gdal", + legacy_args={ + "description": "Geospatial Data Abstraction Library", + "extensions": ".tiff .tif .img .ecw .jpg .jpeg", + "modes": "iIvV", + }, +) + +known_plugins["ITK"] = PluginConfig( + name="ITK", + class_name="ItkFormat", + module_name="imageio.plugins.simpleitk", + is_legacy=True, + install_name="simpleitk", + legacy_args={ + "description": "Insight Segmentation and Registration Toolkit (ITK) format", + "extensions": " ".join( + ( + ".gipl", + ".ipl", + ".mha", + ".mhd", + ".nhdr", + ".nia", + ".hdr", + ".nrrd", + ".nii", + ".nii.gz", + ".img", + ".img.gz", + ".vtk", + ".hdf5", + ".lsm", + ".mnc", + ".mnc2", + ".mgh", + ".mnc", + ".pic", + ".bmp", + ".jpeg", + ".jpg", + ".png", + ".tiff", + ".tif", + ".dicom", + ".dcm", + ".gdcm", + ) + ), + "modes": "iIvV", + }, +) + +known_plugins["NPZ"] = PluginConfig( + name="NPZ", + class_name="NpzFormat", + module_name="imageio.plugins.npz", + is_legacy=True, + install_name="numpy", + legacy_args={ + "description": "Numpy's compressed array format", + "extensions": ".npz", + "modes": "iIvV", + }, +) + +known_plugins["SWF"] = PluginConfig( + name="SWF", + 
class_name="SWFFormat", + module_name="imageio.plugins.swf", + is_legacy=True, + install_name="swf", + legacy_args={ + "description": "Shockwave flash", + "extensions": ".swf", + "modes": "I", + }, +) + +known_plugins["SCREENGRAB"] = PluginConfig( + name="SCREENGRAB", + class_name="ScreenGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab screenshots (Windows and OS X only)", + "extensions": [], + "modes": "i", + }, +) + +known_plugins["CLIPBOARDGRAB"] = PluginConfig( + name="CLIPBOARDGRAB", + class_name="ClipboardGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab from clipboard (Windows only)", + "extensions": [], + "modes": "i", + }, +) + +# LYTRO plugin (legacy) +lytro_formats = [ + ("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"), + ( + "lytro-illum-raw", + "Lytro Illum raw image file", + ".raw", + "i", + "LytroIllumRawFormat", + ), + ("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"), + ("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"), +] +for name, des, ext, mode, class_name in lytro_formats: + config = PluginConfig( + name=name.upper(), + class_name=class_name, + module_name="imageio.plugins.lytro", + is_legacy=True, + install_name="lytro", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + }, + ) + known_plugins[config.name] = config + +# FreeImage plugin (legacy) +FREEIMAGE_FORMATS = [ + ( + "BMP", + 0, + "Windows or OS/2 Bitmap", + ".bmp", + "i", + "FreeimageBmpFormat", + "imageio.plugins.freeimage", + ), + ( + "CUT", + 21, + "Dr. 
Halo", + ".cut", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "DDS", + 24, + "DirectX Surface", + ".dds", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "EXR", + 29, + "ILM OpenEXR", + ".exr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "G3", + 27, + "Raw fax format CCITT G.3", + ".g3", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "GIF", + 25, + "Static and animated gif (FreeImage)", + ".gif", + "iI", + "GifFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "HDR", + 26, + "High Dynamic Range Image", + ".hdr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "ICO", + 1, + "Windows Icon", + ".ico", + "iI", + "IcoFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "IFF", + 5, + "IFF Interleaved Bitmap", + ".iff .lbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "J2K", + 30, + "JPEG-2000 codestream", + ".j2k .j2c", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JNG", + 3, + "JPEG Network Graphics", + ".jng", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JP2", + 31, + "JPEG-2000 File Format", + ".jp2", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG", + 2, + "JPEG - JFIF Compliant", + ".jpg .jif .jpeg .jpe", + "i", + "FreeimageJpegFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG-XR", + 36, + "JPEG XR image format", + ".jxr .wdp .hdp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "KOALA", + 4, + "C64 Koala Graphics", + ".koa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + # not registered in legacy pillow + # ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"), + ( + "PBM", + 7, + "Portable Bitmap (ASCII)", + ".pbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PBMRAW", + 8, + "Portable Bitmap (RAW)", + ".pbm", 
+ "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCD", + 9, + "Kodak PhotoCD", + ".pcd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCX", + 10, + "Zsoft Paintbrush", + ".pcx", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PFM", + 32, + "Portable floatmap", + ".pfm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGM", + 11, + "Portable Greymap (ASCII)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGMRAW", + 12, + "Portable Greymap (RAW)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PICT", + 33, + "Macintosh PICT", + ".pct .pict .pic", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PNG", + 13, + "Portable Network Graphics", + ".png", + "i", + "FreeimagePngFormat", + "imageio.plugins.freeimage", + ), + ( + "PPM", + 14, + "Portable Pixelmap (ASCII)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PPMRAW", + 15, + "Portable Pixelmap (RAW)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PSD", + 20, + "Adobe Photoshop", + ".psd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAS", + 16, + "Sun Raster Image", + ".ras", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAW", + 34, + "RAW camera image", + ".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 " + ".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf " + ".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "SGI", + 28, + "SGI Image Format", + ".sgi .rgb .rgba .bw", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TARGA", + 17, + "Truevision Targa", + ".tga .targa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TIFF", + 18, + 
"Tagged Image File Format", + ".tif .tiff", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WBMP", + 19, + "Wireless Bitmap", + ".wap .wbmp .wbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WebP", + 35, + "Google WebP image format", + ".webp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XBM", + 22, + "X11 Bitmap Format", + ".xbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XPM", + 23, + "X11 Pixmap Format", + ".xpm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), +] +for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS: + config = PluginConfig( + name=name.upper() + "-FI", + class_name=class_name, + module_name=module_name, + is_legacy=True, + install_name="freeimage", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + "fif": i, + }, + ) + known_plugins[config.name] = config + +# exists for backwards compatibility with FormatManager +# delete in V3 +_original_order = [x for x, config in known_plugins.items() if config.is_legacy] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.pyi b/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ab5d4a816257e0f501fce1c087e74a3d1f66dc13 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/config/plugins.pyi @@ -0,0 +1,28 @@ +from typing import Any, Dict, Optional +from ..core.v3_plugin_api import PluginV3 + +class PluginConfig: + name: str + class_name: str + module_name: str + is_legacy: bool + package_name: Optional[str] = None + install_name: Optional[str] = None + legacy_args: Optional[dict] = None + @property + def format(self) -> Any: ... + @property + def plugin_class(self) -> PluginV3: ... 
+ def __init__( + self, + name: str, + class_name: str, + module_name: str, + *, + is_legacy: bool = False, + package_name: str = None, + install_name: str = None, + legacy_args: dict = None, + ) -> None: ... + +known_plugins: Dict[str, PluginConfig] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/freeze.py b/minigpt2/lib/python3.10/site-packages/imageio/freeze.py new file mode 100644 index 0000000000000000000000000000000000000000..3753a29df665e416030b4eb0453ed3430a4c78fc --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/freeze.py @@ -0,0 +1,11 @@ +""" +Helper functions for freezing imageio. +""" + + +def get_includes(): + return ["email", "urllib.request", "numpy", "zipfile", "io"] + + +def get_excludes(): + return [] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__init__.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..741415e955069d300bee8d8bc529ea0df742d700 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__init__.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +# flake8: noqa + +""" +Here you can find documentation on how to write your own plugin to allow +ImageIO to access a new backend. Plugins are quite object oriented, and +the relevant classes and their interaction are documented here: + +.. currentmodule:: imageio + +.. autosummary:: + :toctree: ../_autosummary + :template: better_class.rst + + imageio.core.Format + imageio.core.Request + +.. note:: + You can always check existing plugins if you want to see examples. + +What methods to implement +------------------------- + +To implement a new plugin, create a new class that inherits from +:class:`imageio.core.Format`. and implement the following functions: + +.. 
autosummary:: + :toctree: ../_autosummary + + imageio.core.Format.__init__ + imageio.core.Format._can_read + imageio.core.Format._can_write + +Further, each format contains up to two nested classes; one for reading and +one for writing. To support reading and/or writing, the respective classes +need to be defined. + +For reading, create a nested class that inherits from +``imageio.core.Format.Reader`` and that implements the following functions: + + * Implement ``_open(**kwargs)`` to initialize the reader. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_get_length()`` to provide a suitable length based on what + the user expects. Can be ``inf`` for streaming data. + * Implement ``_get_data(index)`` to return an array and a meta-data dict. + * Implement ``_get_meta_data(index)`` to return a meta-data dict. If index + is None, it should return the 'global' meta-data. + +For writing, create a nested class that inherits from +``imageio.core.Format.Writer`` and implement the following functions: + + * Implement ``_open(**kwargs)`` to initialize the writer. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_append_data(im, meta)`` to add data (and meta-data). + * Implement ``_set_meta_data(meta)`` to set the global meta-data. + +""" + +import importlib +import os +import warnings + + +# v2 imports remove in v3 +from .. import formats + +# v2 allows formatting plugins by environment variable +# this is done here. +env_plugin_order = os.getenv("IMAGEIO_FORMAT_ORDER", None) +if env_plugin_order is not None: # pragma: no cover + warnings.warn( + "Setting plugin priority through an environment variable is" + " deprecated and will be removed in ImageIO v3. There is no" + " replacement planned for this feature. 
If you have an" + " active use-case for it, please reach out to us on GitHub.", + DeprecationWarning, + ) + + formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(",")) + + +# this class replaces plugin module. For details +# see https://stackoverflow.com/questions/2447353/getattr-on-a-module +def __getattr__(name): + """Lazy-Import Plugins + + This function dynamically loads plugins into the imageio.plugin + namespace upon first access. For example, the following snippet will + delay importing freeimage until the second line: + + >>> import imageio + >>> imageio.plugins.freeimage.download() + + """ + + try: + return importlib.import_module(f"imageio.plugins.{name}") + except ImportError: + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") from None diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/_bsdf.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_bsdf.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f56ce0dc471b0c13bc4e10f0ce9a26b7303bce --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_bsdf.py @@ -0,0 +1,915 @@ +#!/usr/bin/env python +# This file is distributed under the terms of the 2-clause BSD License. +# Copyright (c) 2017-2018, Almar Klein + +""" +Python implementation of the Binary Structured Data Format (BSDF). + +BSDF is a binary format for serializing structured (scientific) data. +See http://bsdf.io for more information. + +This is the reference implementation, which is relatively relatively +sophisticated, providing e.g. lazy loading of blobs and streamed +reading/writing. A simpler Python implementation is available as +``bsdf_lite.py``. + +This module has no dependencies and works on Python 2.7 and 3.4+. + +Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes. 
+""" + +# todo: in 2020, remove six stuff, __future__ and _isidentifier +# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster + +from __future__ import absolute_import, division, print_function + +import bz2 +import hashlib +import logging +import os +import re +import struct +import sys +import types +import zlib +from io import BytesIO + +logger = logging.getLogger(__name__) + +# Notes on versioning: the major and minor numbers correspond to the +# BSDF format version. The major number if increased when backward +# incompatible changes are introduced. An implementation must raise an +# exception when the file being read has a higher major version. The +# minor number is increased when new backward compatible features are +# introduced. An implementation must display a warning when the file +# being read has a higher minor version. The patch version is increased +# for subsequent releases of the implementation. +VERSION = 2, 1, 2 +__version__ = ".".join(str(i) for i in VERSION) + + +# %% The encoder and decoder implementation + +# From six.py +PY3 = sys.version_info[0] >= 3 +if PY3: + text_type = str + string_types = str + unicode_types = str + integer_types = int + classtypes = type +else: # pragma: no cover + logging.basicConfig() # avoid "no handlers found" error + text_type = unicode # noqa + string_types = basestring # noqa + unicode_types = unicode # noqa + integer_types = (int, long) # noqa + classtypes = type, types.ClassType + +# Shorthands +spack = struct.pack +strunpack = struct.unpack + + +def lencode(x): + """Encode an unsigned integer into a variable sized blob of bytes.""" + # We could support 16 bit and 32 bit as well, but the gain is low, since + # 9 bytes for collections with over 250 elements is marginal anyway. 
+ if x <= 250: + return spack(" extension + self._extensions_by_cls = {} # cls -> (name, extension.encode) + if extensions is None: + extensions = standard_extensions + for extension in extensions: + self.add_extension(extension) + self._parse_options(**options) + + def _parse_options( + self, + compression=0, + use_checksum=False, + float64=True, + load_streaming=False, + lazy_blob=False, + ): + # Validate compression + if isinstance(compression, string_types): + m = {"no": 0, "zlib": 1, "bz2": 2} + compression = m.get(compression.lower(), compression) + if compression not in (0, 1, 2): + raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"') + self._compression = compression + + # Other encoding args + self._use_checksum = bool(use_checksum) + self._float64 = bool(float64) + + # Decoding args + self._load_streaming = bool(load_streaming) + self._lazy_blob = bool(lazy_blob) + + def add_extension(self, extension_class): + """Add an extension to this serializer instance, which must be + a subclass of Extension. Can be used as a decorator. + """ + # Check class + if not ( + isinstance(extension_class, type) and issubclass(extension_class, Extension) + ): + raise TypeError("add_extension() expects a Extension class.") + extension = extension_class() + + # Get name + name = extension.name + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if len(name) == 0 or len(name) > 250: + raise NameError( + "Extension names must be nonempty and shorter " "than 251 chars." 
+ ) + if name in self._extensions: + logger.warning( + 'BSDF warning: overwriting extension "%s", ' + "consider removing first" % name + ) + + # Get classes + cls = extension.cls + if not cls: + clss = [] + elif isinstance(cls, (tuple, list)): + clss = cls + else: + clss = [cls] + for cls in clss: + if not isinstance(cls, classtypes): + raise TypeError("Extension classes must be types.") + + # Store + for cls in clss: + self._extensions_by_cls[cls] = name, extension.encode + self._extensions[name] = extension + return extension_class + + def remove_extension(self, name): + """Remove a converted by its unique name.""" + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if name in self._extensions: + self._extensions.pop(name) + for cls in list(self._extensions_by_cls.keys()): + if self._extensions_by_cls[cls][0] == name: + self._extensions_by_cls.pop(cls) + + def _encode(self, f, value, streams, ext_id): + """Main encoder function.""" + x = encode_type_id + + if value is None: + f.write(x(b"v", ext_id)) # V for void + elif value is True: + f.write(x(b"y", ext_id)) # Y for yes + elif value is False: + f.write(x(b"n", ext_id)) # N for no + elif isinstance(value, integer_types): + if -32768 <= value <= 32767: + f.write(x(b"h", ext_id) + spack("h", value)) # H for ... + else: + f.write(x(b"i", ext_id) + spack(" 0: + raise ValueError("Can only have one stream per file.") + streams.append(value) + value._activate(f, self._encode, self._decode) # noqa + else: + if ext_id is not None: + raise ValueError( + "Extension %s wronfully encodes object to another " + "extension object (though it may encode to a list/dict " + "that contains other extension objects)." 
% ext_id + ) + # Try if the value is of a type we know + ex = self._extensions_by_cls.get(value.__class__, None) + # Maybe its a subclass of a type we know + if ex is None: + for name, c in self._extensions.items(): + if c.match(self, value): + ex = name, c.encode + break + else: + ex = None + # Success or fail + if ex is not None: + ext_id2, extension_encode = ex + self._encode(f, extension_encode(self, value), streams, ext_id2) + else: + t = ( + "Class %r is not a valid base BSDF type, nor is it " + "handled by an extension." + ) + raise TypeError(t % value.__class__.__name__) + + def _decode(self, f): + """Main decoder function.""" + + # Get value + char = f.read(1) + c = char.lower() + + # Conversion (uppercase value identifiers signify converted values) + if not char: + raise EOFError() + elif char != c: + n = strunpack("= 254: + # Streaming + closed = n == 254 + n = strunpack(" 0 + name = f.read(n_name).decode("UTF-8") + value[name] = self._decode(f) + elif c == b"b": + if self._lazy_blob: + value = Blob((f, True)) + else: + blob = Blob((f, False)) + value = blob.get_bytes() + else: + raise RuntimeError("Parse error %r" % char) + + # Convert value if we have an extension for it + if ext_id is not None: + extension = self._extensions.get(ext_id, None) + if extension is not None: + value = extension.decode(self, value) + else: + logger.warning("BSDF warning: no extension found for %r" % ext_id) + + return value + + def encode(self, ob): + """Save the given object to bytes.""" + f = BytesIO() + self.save(f, ob) + return f.getvalue() + + def save(self, f, ob): + """Write the given object to the given file object.""" + f.write(b"BSDF") + f.write(struct.pack(" 0: + stream = streams[0] + if stream._start_pos != f.tell(): + raise ValueError( + "The stream object must be " "the last object to be encoded." 
+ ) + + def decode(self, bb): + """Load the data structure that is BSDF-encoded in the given bytes.""" + f = BytesIO(bb) + return self.load(f) + + def load(self, f): + """Load a BSDF-encoded object from the given file object.""" + # Check magic string + f4 = f.read(4) + if f4 != b"BSDF": + raise RuntimeError("This does not look like a BSDF file: %r" % f4) + # Check version + major_version = strunpack(" VERSION[1]: # minor should be < ours + t = ( + "BSDF warning: reading file with higher minor version (%s) " + "than the implementation (%s)." + ) + logger.warning(t % (__version__, file_version)) + + return self._decode(f) + + +# %% Streaming and blob-files + + +class BaseStream(object): + """Base class for streams.""" + + def __init__(self, mode="w"): + self._i = 0 + self._count = -1 + if isinstance(mode, int): + self._count = mode + mode = "r" + elif mode == "w": + self._count = 0 + assert mode in ("r", "w") + self._mode = mode + self._f = None + self._start_pos = 0 + + def _activate(self, file, encode_func, decode_func): + if self._f is not None: # Associated with another write + raise IOError("Stream object cannot be activated twice?") + self._f = file + self._start_pos = self._f.tell() + self._encode = encode_func + self._decode = decode_func + + @property + def mode(self): + """The mode of this stream: 'r' or 'w'.""" + return self._mode + + +class ListStream(BaseStream): + """A streamable list object used for writing or reading. + In read mode, it can also be iterated over. + """ + + @property + def count(self): + """The number of elements in the stream (can be -1 for unclosed + streams in read-mode). + """ + return self._count + + @property + def index(self): + """The current index of the element to read/write.""" + return self._i + + def append(self, item): + """Append an item to the streaming list. The object is immediately + serialized and written to the underlying file. 
+ """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only append items to the end of the stream.") + if self._f is None: + raise IOError("List stream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot stream to a close file.") + self._encode(self._f, item, [self], None) + self._i += 1 + self._count += 1 + + def close(self, unstream=False): + """Close the stream, marking the number of written elements. New + elements may still be appended, but they won't be read during decoding. + If ``unstream`` is False, the stream is turned into a regular list + (not streaming). + """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only close when at the end of the stream.") + if self._f is None: + raise IOError("ListStream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot close a stream on a close file.") + i = self._f.tell() + self._f.seek(self._start_pos - 8 - 1) + self._f.write(spack("= 0: + if self._i >= self._count: + raise StopIteration() + self._i += 1 + return self._decode(self._f) + else: + # This raises EOFError at some point. + try: + res = self._decode(self._f) + self._i += 1 + return res + except EOFError: + self._count = self._i + raise StopIteration() + + def __iter__(self): + if self._mode != "r": + raise IOError("Cannot iterate: ListStream in not in read mode.") + return self + + def __next__(self): + return self.next() + + +class Blob(object): + """Object to represent a blob of bytes. When used to write a BSDF file, + it's a wrapper for bytes plus properties such as what compression to apply. + When used to read a BSDF file, it can be used to read the data lazily, and + also modify the data if reading in 'r+' mode and the blob isn't compressed. 
+ """ + + # For now, this does not allow re-sizing blobs (within the allocated size) + # but this can be added later. + + def __init__(self, bb, compression=0, extra_size=0, use_checksum=False): + if isinstance(bb, bytes): + self._f = None + self.compressed = self._from_bytes(bb, compression) + self.compression = compression + self.allocated_size = self.used_size + extra_size + self.use_checksum = use_checksum + elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"): + self._f, allow_seek = bb + self.compressed = None + self._from_file(self._f, allow_seek) + self._modified = False + else: + raise TypeError("Wrong argument to create Blob.") + + def _from_bytes(self, value, compression): + """When used to wrap bytes in a blob.""" + if compression == 0: + compressed = value + elif compression == 1: + compressed = zlib.compress(value, 9) + elif compression == 2: + compressed = bz2.compress(value, 9) + else: # pragma: no cover + assert False, "Unknown compression identifier" + + self.data_size = len(value) + self.used_size = len(compressed) + return compressed + + def _to_file(self, f): + """Private friend method called by encoder to write a blob to a file.""" + # Write sizes - write at least in a size that allows resizing + if self.allocated_size <= 250 and self.compression == 0: + f.write(spack(" self.allocated_size: + raise IOError("Seek beyond blob boundaries.") + self._f.seek(self.start_pos + p) + + def tell(self): + """Get the current file pointer position (relative to the blob start).""" + if self._f is None: + raise RuntimeError( + "Cannot tell in a blob " "that is not created by the BSDF decoder." + ) + return self._f.tell() - self.start_pos + + def write(self, bb): + """Write bytes to the blob.""" + if self._f is None: + raise RuntimeError( + "Cannot write in a blob " "that is not created by the BSDF decoder." 
+ ) + if self.compression: + raise IOError("Cannot arbitrarily write in compressed blob.") + if self._f.tell() + len(bb) > self.end_pos: + raise IOError("Write beyond blob boundaries.") + self._modified = True + return self._f.write(bb) + + def read(self, n): + """Read n bytes from the blob.""" + if self._f is None: + raise RuntimeError( + "Cannot read in a blob " "that is not created by the BSDF decoder." + ) + if self.compression: + raise IOError("Cannot arbitrarily read in compressed blob.") + if self._f.tell() + n > self.end_pos: + raise IOError("Read beyond blob boundaries.") + return self._f.read(n) + + def get_bytes(self): + """Get the contents of the blob as bytes.""" + if self.compressed is not None: + compressed = self.compressed + else: + i = self._f.tell() + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(i) + if self.compression == 0: + value = compressed + elif self.compression == 1: + value = zlib.decompress(compressed) + elif self.compression == 2: + value = bz2.decompress(compressed) + else: # pragma: no cover + raise RuntimeError("Invalid compression %i" % self.compression) + return value + + def update_checksum(self): + """Reset the blob's checksum if present. Call this after modifying + the data. + """ + # or ... should the presence of a checksum mean that data is proteced? + if self.use_checksum and self._modified: + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(self.start_pos - self.alignment - 1 - 16) + self._f.write(hashlib.md5(compressed).digest()) + + +# %% High-level functions + + +def encode(ob, extensions=None, **options): + """Save (BSDF-encode) the given object to bytes. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + return s.encode(ob) + + +def save(f, ob, extensions=None, **options): + """Save (BSDF-encode) the given object to the given filename or + file object. 
See` BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + with open(f, "wb") as fp: + return s.save(fp, ob) + else: + return s.save(f, ob) + + +def decode(bb, extensions=None, **options): + """Load a (BSDF-encoded) structure from bytes. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + return s.decode(bb) + + +def load(f, extensions=None, **options): + """Load a (BSDF-encoded) structure from the given filename or file object. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + if f.startswith(("~/", "~\\")): # pragma: no cover + f = os.path.expanduser(f) + with open(f, "rb") as fp: + return s.load(fp) + else: + return s.load(f) + + +# Aliases for json compat +loads = decode +dumps = encode + + +# %% Standard extensions + +# Defining extensions as a dict would be more compact and feel lighter, but +# that would only allow lambdas, which is too limiting, e.g. for ndarray +# extension. + + +class Extension(object): + """Base class to implement BSDF extensions for special data types. + + Extension classes are provided to the BSDF serializer, which + instantiates the class. That way, the extension can be somewhat dynamic: + e.g. the NDArrayExtension exposes the ndarray class only when numpy + is imported. + + A extension instance must have two attributes. These can be attributes of + the class, or of the instance set in ``__init__()``: + + * name (str): the name by which encoded values will be identified. + * cls (type): the type (or list of types) to match values with. + This is optional, but it makes the encoder select extensions faster. + + Further, it needs 3 methods: + + * `match(serializer, value) -> bool`: return whether the extension can + convert the given value. The default is ``isinstance(value, self.cls)``. 
+ * `encode(serializer, value) -> encoded_value`: the function to encode a + value to more basic data types. + * `decode(serializer, encoded_value) -> value`: the function to decode an + encoded value back to its intended representation. + + """ + + name = "" + cls = () + + def __repr__(self): + return "" % (self.name, hex(id(self))) + + def match(self, s, v): + return isinstance(v, self.cls) + + def encode(self, s, v): + raise NotImplementedError() + + def decode(self, s, v): + raise NotImplementedError() + + +class ComplexExtension(Extension): + name = "c" + cls = complex + + def encode(self, s, v): + return (v.real, v.imag) + + def decode(self, s, v): + return complex(v[0], v[1]) + + +class NDArrayExtension(Extension): + name = "ndarray" + + def __init__(self): + if "numpy" in sys.modules: + import numpy as np + + self.cls = np.ndarray + + def match(self, s, v): # pragma: no cover - e.g. work for nd arrays in JS + return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes") + + def encode(self, s, v): + return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + try: + import numpy as np + except ImportError: # pragma: no cover + return v + a = np.frombuffer(v["data"], dtype=v["dtype"]) + a.shape = v["shape"] + return a + + +standard_extensions = [ComplexExtension, NDArrayExtension] + + +if __name__ == "__main__": + # Invoke CLI + import bsdf_cli + + bsdf_cli.main() diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/_dicom.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_dicom.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2f7ac511e2ef75b25c203b075724dcc152f564 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_dicom.py @@ -0,0 +1,932 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading DICOM files. 
+""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? + +import sys +import os +import struct +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + +# Define a dictionary that contains the tags that we would like to know +MINIDICT = { + (0x7FE0, 0x0010): ("PixelData", "OB"), + # Date and time + (0x0008, 0x0020): ("StudyDate", "DA"), + (0x0008, 0x0021): ("SeriesDate", "DA"), + (0x0008, 0x0022): ("AcquisitionDate", "DA"), + (0x0008, 0x0023): ("ContentDate", "DA"), + (0x0008, 0x0030): ("StudyTime", "TM"), + (0x0008, 0x0031): ("SeriesTime", "TM"), + (0x0008, 0x0032): ("AcquisitionTime", "TM"), + (0x0008, 0x0033): ("ContentTime", "TM"), + # With what, where, by whom? 
+ (0x0008, 0x0060): ("Modality", "CS"), + (0x0008, 0x0070): ("Manufacturer", "LO"), + (0x0008, 0x0080): ("InstitutionName", "LO"), + # Descriptions + (0x0008, 0x1030): ("StudyDescription", "LO"), + (0x0008, 0x103E): ("SeriesDescription", "LO"), + # UID's + (0x0008, 0x0016): ("SOPClassUID", "UI"), + (0x0008, 0x0018): ("SOPInstanceUID", "UI"), + (0x0020, 0x000D): ("StudyInstanceUID", "UI"), + (0x0020, 0x000E): ("SeriesInstanceUID", "UI"), + (0x0008, 0x0117): ("ContextUID", "UI"), + # Numbers + (0x0020, 0x0011): ("SeriesNumber", "IS"), + (0x0020, 0x0012): ("AcquisitionNumber", "IS"), + (0x0020, 0x0013): ("InstanceNumber", "IS"), + (0x0020, 0x0014): ("IsotopeNumber", "IS"), + (0x0020, 0x0015): ("PhaseNumber", "IS"), + (0x0020, 0x0016): ("IntervalNumber", "IS"), + (0x0020, 0x0017): ("TimeSlotNumber", "IS"), + (0x0020, 0x0018): ("AngleNumber", "IS"), + (0x0020, 0x0019): ("ItemNumber", "IS"), + (0x0020, 0x0020): ("PatientOrientation", "CS"), + (0x0020, 0x0030): ("ImagePosition", "CS"), + (0x0020, 0x0032): ("ImagePositionPatient", "CS"), + (0x0020, 0x0035): ("ImageOrientation", "CS"), + (0x0020, 0x0037): ("ImageOrientationPatient", "CS"), + # Patient information + (0x0010, 0x0010): ("PatientName", "PN"), + (0x0010, 0x0020): ("PatientID", "LO"), + (0x0010, 0x0030): ("PatientBirthDate", "DA"), + (0x0010, 0x0040): ("PatientSex", "CS"), + (0x0010, 0x1010): ("PatientAge", "AS"), + (0x0010, 0x1020): ("PatientSize", "DS"), + (0x0010, 0x1030): ("PatientWeight", "DS"), + # Image specific (required to construct numpy array) + (0x0028, 0x0002): ("SamplesPerPixel", "US"), + (0x0028, 0x0008): ("NumberOfFrames", "IS"), + (0x0028, 0x0100): ("BitsAllocated", "US"), + (0x0028, 0x0101): ("BitsStored", "US"), + (0x0028, 0x0102): ("HighBit", "US"), + (0x0028, 0x0103): ("PixelRepresentation", "US"), + (0x0028, 0x0010): ("Rows", "US"), + (0x0028, 0x0011): ("Columns", "US"), + (0x0028, 0x1052): ("RescaleIntercept", "DS"), + (0x0028, 0x1053): ("RescaleSlope", "DS"), + # Image specific (for the 
user) + (0x0028, 0x0030): ("PixelSpacing", "DS"), + (0x0018, 0x0088): ("SliceSpacing", "DS"), +} + +# Define some special tags: +# See PS 3.5-2008 section 7.5 (p.40) +ItemTag = (0xFFFE, 0xE000) # start of Sequence Item +ItemDelimiterTag = (0xFFFE, 0xE00D) # end of Sequence Item +SequenceDelimiterTag = (0xFFFE, 0xE0DD) # end of Sequence of undefined length + +# Define set of groups that we're interested in (so we can quickly skip others) +GROUPS = set([key[0] for key in MINIDICT.keys()]) +VRS = set([val[1] for val in MINIDICT.values()]) + + +class NotADicomFile(Exception): + pass + + +class CompressedDicom(RuntimeError): + pass + + +class SimpleDicomReader(object): + """ + This class provides reading of pixel data from DICOM files. It is + focussed on getting the pixel data, not the meta info. + + To use, first create an instance of this class (giving it + a file object or filename). Next use the info attribute to + get a dict of the meta data. The loading of pixel data is + deferred until get_numpy_array() is called. + + Comparison with Pydicom + ----------------------- + + This code focusses on getting the pixel data out, which allows some + shortcuts, resulting in the code being much smaller. + + Since the processing of data elements is much cheaper (it skips a lot + of tags), this code is about 3x faster than pydicom (except for the + deflated DICOM files). + + This class does borrow some code (and ideas) from the pydicom + project, and (to the best of our knowledge) has the same limitations + as pydicom with regard to the type of files that it can handle. + + Limitations + ----------- + + For more advanced DICOM processing, please check out pydicom. + + * Only a predefined subset of data elements (meta information) is read. + * This is a reader; it can not write DICOM files. + * (just like pydicom) it can handle none of the compressed DICOM + formats except for "Deflated Explicit VR Little Endian" + (1.2.840.10008.1.2.1.99). 
+ + """ + + def __init__(self, file): + # Open file if filename given + if isinstance(file, str): + self._filename = file + self._file = open(file, "rb") + else: + self._filename = "" + self._file = file + # Init variable to store position and size of pixel data + self._pixel_data_loc = None + # The meta header is always explicit and little endian + self.is_implicit_VR = False + self.is_little_endian = True + self._unpackPrefix = "<" + # Dict to store data elements of interest in + self._info = {} + # VR Conversion + self._converters = { + # Numbers + "US": lambda x: self._unpack("H", x), + "UL": lambda x: self._unpack("L", x), + # Numbers encoded as strings + "DS": lambda x: self._splitValues(x, float, "\\"), + "IS": lambda x: self._splitValues(x, int, "\\"), + # strings + "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + "CS": lambda x: self._splitValues(x, float, "\\"), + "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + } + + # Initiate reading + self._read() + + @property + def info(self): + return self._info + + def _splitValues(self, x, type, splitter): + s = x.decode("ascii").strip("\x00") + try: + if splitter in s: + return tuple([type(v) for v in s.split(splitter) if v.strip()]) + else: + return type(s) + except ValueError: + return s + + def _unpack(self, fmt, value): + return struct.unpack(self._unpackPrefix + fmt, value)[0] + + # Really only so we need minimal changes to _pixel_data_numpy + def __iter__(self): + return iter(self._info.keys()) + + def __getattr__(self, key): + info = object.__getattribute__(self, "_info") + if key in info: + return info[key] + return object.__getattribute__(self, key) # pragma: no cover + + def _read(self): + f = self._file + # Check prefix 
after peamble + f.seek(128) + if f.read(4) != b"DICM": + raise NotADicomFile("Not a valid DICOM file.") + # Read + self._read_header() + self._read_data_elements() + self._get_shape_and_sampling() + # Close if done, reopen if necessary to read pixel data + if os.path.isfile(self._filename): + self._file.close() + self._file = None + + def _readDataElement(self): + f = self._file + # Get group and element + group = self._unpack("H", f.read(2)) + element = self._unpack("H", f.read(2)) + # Get value length + if self.is_implicit_VR: + vl = self._unpack("I", f.read(4)) + else: + vr = f.read(2) + if vr in (b"OB", b"OW", b"SQ", b"UN"): + reserved = f.read(2) # noqa + vl = self._unpack("I", f.read(4)) + else: + vl = self._unpack("H", f.read(2)) + # Get value + if group == 0x7FE0 and element == 0x0010: + here = f.tell() + self._pixel_data_loc = here, vl + f.seek(here + vl) + return group, element, b"Deferred loading of pixel data" + else: + if vl == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = f.read(vl) + return group, element, value + + def _read_undefined_length_value(self, read_size=128): + """Copied (in compacted form) from PyDicom + Copyright Darcy Mason. + """ + fp = self._file + # data_start = fp.tell() + search_rewind = 3 + bytes_to_find = struct.pack( + self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1] + ) + + found = False + value_chunks = [] + while not found: + chunk_start = fp.tell() + bytes_read = fp.read(read_size) + if len(bytes_read) < read_size: + # try again, + # if still don't get required amount, this is last block + new_bytes = fp.read(read_size - len(bytes_read)) + bytes_read += new_bytes + if len(bytes_read) < read_size: + raise EOFError( + "End of file reached before sequence " "delimiter found." 
+ ) + index = bytes_read.find(bytes_to_find) + if index != -1: + found = True + value_chunks.append(bytes_read[:index]) + fp.seek(chunk_start + index + 4) # rewind to end of delimiter + length = fp.read(4) + if length != b"\0\0\0\0": + logger.warning( + "Expected 4 zero bytes after undefined length " "delimiter" + ) + else: + fp.seek(fp.tell() - search_rewind) # rewind a bit + # accumulate the bytes read (not including the rewind) + value_chunks.append(bytes_read[:-search_rewind]) + + # if get here then have found the byte string + return b"".join(value_chunks) + + def _read_header(self): + f = self._file + TransferSyntaxUID = None + + # Read all elements, store transferSyntax when we encounter it + try: + while True: + fp_save = f.tell() + # Get element + group, element, value = self._readDataElement() + if group == 0x02: + if group == 0x02 and element == 0x10: + TransferSyntaxUID = value.decode("ascii").strip("\x00") + else: + # No more group 2: rewind and break + # (don't trust group length) + f.seek(fp_save) + break + except (EOFError, struct.error): # pragma: no cover + raise RuntimeError("End of file reached while still in header.") + + # Handle transfer syntax + self._info["TransferSyntaxUID"] = TransferSyntaxUID + # + if TransferSyntaxUID is None: + # Assume ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.1": + # ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.2": + # ExplicitVRBigEndian + is_implicit_VR, is_little_endian = False, False + elif TransferSyntaxUID == "1.2.840.10008.1.2": + # implicit VR little endian + is_implicit_VR, is_little_endian = True, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99": + # DeflatedExplicitVRLittleEndian: + is_implicit_VR, is_little_endian = False, True + self._inflate() + else: + # http://www.dicomlibrary.com/dicom/transfer-syntax/ + t, extra_info = TransferSyntaxUID, "" + 
if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG)" + if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG 2000)" + if t == "1.2.840.10008.1.2.5": + extra_info = " (RLE)" + if t == "1.2.840.10008.1.2.6.1": + extra_info = " (RFC 2557)" + raise CompressedDicom( + "The dicom reader can only read files with " + "uncompressed image data - not %r%s. You " + "can try using dcmtk or gdcm to convert the " + "image." % (t, extra_info) + ) + + # From hereon, use implicit/explicit big/little endian + self.is_implicit_VR = is_implicit_VR + self.is_little_endian = is_little_endian + self._unpackPrefix = "><"[is_little_endian] + + def _read_data_elements(self): + info = self._info + try: + while True: + # Get element + group, element, value = self._readDataElement() + # Is it a group we are interested in? + if group in GROUPS: + key = (group, element) + name, vr = MINIDICT.get(key, (None, None)) + # Is it an element we are interested in? + if name: + # Store value + converter = self._converters.get(vr, lambda x: x) + info[name] = converter(value) + except (EOFError, struct.error): + pass # end of file ... + + def get_numpy_array(self): + """Get numpy arra for this DICOM file, with the correct shape, + and pixel values scaled appropriately. + """ + # Is there pixel data at all? + if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # Load it now if it was not already loaded + if self._pixel_data_loc and len(self.PixelData) < 100: + # Reopen file? 
+ close_file = False + if self._file is None: + close_file = True + self._file = open(self._filename, "rb") + # Read data + self._file.seek(self._pixel_data_loc[0]) + if self._pixel_data_loc[1] == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = self._file.read(self._pixel_data_loc[1]) + # Close file + if close_file: + self._file.close() + self._file = None + # Overwrite + self._info["PixelData"] = value + + # Get data + data = self._pixel_data_numpy() + data = self._apply_slope_and_offset(data) + + # Remove data again to preserve memory + # Note that the data for the original file is loaded twice ... + self._info["PixelData"] = ( + b"Data converted to numpy array, " + b"raw data removed to preserve memory" + ) + return data + + def _get_shape_and_sampling(self): + """Get shape and sampling without actuall using the pixel data. + In this way, the user can get an idea what's inside without having + to load it. + """ + # Get shape (in the same way that pydicom does) + if "NumberOfFrames" in self and self.NumberOfFrames > 1: + if self.SamplesPerPixel > 1: + shape = ( + self.SamplesPerPixel, + self.NumberOfFrames, + self.Rows, + self.Columns, + ) + else: + shape = self.NumberOfFrames, self.Rows, self.Columns + elif "SamplesPerPixel" in self: + if self.SamplesPerPixel > 1: + if self.BitsAllocated == 8: + shape = self.SamplesPerPixel, self.Rows, self.Columns + else: + raise NotImplementedError( + "DICOM plugin only handles " + "SamplesPerPixel > 1 if Bits " + "Allocated = 8" + ) + else: + shape = self.Rows, self.Columns + else: + raise RuntimeError( + "DICOM file has no SamplesPerPixel " "(perhaps this is a report?)" + ) + + # Try getting sampling between pixels + if "PixelSpacing" in self: + sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1]) + else: + sampling = 1.0, 1.0 + if "SliceSpacing" in self: + sampling = (abs(self.SliceSpacing),) + sampling + + # Ensure that sampling has as many elements as shape + sampling = (1.0,) * 
(len(shape) - len(sampling)) + sampling[-len(shape) :] + + # Set shape and sampling + self._info["shape"] = shape + self._info["sampling"] = sampling + + def _pixel_data_numpy(self): + """Return a NumPy array of the pixel data.""" + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + + if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # determine the type used for the array + need_byteswap = self.is_little_endian != sys_is_little_endian + + # Make NumPy format code, e.g. "uint16", "int32" etc + # from two pieces of info: + # self.PixelRepresentation -- 0 for unsigned, 1 for signed; + # self.BitsAllocated -- 8, 16, or 32 + format_str = "%sint%d" % ( + ("u", "")[self.PixelRepresentation], + self.BitsAllocated, + ) + try: + numpy_format = np.dtype(format_str) + except TypeError: # pragma: no cover + raise TypeError( + "Data type not understood by NumPy: format='%s', " + " PixelRepresentation=%d, BitsAllocated=%d" + % (numpy_format, self.PixelRepresentation, self.BitsAllocated) + ) + + # Have correct Numpy format, so create the NumPy array + arr = np.frombuffer(self.PixelData, numpy_format).copy() + + # XXX byte swap - may later handle this in read_file!!? + if need_byteswap: + arr.byteswap(True) # True means swap in-place, don't make new copy + + # Note the following reshape operations return a new *view* onto arr, + # but don't copy the data + arr = arr.reshape(*self._info["shape"]) + return arr + + def _apply_slope_and_offset(self, data): + """ + If RescaleSlope and RescaleIntercept are present in the data, + apply them. The data type of the data is changed if necessary. 
+ """ + # Obtain slope and offset + slope, offset = 1, 0 + needFloats, needApplySlopeOffset = False, False + if "RescaleSlope" in self: + needApplySlopeOffset = True + slope = self.RescaleSlope + if "RescaleIntercept" in self: + needApplySlopeOffset = True + offset = self.RescaleIntercept + if int(slope) != slope or int(offset) != offset: + needFloats = True + if not needFloats: + slope, offset = int(slope), int(offset) + + # Apply slope and offset + if needApplySlopeOffset: + # Maybe we need to change the datatype? + if data.dtype in [np.float32, np.float64]: + pass + elif needFloats: + data = data.astype(np.float32) + else: + # Determine required range + minReq, maxReq = data.min().item(), data.max().item() + minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset]) + maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset]) + + # Determine required datatype from that + dtype = None + if minReq < 0: + # Signed integer type + maxReq = max([-minReq, maxReq]) + if maxReq < 2**7: + dtype = np.int8 + elif maxReq < 2**15: + dtype = np.int16 + elif maxReq < 2**31: + dtype = np.int32 + else: + dtype = np.float32 + else: + # Unsigned integer type + if maxReq < 2**8: + dtype = np.int8 + elif maxReq < 2**16: + dtype = np.int16 + elif maxReq < 2**32: + dtype = np.int32 + else: + dtype = np.float32 + # Change datatype + if dtype != data.dtype: + data = data.astype(dtype) + + # Apply slope and offset + data *= slope + data += offset + + # Done + return data + + def _inflate(self): + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + import zlib + from io import BytesIO + + # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset + # following the file metadata was prepared the normal way, + # then "deflate" compression applied. 
+ # All that is needed here is to decompress and then + # use as normal in a file-like object + zipped = self._file.read() + # -MAX_WBITS part is from comp.lang.python answer: + # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799 + unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS) + self._file = BytesIO(unzipped) # a file-like object + + +class DicomSeries(object): + """DicomSeries + This class represents a serie of dicom files (SimpleDicomReader + objects) that belong together. If these are multiple files, they + represent the slices of a volume (like for CT or MRI). + """ + + def __init__(self, suid, progressIndicator): + # Init dataset list and the callback + self._entries = [] + + # Init props + self._suid = suid + self._info = {} + self._progressIndicator = progressIndicator + + def __len__(self): + return len(self._entries) + + def __iter__(self): + return iter(self._entries) + + def __getitem__(self, index): + return self._entries[index] + + @property + def suid(self): + return self._suid + + @property + def shape(self): + """The shape of the data (nz, ny, nx).""" + return self._info["shape"] + + @property + def sampling(self): + """The sampling (voxel distances) of the data (dz, dy, dx).""" + return self._info["sampling"] + + @property + def info(self): + """A dictionary containing the information as present in the + first dicomfile of this serie. None if there are no entries.""" + return self._info + + @property + def description(self): + """A description of the dicom series. Used fields are + PatientName, shape of the data, SeriesDescription, and + ImageComments. 
+ """ + info = self.info + + # If no info available, return simple description + if not info: # pragma: no cover + return "DicomSeries containing %i images" % len(self) + + fields = [] + # Give patient name + if "PatientName" in info: + fields.append("" + info["PatientName"]) + # Also add dimensions + if self.shape: + tmp = [str(d) for d in self.shape] + fields.append("x".join(tmp)) + # Try adding more fields + if "SeriesDescription" in info: + fields.append("'" + info["SeriesDescription"] + "'") + if "ImageComments" in info: + fields.append("'" + info["ImageComments"] + "'") + + # Combine + return " ".join(fields) + + def __repr__(self): + adr = hex(id(self)).upper() + return "" % (len(self), adr) + + def get_numpy_array(self): + """Get (load) the data that this DicomSeries represents, and return + it as a numpy array. If this serie contains multiple images, the + resulting array is 3D, otherwise it's 2D. + """ + + # It's easy if no file or if just a single file + if len(self) == 0: + raise ValueError("Serie does not contain any files.") + elif len(self) == 1: + return self[0].get_numpy_array() + + # Check info + if self.info is None: + raise RuntimeError("Cannot return volume if series not finished.") + + # Init data (using what the dicom packaged produces as a reference) + slice = self[0].get_numpy_array() + vol = np.zeros(self.shape, dtype=slice.dtype) + vol[0] = slice + + # Fill volume + self._progressIndicator.start("loading data", "", len(self)) + for z in range(1, len(self)): + vol[z] = self[z].get_numpy_array() + self._progressIndicator.set_progress(z + 1) + self._progressIndicator.finish() + + # Done + import gc + + gc.collect() + return vol + + def _append(self, dcm): + self._entries.append(dcm) + + def _sort(self): + self._entries.sort( + key=lambda k: ( + k.InstanceNumber, + ( + k.ImagePositionPatient[2] + if hasattr(k, "ImagePositionPatient") + else None + ), + ) + ) + + def _finish(self): + """ + Evaluate the series of dicom files. 
Together they should make up + a volumetric dataset. This means the files should meet certain + conditions. Also some additional information has to be calculated, + such as the distance between the slices. This method sets the + attributes for "shape", "sampling" and "info". + + This method checks: + * that there are no missing files + * that the dimensions of all images match + * that the pixel spacing of all images match + """ + + # The datasets list should be sorted by instance number + L = self._entries + if len(L) == 0: + return + elif len(L) == 1: + self._info = L[0].info + return + + # Get previous + ds1 = L[0] + # Init measures to calculate average of + distance_sum = 0.0 + # Init measures to check (these are in 2D) + dimensions = ds1.Rows, ds1.Columns + # sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1]) + sampling = ds1.info["sampling"][:2] # row, column + + for index in range(len(L)): + # The first round ds1 and ds2 will be the same, for the + # distance calculation this does not matter + # Get current + ds2 = L[index] + # Get positions + pos1 = float(ds1.ImagePositionPatient[2]) + pos2 = float(ds2.ImagePositionPatient[2]) + # Update distance_sum to calculate distance later + distance_sum += abs(pos1 - pos2) + # Test measures + dimensions2 = ds2.Rows, ds2.Columns + # sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1]) + sampling2 = ds2.info["sampling"][:2] # row, column + if dimensions != dimensions2: + # We cannot produce a volume if the dimensions match + raise ValueError("Dimensions of slices does not match.") + if sampling != sampling2: + # We can still produce a volume, but we should notify the user + self._progressIndicator.write("Warn: sampling does not match.") + # Store previous + ds1 = ds2 + + # Finish calculating average distance + # (Note that there are len(L)-1 distances) + distance_mean = distance_sum / (len(L) - 1) + + # Set info dict + self._info = L[0].info.copy() + + # Store information that is specific 
for the serie + self._info["shape"] = (len(L),) + ds2.info["shape"] + self._info["sampling"] = (distance_mean,) + ds2.info["sampling"] + + +def list_files(files, path): + """List all files in the directory, recursively.""" + for item in os.listdir(path): + item = os.path.join(path, item) + if os.path.isdir(item): + list_files(files, item) + elif os.path.isfile(item): + files.append(item) + + +def process_directory(request, progressIndicator, readPixelData=False): + """ + Reads dicom files and returns a list of DicomSeries objects, which + contain information about the data, and can be used to load the + image or volume data. + + if readPixelData is True, the pixel data of all series is read. By + default the loading of pixeldata is deferred until it is requested + using the DicomSeries.get_pixel_array() method. In general, both + methods should be equally fast. + """ + # Get directory to examine + if os.path.isdir(request.filename): + path = request.filename + elif os.path.isfile(request.filename): + path = os.path.dirname(request.filename) + else: # pragma: no cover - tested earlier + raise ValueError("Dicom plugin needs a valid filename to examine the directory") + + # Check files + files = [] + list_files(files, path) # Find files recursively + + # Gather file data and put in DicomSeries + series = {} + count = 0 + progressIndicator.start("examining files", "files", len(files)) + for filename in files: + # Show progress (note that we always start with a 0.0) + count += 1 + progressIndicator.set_progress(count) + # Skip DICOMDIR files + if filename.count("DICOMDIR"): # pragma: no cover + continue + # Try loading dicom ... 
+ try: + dcm = SimpleDicomReader(filename) + except NotADicomFile: + continue # skip non-dicom file + except Exception as why: # pragma: no cover + progressIndicator.write(str(why)) + continue + # Get SUID and register the file with an existing or new series object + try: + suid = dcm.SeriesInstanceUID + except AttributeError: # pragma: no cover + continue # some other kind of dicom file + if suid not in series: + series[suid] = DicomSeries(suid, progressIndicator) + series[suid]._append(dcm) + + # Finish progress + # progressIndicator.finish('Found %i series.' % len(series)) + + # Make a list and sort, so that the order is deterministic + series = list(series.values()) + series.sort(key=lambda x: x.suid) + + # Split series if necessary + for serie in reversed([serie for serie in series]): + splitSerieIfRequired(serie, series, progressIndicator) + + # Finish all series + # progressIndicator.start('analyse series', '', len(series)) + series_ = [] + for i in range(len(series)): + try: + series[i]._finish() + series_.append(series[i]) + except Exception as err: # pragma: no cover + progressIndicator.write(str(err)) + pass # Skip serie (probably report-like file without pixels) + # progressIndicator.set_progress(i+1) + progressIndicator.finish("Found %i correct series." % len(series_)) + + # Done + return series_ + + +def splitSerieIfRequired(serie, series, progressIndicator): + """ + Split the serie in multiple series if this is required. The choice + is based on examing the image position relative to the previous + image. If it differs too much, it is assumed that there is a new + dataset. This can happen for example in unspitted gated CT data. 
+ """ + + # Sort the original list and get local name + serie._sort() + L = serie._entries + # Init previous slice + ds1 = L[0] + # Check whether we can do this + if "ImagePositionPatient" not in ds1: + return + # Initialize a list of new lists + L2 = [[ds1]] + # Init slice distance estimate + distance = 0 + + for index in range(1, len(L)): + # Get current slice + ds2 = L[index] + # Get positions + pos1 = float(ds1.ImagePositionPatient[2]) + pos2 = float(ds2.ImagePositionPatient[2]) + # Get distances + newDist = abs(pos1 - pos2) + # deltaDist = abs(firstPos-pos2) + # If the distance deviates more than 2x from what we've seen, + # we can agree it's a new dataset. + if distance and newDist > 2.1 * distance: + L2.append([]) + distance = 0 + else: + # Test missing file + if distance and newDist > 1.5 * distance: + progressIndicator.write( + "Warning: missing file after %r" % ds1._filename + ) + distance = newDist + # Add to last list + L2[-1].append(ds2) + # Store previous + ds1 = ds2 + + # Split if we should + if len(L2) > 1: + # At what position are we now? + i = series.index(serie) + # Create new series + series2insert = [] + for L in L2: + newSerie = DicomSeries(serie.suid, progressIndicator) + newSerie._entries = L + series2insert.append(newSerie) + # Insert series and remove self + for newSerie in reversed(series2insert): + series.insert(i, newSerie) + series.remove(serie) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/_freeimage.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_freeimage.py new file mode 100644 index 0000000000000000000000000000000000000000..99f7cbb1b2b05dd23c7a976bbc9bf31b85af0f38 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_freeimage.py @@ -0,0 +1,1312 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +# styletest: ignore E261 + +""" Module imageio/freeimage.py + +This module contains the wrapper code for the freeimage library. 
+The functions defined in this module are relatively thin; just thin +enough so that arguments and results are native Python/numpy data +types. + +""" + +import os +import sys +import ctypes +import threading +import logging +import numpy + +from ..core import ( + get_remote_file, + load_lib, + Dict, + resource_dirs, + IS_PYPY, + get_platform, + InternetNotAllowedError, + NeedDownloadError, +) + +logger = logging.getLogger(__name__) + +TEST_NUMPY_NO_STRIDES = False # To test pypy fallback + +FNAME_PER_PLATFORM = { + "osx32": "libfreeimage-3.16.0-osx10.6.dylib", # universal library + "osx64": "libfreeimage-3.16.0-osx10.6.dylib", + "win32": "FreeImage-3.18.0-win32.dll", + "win64": "FreeImage-3.18.0-win64.dll", + "linux32": "libfreeimage-3.16.0-linux32.so", + "linux64": "libfreeimage-3.16.0-linux64.so", +} + + +def download(directory=None, force_download=False): + """Download the FreeImage library to your computer. + + Parameters + ---------- + directory : str | None + The directory where the file will be cached if a download was + required to obtain the file. By default, the appdata directory + is used. This is also the first directory that is checked for + a local version of the file. + force_download : bool | str + If True, the file will be downloaded even if a local copy exists + (and this copy will be overwritten). Can also be a YYYY-MM-DD date + to ensure a file is up-to-date (modified date of a file on disk, + if present, is checked). 
+ """ + plat = get_platform() + if plat and plat in FNAME_PER_PLATFORM: + fname = "freeimage/" + FNAME_PER_PLATFORM[plat] + get_remote_file(fname=fname, directory=directory, force_download=force_download) + fi._lib = None # allow trying again (needed to make tests work) + + +def get_freeimage_lib(): + """Ensure we have our version of the binary freeimage lib.""" + + lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None) + if lib: # pragma: no cover + return lib + + # Get filename to load + # If we do not provide a binary, the system may still do ... + plat = get_platform() + if plat and plat in FNAME_PER_PLATFORM: + try: + return get_remote_file("freeimage/" + FNAME_PER_PLATFORM[plat], auto=False) + except InternetNotAllowedError: + pass + except NeedDownloadError: + raise NeedDownloadError( + "Need FreeImage library. " + "You can obtain it with either:\n" + " - download using the command: " + "imageio_download_bin freeimage\n" + " - download by calling (in Python): " + "imageio.plugins.freeimage.download()\n" + ) + except RuntimeError as e: # pragma: no cover + logger.warning(str(e)) + + +# Define function to encode a filename to bytes (for the current system) +def efn(x): + return x.encode(sys.getfilesystemencoding()) + + +# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255 +GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32) + + +class FI_TYPES(object): + FIT_UNKNOWN = 0 + FIT_BITMAP = 1 + FIT_UINT16 = 2 + FIT_INT16 = 3 + FIT_UINT32 = 4 + FIT_INT32 = 5 + FIT_FLOAT = 6 + FIT_DOUBLE = 7 + FIT_COMPLEX = 8 + FIT_RGB16 = 9 + FIT_RGBA16 = 10 + FIT_RGBF = 11 + FIT_RGBAF = 12 + + dtypes = { + FIT_BITMAP: numpy.uint8, + FIT_UINT16: numpy.uint16, + FIT_INT16: numpy.int16, + FIT_UINT32: numpy.uint32, + FIT_INT32: numpy.int32, + FIT_FLOAT: numpy.float32, + FIT_DOUBLE: numpy.float64, + FIT_COMPLEX: numpy.complex128, + FIT_RGB16: numpy.uint16, + FIT_RGBA16: numpy.uint16, + FIT_RGBF: numpy.float32, + FIT_RGBAF: numpy.float32, + } + + fi_types = { + 
(numpy.uint8, 1): FIT_BITMAP, + (numpy.uint8, 3): FIT_BITMAP, + (numpy.uint8, 4): FIT_BITMAP, + (numpy.uint16, 1): FIT_UINT16, + (numpy.int16, 1): FIT_INT16, + (numpy.uint32, 1): FIT_UINT32, + (numpy.int32, 1): FIT_INT32, + (numpy.float32, 1): FIT_FLOAT, + (numpy.float64, 1): FIT_DOUBLE, + (numpy.complex128, 1): FIT_COMPLEX, + (numpy.uint16, 3): FIT_RGB16, + (numpy.uint16, 4): FIT_RGBA16, + (numpy.float32, 3): FIT_RGBF, + (numpy.float32, 4): FIT_RGBAF, + } + + extra_dims = { + FIT_UINT16: [], + FIT_INT16: [], + FIT_UINT32: [], + FIT_INT32: [], + FIT_FLOAT: [], + FIT_DOUBLE: [], + FIT_COMPLEX: [], + FIT_RGB16: [3], + FIT_RGBA16: [4], + FIT_RGBF: [3], + FIT_RGBAF: [4], + } + + +class IO_FLAGS(object): + FIF_LOAD_NOPIXELS = 0x8000 # loading: load the image header only + # # (not supported by all plugins) + BMP_DEFAULT = 0 + BMP_SAVE_RLE = 1 + CUT_DEFAULT = 0 + DDS_DEFAULT = 0 + EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression + EXR_FLOAT = 0x0001 # save data as float instead of half (not recommended) + EXR_NONE = 0x0002 # save with no compression + EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines + EXR_PIZ = 0x0008 # save with piz-based wavelet compression + EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression + EXR_B44 = 0x0020 # save with lossy 44% float compression + # # - goes to 22% when combined with EXR_LC + EXR_LC = 0x0040 # save images with one luminance and two chroma channels, + # # rather than as RGB (lossy compression) + FAXG3_DEFAULT = 0 + GIF_DEFAULT = 0 + GIF_LOAD256 = 1 # Load the image as a 256 color image with ununsed + # # palette entries, if it's 16 or 2 color + GIF_PLAYBACK = 2 # 'Play' the GIF to generate each frame (as 32bpp) + # # instead of returning raw frame data when loading + HDR_DEFAULT = 0 + ICO_DEFAULT = 0 + ICO_MAKEALPHA = 1 # convert to 32bpp and create an alpha channel from the + # # AND-mask when loading + IFF_DEFAULT = 0 + J2K_DEFAULT = 0 # save with a 16:1 rate + JP2_DEFAULT 
= 0 # save with a 16:1 rate + JPEG_DEFAULT = 0 # loading (see JPEG_FAST); + # # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420) + JPEG_FAST = 0x0001 # load the file as fast as possible, + # # sacrificing some quality + JPEG_ACCURATE = 0x0002 # load the file with the best quality, + # # sacrificing some speed + JPEG_CMYK = 0x0004 # load separated CMYK "as is" + # # (use | to combine with other load flags) + JPEG_EXIFROTATE = 0x0008 # load and rotate according to + # # Exif 'Orientation' tag if available + JPEG_QUALITYSUPERB = 0x80 # save with superb quality (100:1) + JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1) + JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1) + JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1) + JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1) + JPEG_PROGRESSIVE = 0x2000 # save as a progressive-JPEG + # # (use | to combine with other save flags) + JPEG_SUBSAMPLING_411 = 0x1000 # save with high 4x1 chroma + # # subsampling (4:1:1) + JPEG_SUBSAMPLING_420 = 0x4000 # save with medium 2x2 medium chroma + # # subsampling (4:2:0) - default value + JPEG_SUBSAMPLING_422 = 0x8000 # save /w low 2x1 chroma subsampling (4:2:2) + JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4) + JPEG_OPTIMIZE = 0x20000 # on saving, compute optimal Huffman coding tables + # # (can reduce a few percent of file size) + JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers + KOALA_DEFAULT = 0 + LBM_DEFAULT = 0 + MNG_DEFAULT = 0 + PCD_DEFAULT = 0 + PCD_BASE = 1 # load the bitmap sized 768 x 512 + PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256 + PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128 + PCX_DEFAULT = 0 + PFM_DEFAULT = 0 + PICT_DEFAULT = 0 + PNG_DEFAULT = 0 + PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction + PNG_Z_BEST_SPEED = 0x0001 # save using ZLib level 1 compression flag + # # (default value is 6) + PNG_Z_DEFAULT_COMPRESSION = 0x0006 # save using ZLib level 6 
compression + # # flag (default recommended value) + PNG_Z_BEST_COMPRESSION = 0x0009 # save using ZLib level 9 compression flag + # # (default value is 6) + PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression + PNG_INTERLACED = 0x0200 # save using Adam7 interlacing (use | to combine + # # with other save flags) + PNM_DEFAULT = 0 + PNM_SAVE_RAW = 0 # Writer saves in RAW format (i.e. P4, P5 or P6) + PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. P1, P2 or P3) + PSD_DEFAULT = 0 + PSD_CMYK = 1 # reads tags for separated CMYK (default is conversion to RGB) + PSD_LAB = 2 # reads tags for CIELab (default is conversion to RGB) + RAS_DEFAULT = 0 + RAW_DEFAULT = 0 # load the file as linear RGB 48-bit + RAW_PREVIEW = 1 # try to load the embedded JPEG preview with included + # # Exif Data or default to RGB 24-bit + RAW_DISPLAY = 2 # load the file as RGB 24-bit + SGI_DEFAULT = 0 + TARGA_DEFAULT = 0 + TARGA_LOAD_RGB888 = 1 # Convert RGB555 and ARGB8888 -> RGB888. + TARGA_SAVE_RLE = 2 # Save with RLE compression + TIFF_DEFAULT = 0 + TIFF_CMYK = 0x0001 # reads/stores tags for separated CMYK + # # (use | to combine with compression flags) + TIFF_PACKBITS = 0x0100 # save using PACKBITS compression + TIFF_DEFLATE = 0x0200 # save using DEFLATE (a.k.a. 
ZLIB) compression + TIFF_ADOBE_DEFLATE = 0x0400 # save using ADOBE DEFLATE compression + TIFF_NONE = 0x0800 # save without any compression + TIFF_CCITTFAX3 = 0x1000 # save using CCITT Group 3 fax encoding + TIFF_CCITTFAX4 = 0x2000 # save using CCITT Group 4 fax encoding + TIFF_LZW = 0x4000 # save using LZW compression + TIFF_JPEG = 0x8000 # save using JPEG compression + TIFF_LOGLUV = 0x10000 # save using LogLuv compression + WBMP_DEFAULT = 0 + XBM_DEFAULT = 0 + XPM_DEFAULT = 0 + + +class METADATA_MODELS(object): + FIMD_COMMENTS = 0 + FIMD_EXIF_MAIN = 1 + FIMD_EXIF_EXIF = 2 + FIMD_EXIF_GPS = 3 + FIMD_EXIF_MAKERNOTE = 4 + FIMD_EXIF_INTEROP = 5 + FIMD_IPTC = 6 + FIMD_XMP = 7 + FIMD_GEOTIFF = 8 + FIMD_ANIMATION = 9 + + +class METADATA_DATATYPE(object): + FIDT_BYTE = 1 # 8-bit unsigned integer + FIDT_ASCII = 2 # 8-bit bytes w/ last byte null + FIDT_SHORT = 3 # 16-bit unsigned integer + FIDT_LONG = 4 # 32-bit unsigned integer + FIDT_RATIONAL = 5 # 64-bit unsigned fraction + FIDT_SBYTE = 6 # 8-bit signed integer + FIDT_UNDEFINED = 7 # 8-bit untyped data + FIDT_SSHORT = 8 # 16-bit signed integer + FIDT_SLONG = 9 # 32-bit signed integer + FIDT_SRATIONAL = 10 # 64-bit signed fraction + FIDT_FLOAT = 11 # 32-bit IEEE floating point + FIDT_DOUBLE = 12 # 64-bit IEEE floating point + FIDT_IFD = 13 # 32-bit unsigned integer (offset) + FIDT_PALETTE = 14 # 32-bit RGBQUAD + FIDT_LONG8 = 16 # 64-bit unsigned integer + FIDT_SLONG8 = 17 # 64-bit signed integer + FIDT_IFD8 = 18 # 64-bit unsigned integer (offset) + + dtypes = { + FIDT_BYTE: numpy.uint8, + FIDT_SHORT: numpy.uint16, + FIDT_LONG: numpy.uint32, + FIDT_RATIONAL: [("numerator", numpy.uint32), ("denominator", numpy.uint32)], + FIDT_LONG8: numpy.uint64, + FIDT_SLONG8: numpy.int64, + FIDT_IFD8: numpy.uint64, + FIDT_SBYTE: numpy.int8, + FIDT_UNDEFINED: numpy.uint8, + FIDT_SSHORT: numpy.int16, + FIDT_SLONG: numpy.int32, + FIDT_SRATIONAL: [("numerator", numpy.int32), ("denominator", numpy.int32)], + FIDT_FLOAT: numpy.float32, + 
FIDT_DOUBLE: numpy.float64, + FIDT_IFD: numpy.uint32, + FIDT_PALETTE: [ + ("R", numpy.uint8), + ("G", numpy.uint8), + ("B", numpy.uint8), + ("A", numpy.uint8), + ], + } + + +class Freeimage(object): + """Class to represent an interface to the FreeImage library. + This class is relatively thin. It provides a Pythonic API that converts + Freeimage objects to Python objects, but that's about it. + The actual implementation should be provided by the plugins. + + The recommended way to call into the Freeimage library (so that + errors and warnings show up in the right moment) is to use this + object as a context manager: + with imageio.fi as lib: + lib.FreeImage_GetPalette() + + """ + + _API = { + # All we're doing here is telling ctypes that some of the + # FreeImage functions return pointers instead of integers. (On + # 64-bit systems, without this information the pointers get + # truncated and crashes result). There's no need to list + # functions that return ints, or the types of the parameters + # to these or other functions -- that's fine to do implicitly. + # Note that the ctypes immediately converts the returned void_p + # back to a python int again! This is really not helpful, + # because then passing it back to another library call will + # cause truncation-to-32-bits on 64-bit systems. Thanks, ctypes! + # So after these calls one must immediately re-wrap the int as + # a c_void_p if it is to be passed back into FreeImage. 
+ "FreeImage_AllocateT": (ctypes.c_void_p, None), + "FreeImage_FindFirstMetadata": (ctypes.c_void_p, None), + "FreeImage_GetBits": (ctypes.c_void_p, None), + "FreeImage_GetPalette": (ctypes.c_void_p, None), + "FreeImage_GetTagKey": (ctypes.c_char_p, None), + "FreeImage_GetTagValue": (ctypes.c_void_p, None), + "FreeImage_CreateTag": (ctypes.c_void_p, None), + "FreeImage_Save": (ctypes.c_void_p, None), + "FreeImage_Load": (ctypes.c_void_p, None), + "FreeImage_LoadFromMemory": (ctypes.c_void_p, None), + "FreeImage_OpenMultiBitmap": (ctypes.c_void_p, None), + "FreeImage_LoadMultiBitmapFromMemory": (ctypes.c_void_p, None), + "FreeImage_LockPage": (ctypes.c_void_p, None), + "FreeImage_OpenMemory": (ctypes.c_void_p, None), + # 'FreeImage_ReadMemory': (ctypes.c_void_p, None), + # 'FreeImage_CloseMemory': (ctypes.c_void_p, None), + "FreeImage_GetVersion": (ctypes.c_char_p, None), + "FreeImage_GetFIFExtensionList": (ctypes.c_char_p, None), + "FreeImage_GetFormatFromFIF": (ctypes.c_char_p, None), + "FreeImage_GetFIFDescription": (ctypes.c_char_p, None), + "FreeImage_ColorQuantizeEx": (ctypes.c_void_p, None), + # Pypy wants some extra definitions, so here we go ... 
        "FreeImage_IsLittleEndian": (ctypes.c_int, None),
        "FreeImage_SetOutputMessage": (ctypes.c_void_p, None),
        "FreeImage_GetFIFCount": (ctypes.c_int, None),
        "FreeImage_IsPluginEnabled": (ctypes.c_int, None),
        "FreeImage_GetFileType": (ctypes.c_int, None),
        #
        "FreeImage_GetTagType": (ctypes.c_int, None),
        "FreeImage_GetTagLength": (ctypes.c_int, None),
        "FreeImage_FindNextMetadata": (ctypes.c_int, None),
        "FreeImage_FindCloseMetadata": (ctypes.c_void_p, None),
        #
        "FreeImage_GetFIFFromFilename": (ctypes.c_int, None),
        "FreeImage_FIFSupportsReading": (ctypes.c_int, None),
        "FreeImage_FIFSupportsWriting": (ctypes.c_int, None),
        "FreeImage_FIFSupportsExportType": (ctypes.c_int, None),
        "FreeImage_FIFSupportsExportBPP": (ctypes.c_int, None),
        "FreeImage_GetHeight": (ctypes.c_int, None),
        "FreeImage_GetWidth": (ctypes.c_int, None),
        "FreeImage_GetImageType": (ctypes.c_int, None),
        "FreeImage_GetBPP": (ctypes.c_int, None),
        "FreeImage_GetColorsUsed": (ctypes.c_int, None),
        "FreeImage_ConvertTo32Bits": (ctypes.c_void_p, None),
        "FreeImage_GetPitch": (ctypes.c_int, None),
        "FreeImage_Unload": (ctypes.c_void_p, None),
    }

    def __init__(self):
        """Set up lazy library loading, locking, and the FI message handler."""
        # Initialize freeimage lib as None (loaded lazily via the `lib` property)
        self._lib = None

        # A lock to create thread-safety (the instance is used as a
        # context manager; __enter__/__exit__ acquire/release this lock)
        self._lock = threading.RLock()

        # Init log messages lists (filled by the error_handler callback)
        self._messages = []

        # Select functype for error handler (calling convention differs
        # between Windows and other platforms)
        if sys.platform.startswith("win"):
            functype = ctypes.WINFUNCTYPE
        else:
            functype = ctypes.CFUNCTYPE

        # Create output message handler
        @functype(None, ctypes.c_int, ctypes.c_char_p)
        def error_handler(fif, message):
            message = message.decode("utf-8")
            self._messages.append(message)
            # Keep at most 256 messages (see get_output_log)
            while (len(self._messages)) > 256:
                self._messages.pop(0)

        # Make sure to keep a ref to function
        # (otherwise the callback would be garbage-collected while the
        # C library still holds a pointer to it)
        self._error_handler = error_handler

    @property
    def lib(self):
        """The loaded ctypes library object; loads it on first access.

        Raises RuntimeError if loading failed.
        """
        if self._lib is None:
            try:
                self.load_freeimage()
            except OSError as err:
                # NOTE(review): on failure, _lib holds the error *string* as a
                # sentinel; every later access re-raises RuntimeError instead
                # of retrying. download() resets fi._lib to None to allow retry.
                self._lib = "The freeimage library could not be loaded: "
                self._lib += str(err)
        if isinstance(self._lib, str):
            raise RuntimeError(self._lib)
        return self._lib

    def has_lib(self):
        """Return True if the FreeImage library can be loaded, else False."""
        try:
            self.lib
        except Exception:
            return False
        return True

    def load_freeimage(self):
        """Try to load the freeimage lib from the system. If not successful,
        try to download the imageio version and try again.
        """
        # Load library and register API
        success = False
        try:
            # Try without forcing a download, but giving preference
            # to the imageio-provided lib (if previously downloaded)
            self._load_freeimage()
            self._register_api()
            # NOTE(review): this is a *lexicographic* string comparison; it
            # works for the 3.1x versions shipped here, but e.g. "3.9" would
            # also compare >= "3.15" — confirm intended.
            if self.lib.FreeImage_GetVersion().decode("utf-8") >= "3.15":
                success = True
        except OSError:
            pass

        if not success:
            # Ensure we have our own lib, try again
            get_freeimage_lib()
            self._load_freeimage()
            self._register_api()

        # Wrap up: route FreeImage warnings/errors into self._messages
        self.lib.FreeImage_SetOutputMessage(self._error_handler)
        self.lib_version = self.lib.FreeImage_GetVersion().decode("utf-8")

    def _load_freeimage(self):
        """Locate and load the FreeImage shared library via ctypes."""
        # Define names: generic stems and exact file names to try
        lib_names = ["freeimage", "libfreeimage"]
        exact_lib_names = [
            "FreeImage",
            "libfreeimage.dylib",
            "libfreeimage.so",
            "libfreeimage.so.3",
        ]
        # Add names of libraries that we provide (that file may not exist)
        res_dirs = resource_dirs()
        plat = get_platform()
        if plat:  # Can be None on e.g. FreeBSD
            fname = FNAME_PER_PLATFORM[plat]
            for dir in res_dirs:
                exact_lib_names.insert(0, os.path.join(dir, "freeimage", fname))

        # Add the path specified with IMAGEIO_FREEIMAGE_LIB:
        # (inserted first, so an explicit user setting takes precedence)
        lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None)
        if lib is not None:
            exact_lib_names.insert(0, lib)

        # Load
        try:
            lib, fname = load_lib(exact_lib_names, lib_names, res_dirs)
        except OSError as err:  # pragma: no cover
            err_msg = str(err) + "\nPlease install the FreeImage library."
            raise OSError(err_msg)

        # Store
        self._lib = lib
        self.lib_fname = fname

    def _register_api(self):
        # Albert's ctypes pattern: apply the restype/argtypes declared in
        # _API so pointer-returning functions are not truncated on 64 bit.
        for f, (restype, argtypes) in self._API.items():
            func = getattr(self.lib, f)
            func.restype = restype
            func.argtypes = argtypes

    # Handling of output messages

    def __enter__(self):
        # Serialize access to the library; returns the raw ctypes lib so
        # callers can write `with fi as lib: lib.FreeImage_...()`.
        self._lock.acquire()
        return self.lib

    def __exit__(self, *args):
        # Surface any FreeImage messages produced inside the block, then
        # release the lock.
        self._show_any_warnings()
        self._lock.release()

    def _reset_log(self):
        """Reset the list of output messages. Call this before
        loading or saving an image with the FreeImage API.
        """
        self._messages = []

    def _get_error_message(self):
        """Get the output messages produced since the last reset as
        one string. Returns 'No known reason.' if there are no messages.
        Also resets the log.
        """
        if self._messages:
            res = " ".join(self._messages)
            self._reset_log()
            return res
        else:
            return "No known reason."

    def _show_any_warnings(self):
        """If there were any messages since the last reset, show them
        as a warning. Otherwise do nothing. Also resets the messages.
        """
        if self._messages:
            logger.warning("imageio.freeimage warning: " + self._get_error_message())
            self._reset_log()

    def get_output_log(self):
        """Return a list of the last 256 output messages
        (warnings and errors) produced by the FreeImage library.
        """
        # This message log is not cleared/reset, but kept to 256 elements.
        return [m for m in self._messages]

    def getFIF(self, filename, mode, bb=None):
        """Get the freeimage Format (FIF) from a given filename.
        If mode is 'r', will try to determine the format by reading
        the file, otherwise only the filename is used.

        This function also tests whether the format supports reading/writing.
        """
        with self as lib:
            # Init
            ftype = -1
            # NOTE(review): this is a substring test, so mode="" (and "rw")
            # also passes — confirm callers only ever pass "r" or "w".
            if mode not in "rw":
                raise ValueError('Invalid mode (must be "r" or "w").')

            # Try getting format from the content.
Note that some files + # do not have a header that allows reading the format from + # the file. + if mode == "r": + if bb is not None: + fimemory = lib.FreeImage_OpenMemory(ctypes.c_char_p(bb), len(bb)) + ftype = lib.FreeImage_GetFileTypeFromMemory( + ctypes.c_void_p(fimemory), len(bb) + ) + lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + if (ftype == -1) and os.path.isfile(filename): + ftype = lib.FreeImage_GetFileType(efn(filename), 0) + # Try getting the format from the extension + if ftype == -1: + ftype = lib.FreeImage_GetFIFFromFilename(efn(filename)) + + # Test if ok + if ftype == -1: + raise ValueError('Cannot determine format of file "%s"' % filename) + elif mode == "w" and not lib.FreeImage_FIFSupportsWriting(ftype): + raise ValueError('Cannot write the format of file "%s"' % filename) + elif mode == "r" and not lib.FreeImage_FIFSupportsReading(ftype): + raise ValueError('Cannot read the format of file "%s"' % filename) + return ftype + + def create_bitmap(self, filename, ftype, flags=0): + """create_bitmap(filename, ftype, flags=0) + Create a wrapped bitmap object. + """ + return FIBitmap(self, filename, ftype, flags) + + def create_multipage_bitmap(self, filename, ftype, flags=0): + """create_multipage_bitmap(filename, ftype, flags=0) + Create a wrapped multipage bitmap object. 
+ """ + return FIMultipageBitmap(self, filename, ftype, flags) + + +class FIBaseBitmap(object): + def __init__(self, fi, filename, ftype, flags): + self._fi = fi + self._filename = filename + self._ftype = ftype + self._flags = flags + self._bitmap = None + self._close_funcs = [] + + def __del__(self): + self.close() + + def close(self): + if (self._bitmap is not None) and self._close_funcs: + for close_func in self._close_funcs: + try: + with self._fi: + fun = close_func[0] + fun(*close_func[1:]) + except Exception: # pragma: no cover + pass + self._close_funcs = [] + self._bitmap = None + + def _set_bitmap(self, bitmap, close_func=None): + """Function to set the bitmap and specify the function to unload it.""" + if self._bitmap is not None: + pass # bitmap is converted + if close_func is None: + close_func = self._fi.lib.FreeImage_Unload, bitmap + + self._bitmap = bitmap + if close_func: + self._close_funcs.append(close_func) + + def get_meta_data(self): + # todo: there is also FreeImage_TagToString, is that useful? + # and would that work well when reading and then saving? 
+ + # Create a list of (model_name, number) tuples + models = [ + (name[5:], number) + for name, number in METADATA_MODELS.__dict__.items() + if name.startswith("FIMD_") + ] + + # Prepare + metadata = Dict() + tag = ctypes.c_void_p() + + with self._fi as lib: + # Iterate over all FreeImage meta models + for model_name, number in models: + # Find beginning, get search handle + mdhandle = lib.FreeImage_FindFirstMetadata( + number, self._bitmap, ctypes.byref(tag) + ) + mdhandle = ctypes.c_void_p(mdhandle) + if mdhandle: + # Iterate over all tags in this model + more = True + while more: + # Get info about tag + tag_name = lib.FreeImage_GetTagKey(tag).decode("utf-8") + tag_type = lib.FreeImage_GetTagType(tag) + byte_size = lib.FreeImage_GetTagLength(tag) + char_ptr = ctypes.c_char * byte_size + data = char_ptr.from_address(lib.FreeImage_GetTagValue(tag)) + # Convert in a way compatible with Pypy + tag_bytes = bytes(bytearray(data)) + # The default value is the raw bytes + tag_val = tag_bytes + # Convert to a Python value in the metadata dict + if tag_type == METADATA_DATATYPE.FIDT_ASCII: + tag_val = tag_bytes.decode("utf-8", "replace") + elif tag_type in METADATA_DATATYPE.dtypes: + dtype = METADATA_DATATYPE.dtypes[tag_type] + if IS_PYPY and isinstance(dtype, (list, tuple)): + pass # pragma: no cover - or we get a segfault + else: + try: + tag_val = numpy.frombuffer( + tag_bytes, dtype=dtype + ).copy() + if len(tag_val) == 1: + tag_val = tag_val[0] + except Exception: # pragma: no cover + pass + # Store data in dict + subdict = metadata.setdefault(model_name, Dict()) + subdict[tag_name] = tag_val + # Next + more = lib.FreeImage_FindNextMetadata( + mdhandle, ctypes.byref(tag) + ) + + # Close search handle for current meta model + lib.FreeImage_FindCloseMetadata(mdhandle) + + # Done + return metadata + + def set_meta_data(self, metadata): + # Create a dict mapping model_name to number + models = {} + for name, number in METADATA_MODELS.__dict__.items(): + if 
name.startswith("FIMD_"): + models[name[5:]] = number + + # Create a mapping from numpy.dtype to METADATA_DATATYPE + def get_tag_type_number(dtype): + for number, numpy_dtype in METADATA_DATATYPE.dtypes.items(): + if dtype == numpy_dtype: + return number + else: + return None + + with self._fi as lib: + for model_name, subdict in metadata.items(): + # Get model number + number = models.get(model_name, None) + if number is None: + continue # Unknown model, silent ignore + + for tag_name, tag_val in subdict.items(): + # Create new tag + tag = lib.FreeImage_CreateTag() + tag = ctypes.c_void_p(tag) + + try: + # Convert Python value to FI type, val + is_ascii = False + if isinstance(tag_val, str): + try: + tag_bytes = tag_val.encode("ascii") + is_ascii = True + except UnicodeError: + pass + if is_ascii: + tag_type = METADATA_DATATYPE.FIDT_ASCII + tag_count = len(tag_bytes) + else: + if not hasattr(tag_val, "dtype"): + tag_val = numpy.array([tag_val]) + tag_type = get_tag_type_number(tag_val.dtype) + if tag_type is None: + logger.warning( + "imageio.freeimage warning: Could not " + "determine tag type of %r." 
% tag_name + ) + continue + tag_bytes = tag_val.tobytes() + tag_count = tag_val.size + # Set properties + lib.FreeImage_SetTagKey(tag, tag_name.encode("utf-8")) + lib.FreeImage_SetTagType(tag, tag_type) + lib.FreeImage_SetTagCount(tag, tag_count) + lib.FreeImage_SetTagLength(tag, len(tag_bytes)) + lib.FreeImage_SetTagValue(tag, tag_bytes) + # Store tag + tag_key = lib.FreeImage_GetTagKey(tag) + lib.FreeImage_SetMetadata(number, self._bitmap, tag_key, tag) + + except Exception as err: # pragma: no cover + logger.warning( + "imagio.freeimage warning: Could not set tag " + "%r: %s, %s" + % (tag_name, self._fi._get_error_message(), str(err)) + ) + finally: + lib.FreeImage_DeleteTag(tag) + + +class FIBitmap(FIBaseBitmap): + """Wrapper for the FI bitmap object.""" + + def allocate(self, array): + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + + # Get shape and channel info + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + elif len(shape) == 3: + n_channels = shape[2] + else: + n_channels = shape[0] + + # Get fi_type + try: + fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)] + self._fi_type = fi_type + except KeyError: + raise ValueError("Cannot write arrays of given type and shape.") + + # Allocate bitmap + with self._fi as lib: + bpp = 8 * dtype.itemsize * n_channels + bitmap = lib.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise RuntimeError( + "Could not allocate bitmap for storage: %s" + % self._fi._get_error_message() + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def load_from_filename(self, filename=None): + if filename is None: + filename = self._filename + + with self._fi as lib: + # Create bitmap + bitmap = lib.FreeImage_Load(self._ftype, efn(filename), self._flags) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise 
ValueError( + 'Could not load bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # bitmap = lib.FreeImage_LoadFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # bitmap = ctypes.c_void_p(bitmap) + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not bitmap: + # raise ValueError('Could not load bitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def save_to_filename(self, filename=None): + if filename is None: + filename = self._filename + + ftype = self._ftype + bitmap = self._bitmap + fi_type = self._fi_type # element type + + with self._fi as lib: + # Check if can write + if fi_type == FI_TYPES.FIT_BITMAP: + can_write = lib.FreeImage_FIFSupportsExportBPP( + ftype, lib.FreeImage_GetBPP(bitmap) + ) + else: + can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + if not can_write: + raise TypeError("Cannot save image of this format to this file type") + + # Save to file + res = lib.FreeImage_Save(ftype, bitmap, efn(filename), self._flags) + # Check + if res is None: # pragma: no cover, we do so many checks, this is rare + raise RuntimeError( + f"Could not save file `{self._filename}`: {self._fi._get_error_message()}" + ) + + # def save_to_bytes(self): + # ftype = self._ftype + # bitmap = self._bitmap + # fi_type = self._fi_type # element type + # + # with self._fi as lib: + # # Check if can write + # if fi_type == FI_TYPES.FIT_BITMAP: + # can_write = lib.FreeImage_FIFSupportsExportBPP(ftype, + # lib.FreeImage_GetBPP(bitmap)) + # else: + # can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + # if not can_write: + # raise TypeError('Cannot save image of this format 
' + # 'to this file type') + # + # # Extract the bytes + # fimemory = lib.FreeImage_OpenMemory(0, 0) + # res = lib.FreeImage_SaveToMemory(ftype, bitmap, + # ctypes.c_void_p(fimemory), + # self._flags) + # if res: + # N = lib.FreeImage_TellMemory(ctypes.c_void_p(fimemory)) + # result = ctypes.create_string_buffer(N) + # lib.FreeImage_SeekMemory(ctypes.c_void_p(fimemory), 0) + # lib.FreeImage_ReadMemory(result, 1, N, ctypes.c_void_p(fimemory)) + # result = result.raw + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not res: + # raise RuntimeError('Could not save file "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # + # # Done + # return result + + def get_image_data(self): + dtype, shape, bpp = self._get_type_and_shape() + array = self._wrap_bitmap_bits_in_array(shape, dtype, False) + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # swizzle the color components and flip the scanlines to go from + # FreeImage's BGR[A] and upside-down internal memory format to + # something more normal + def n(arr): + # return arr[..., ::-1].T # Does not work on numpypy yet + if arr.ndim == 1: # pragma: no cover + return arr[::-1].T + elif arr.ndim == 2: # Always the case here ... + return arr[:, ::-1].T + elif arr.ndim == 3: # pragma: no cover + return arr[:, :, ::-1].T + elif arr.ndim == 4: # pragma: no cover + return arr[:, :, :, ::-1].T + + if len(shape) == 3 and isle and dtype.type == numpy.uint8: + b = n(array[0]) + g = n(array[1]) + r = n(array[2]) + if shape[0] == 3: + return numpy.dstack((r, g, b)) + elif shape[0] == 4: + a = n(array[3]) + return numpy.dstack((r, g, b, a)) + else: # pragma: no cover - we check this earlier + raise ValueError("Cannot handle images of shape %s" % shape) + + # We need to copy because array does *not* own its memory + # after bitmap is freed. 
+ a = n(array).copy() + return a + + def set_image_data(self, array): + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # Calculate shape and channels + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + w_shape = (c, r) + elif len(shape) == 3: + n_channels = shape[2] + w_shape = (n_channels, c, r) + else: + n_channels = shape[0] + + def n(arr): # normalise to freeimage's in-memory format + return arr[::-1].T + + wrapped_array = self._wrap_bitmap_bits_in_array(w_shape, dtype, True) + # swizzle the color components and flip the scanlines to go to + # FreeImage's BGR[A] and upside-down internal memory format + # The BGR[A] order is only used for 8bits per channel images + # on little endian machines. For everything else RGB[A] is + # used. + if len(shape) == 3 and isle and dtype.type == numpy.uint8: + R = array[:, :, 0] + G = array[:, :, 1] + B = array[:, :, 2] + wrapped_array[0] = n(B) + wrapped_array[1] = n(G) + wrapped_array[2] = n(R) + if shape[2] == 4: + A = array[:, :, 3] + wrapped_array[3] = n(A) + else: + wrapped_array[:] = n(array) + if self._need_finish: + self._finish_wrapped_array(wrapped_array) + + if len(shape) == 2 and dtype.type == numpy.uint8: + with self._fi as lib: + palette = lib.FreeImage_GetPalette(self._bitmap) + palette = ctypes.c_void_p(palette) + if not palette: + raise RuntimeError("Could not get image palette") + try: + palette_data = GREY_PALETTE.ctypes.data + except Exception: # pragma: no cover - IS_PYPY + palette_data = GREY_PALETTE.__array_interface__["data"][0] + ctypes.memmove(palette, palette_data, 1024) + + def _wrap_bitmap_bits_in_array(self, shape, dtype, save): + """Return an ndarray view on the data in a FreeImage bitmap. Only + valid for as long as the bitmap is loaded (if single page) / locked + in memory (if multipage). 
This is used in loading data, but + also during saving, to prepare a strided numpy array buffer. + + """ + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + + # Get more info + height = shape[-1] + byte_size = height * pitch + itemsize = dtype.itemsize + + # Get strides + if len(shape) == 3: + strides = (itemsize, shape[0] * itemsize, pitch) + else: + strides = (itemsize, pitch) + + # Create numpy array and return + data = (ctypes.c_char * byte_size).from_address(bits) + try: + self._need_finish = False + if TEST_NUMPY_NO_STRIDES: + raise NotImplementedError() + return numpy.ndarray(shape, dtype=dtype, buffer=data, strides=strides) + except NotImplementedError: + # IS_PYPY - not very efficient. We create a C-contiguous + # numpy array (because pypy does not support Fortran-order) + # and shape it such that the rest of the code can remain. + if save: + self._need_finish = True # Flag to use _finish_wrapped_array + return numpy.zeros(shape, dtype=dtype) + else: + bb = bytes(bytearray(data)) + array = numpy.frombuffer(bb, dtype=dtype).copy() + # Deal with strides + if len(shape) == 3: + array.shape = shape[2], strides[-1] // shape[0], shape[0] + array2 = array[: shape[2], : shape[1], : shape[0]] + array = numpy.zeros(shape, dtype=array.dtype) + for i in range(shape[0]): + array[i] = array2[:, :, i].T + else: + array.shape = shape[1], strides[-1] + array = array[: shape[1], : shape[0]].T + return array + + def _finish_wrapped_array(self, array): # IS_PYPY + """Hardcore way to inject numpy array in bitmap.""" + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + bpp = lib.FreeImage_GetBPP(self._bitmap) + # Get channels and realwidth + nchannels = bpp // 8 // array.itemsize + realwidth = pitch // nchannels + # Apply padding for pitch if necessary + extra = realwidth - array.shape[-2] + assert 0 <= extra 
< 10 + # Make sort of Fortran, also take padding (i.e. pitch) into account + newshape = array.shape[-1], realwidth, nchannels + array2 = numpy.zeros(newshape, array.dtype) + if nchannels == 1: + array2[:, : array.shape[-2], 0] = array.T + else: + for i in range(nchannels): + array2[:, : array.shape[-2], i] = array[i, :, :].T + # copy data + data_ptr = array2.__array_interface__["data"][0] + ctypes.memmove(bits, data_ptr, array2.nbytes) + del array2 + + def _get_type_and_shape(self): + bitmap = self._bitmap + + # Get info on bitmap + with self._fi as lib: + w = lib.FreeImage_GetWidth(bitmap) + h = lib.FreeImage_GetHeight(bitmap) + self._fi_type = fi_type = lib.FreeImage_GetImageType(bitmap) + if not fi_type: + raise ValueError("Unknown image pixel type") + + # Determine required props for numpy array + bpp = None + dtype = FI_TYPES.dtypes[fi_type] + + if fi_type == FI_TYPES.FIT_BITMAP: + with self._fi as lib: + bpp = lib.FreeImage_GetBPP(bitmap) + has_pallette = lib.FreeImage_GetColorsUsed(bitmap) + if has_pallette: + # Examine the palette. 
If it is grayscale, we return as such + if has_pallette == 256: + palette = lib.FreeImage_GetPalette(bitmap) + palette = ctypes.c_void_p(palette) + p = (ctypes.c_uint8 * (256 * 4)).from_address(palette.value) + p = numpy.frombuffer(p, numpy.uint32).copy() + if (GREY_PALETTE == p).all(): + extra_dims = [] + return numpy.dtype(dtype), extra_dims + [w, h], bpp + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + elif bpp == 8: + extra_dims = [] + elif bpp == 24: + extra_dims = [3] + elif bpp == 32: + extra_dims = [4] + else: # pragma: no cover + # raise ValueError('Cannot convert %d BPP bitmap' % bpp) + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + else: + extra_dims = FI_TYPES.extra_dims[fi_type] + + # Return dtype and shape + return numpy.dtype(dtype), extra_dims + [w, h], bpp + + def quantize(self, quantizer=0, palettesize=256): + """Quantize the bitmap to make it 8-bit (paletted). Returns a new + FIBitmap object. + Only for 24 bit images. + """ + with self._fi as lib: + # New bitmap + bitmap = lib.FreeImage_ColorQuantizeEx( + self._bitmap, quantizer, palettesize, 0, None + ) + bitmap = ctypes.c_void_p(bitmap) + + # Check and return + if not bitmap: + raise ValueError( + 'Could not quantize bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + + new = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + new._fi_type = self._fi_type + return new + + +# def convert_to_32bit(self): +# """ Convert to 32bit image. 
+# """ +# with self._fi as lib: +# # New bitmap +# bitmap = lib.FreeImage_ConvertTo32Bits(self._bitmap) +# bitmap = ctypes.c_void_p(bitmap) +# +# # Check and return +# if not bitmap: +# raise ValueError('Could not convert bitmap to 32bit "%s": %s' % +# (self._filename, +# self._fi._get_error_message())) +# else: +# new = FIBitmap(self._fi, self._filename, self._ftype, +# self._flags) +# new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) +# new._fi_type = self._fi_type +# return new + + +class FIMultipageBitmap(FIBaseBitmap): + """Wrapper for the multipage FI bitmap object.""" + + def load_from_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = False + read_only = True + keep_cache_in_memory = False + + # Try opening + with self._fi as lib: + # Create bitmap + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + self._flags, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + err = self._fi._get_error_message() + raise ValueError( + 'Could not open file "%s" as multi-image: %s' + % (self._filename, err) + ) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # multibitmap = lib.FreeImage_LoadMultiBitmapFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # multibitmap = ctypes.c_void_p(multibitmap) + # #lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # self._mem = fimemory + # self._bytes = bb + # # Check + # if not multibitmap: + # raise ValueError('Could not load multibitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(multibitmap, + # (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def 
save_to_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = True + read_only = False + keep_cache_in_memory = False + + # Open the file + # todo: Set flags at close func + with self._fi as lib: + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + 0, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + msg = 'Could not open file "%s" for writing multi-image: %s' % ( + self._filename, + self._fi._get_error_message(), + ) + raise ValueError(msg) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def __len__(self): + with self._fi as lib: + return lib.FreeImage_GetPageCount(self._bitmap) + + def get_page(self, index): + """Return the sub-bitmap for the given page index. + Please close the returned bitmap when done. + """ + with self._fi as lib: + # Create low-level bitmap in freeimage + bitmap = lib.FreeImage_LockPage(self._bitmap, index) + bitmap = ctypes.c_void_p(bitmap) + if not bitmap: # pragma: no cover + raise ValueError( + "Could not open sub-image %i in %r: %s" + % (index, self._filename, self._fi._get_error_message()) + ) + + # Get bitmap object to wrap this bitmap + bm = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + bm._set_bitmap( + bitmap, (lib.FreeImage_UnlockPage, self._bitmap, bitmap, False) + ) + return bm + + def append_bitmap(self, bitmap): + """Add a sub-bitmap to the multi-page bitmap.""" + with self._fi as lib: + # no return value + lib.FreeImage_AppendPage(self._bitmap, bitmap._bitmap) + + +# Create instance +fi = Freeimage() diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/_tifffile.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_tifffile.py new file mode 100644 index 0000000000000000000000000000000000000000..bcdf728d8f214db6c7080f8951d73645a7bf7227 --- /dev/null 
+++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_tifffile.py @@ -0,0 +1,10675 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# tifffile.py + +# Copyright (c) 2008-2018, Christoph Gohlke +# Copyright (c) 2008-2018, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Read image and meta data from (bio) TIFF(R) files. Save numpy arrays as TIFF. 
+ +Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH, +SGI, ImageJ, MicroManager, FluoView, ScanImage, SEQ, GEL, and GeoTIFF files. + +Tifffile is not a general-purpose TIFF library. +Only a subset of the TIFF specification is supported, mainly uncompressed and +losslessly compressed 1, 8, 16, 32 and 64 bit integer, 16, 32 and 64-bit float, +grayscale and RGB(A) images, which are commonly used in scientific imaging. +Specifically, reading slices of image data, image trees defined via SubIFDs, +CCITT and OJPEG compression, chroma subsampling without JPEG compression, +or IPTC and XMP metadata are not implemented. + +TIFF(R), the tagged Image File Format, is a trademark and under control of +Adobe Systems Incorporated. BigTIFF allows for files greater than 4 GB. +STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions +defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss +MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics, +Molecular Dynamics, and the Open Microscopy Environment consortium +respectively. + +For command line usage run C{python -m tifffile --help} + +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2018.06.15 + +Requirements +------------ +* `CPython 3.6 64-bit `_ +* `Numpy 1.14 `_ +* `Matplotlib 2.2 `_ (optional for plotting) +* `Tifffile.c 2018.02.10 `_ + (recommended for faster decoding of PackBits and LZW encoded strings) +* `Tifffile_geodb.py 2018.02.10 `_ + (optional enums for GeoTIFF metadata) +* Python 2 requires 'futures', 'enum34', 'pathlib'. + +Revisions +--------- +2018.06.15 + Pass 2680 tests. + Towards reading JPEG and other compressions via imagecodecs package (WIP). + Add function to validate TIFF using 'jhove -m TIFF-hul'. + Save bool arrays as bilevel TIFF. + Accept pathlib.Path as filenames. + Move 'software' argument from TiffWriter __init__ to save. 
+ Raise DOS limit to 16 TB. + Lazy load lzma and zstd compressors and decompressors. + Add option to save IJMetadata tags. + Return correct number of pages for truncated series (bug fix). + Move EXIF tags to TIFF.TAG as per TIFF/EP standard. +2018.02.18 + Pass 2293 tests. + Always save RowsPerStrip and Resolution tags as required by TIFF standard. + Do not use badly typed ImageDescription. + Coherce bad ASCII string tags to bytes. + Tuning of __str__ functions. + Fix reading 'undefined' tag values (bug fix). + Read and write ZSTD compressed data. + Use hexdump to print byte strings. + Determine TIFF byte order from data dtype in imsave. + Add option to specify RowsPerStrip for compressed strips. + Allow memory map of arrays with non-native byte order. + Attempt to handle ScanImage <= 5.1 files. + Restore TiffPageSeries.pages sequence interface. + Use numpy.frombuffer instead of fromstring to read from binary data. + Parse GeoTIFF metadata. + Add option to apply horizontal differencing before compression. + Towards reading PerkinElmer QPTIFF (no test files). + Do not index out of bounds data in tifffile.c unpackbits and decodelzw. +2017.09.29 (tentative) + Many backwards incompatible changes improving speed and resource usage: + Pass 2268 tests. + Add detail argument to __str__ function. Remove info functions. + Fix potential issue correcting offsets of large LSM files with positions. + Remove TiffFile sequence interface; use TiffFile.pages instead. + Do not make tag values available as TiffPage attributes. + Use str (not bytes) type for tag and metadata strings (WIP). + Use documented standard tag and value names (WIP). + Use enums for some documented TIFF tag values. + Remove 'memmap' and 'tmpfile' options; use out='memmap' instead. + Add option to specify output in asarray functions. + Add option to concurrently decode image strips or tiles using threads. + Add TiffPage.asrgb function (WIP). + Do not apply colormap in asarray. 
+ Remove 'colormapped', 'rgbonly', and 'scale_mdgel' options from asarray. + Consolidate metadata in TiffFile _metadata functions. + Remove non-tag metadata properties from TiffPage. + Add function to convert LSM to tiled BIN files. + Align image data in file. + Make TiffPage.dtype a numpy.dtype. + Add 'ndim' and 'size' properties to TiffPage and TiffPageSeries. + Allow imsave to write non-BigTIFF files up to ~4 GB. + Only read one page for shaped series if possible. + Add memmap function to create memory-mapped array stored in TIFF file. + Add option to save empty arrays to TIFF files. + Add option to save truncated TIFF files. + Allow single tile images to be saved contiguously. + Add optional movie mode for files with uniform pages. + Lazy load pages. + Use lightweight TiffFrame for IFDs sharing properties with key TiffPage. + Move module constants to 'TIFF' namespace (speed up module import). + Remove 'fastij' option from TiffFile. + Remove 'pages' parameter from TiffFile. + Remove TIFFfile alias. + Deprecate Python 2. + Require enum34 and futures packages on Python 2.7. + Remove Record class and return all metadata as dict instead. + Add functions to parse STK, MetaSeries, ScanImage, SVS, Pilatus metadata. + Read tags from EXIF and GPS IFDs. + Use pformat for tag and metadata values. + Fix reading some UIC tags (bug fix). + Do not modify input array in imshow (bug fix). + Fix Python implementation of unpack_ints. +2017.05.23 + Pass 1961 tests. + Write correct number of SampleFormat values (bug fix). + Use Adobe deflate code to write ZIP compressed files. + Add option to pass tag values as packed binary data for writing. + Defer tag validation to attribute access. + Use property instead of lazyattr decorator for simple expressions. +2017.03.17 + Write IFDs and tag values on word boundaries. + Read ScanImage metadata. + Remove is_rgb and is_indexed attributes from TiffFile. + Create files used by doctests. +2017.01.12 + Read Zeiss SEM metadata. 
+ Read OME-TIFF with invalid references to external files. + Rewrite C LZW decoder (5x faster). + Read corrupted LSM files missing EOI code in LZW stream. +2017.01.01 + Add option to append images to existing TIFF files. + Read files without pages. + Read S-FEG and Helios NanoLab tags created by FEI software. + Allow saving Color Filter Array (CFA) images. + Add info functions returning more information about TiffFile and TiffPage. + Add option to read specific pages only. + Remove maxpages argument (backwards incompatible). + Remove test_tifffile function. +2016.10.28 + Pass 1944 tests. + Improve detection of ImageJ hyperstacks. + Read TVIPS metadata created by EM-MENU (by Marco Oster). + Add option to disable using OME-XML metadata. + Allow non-integer range attributes in modulo tags (by Stuart Berg). +2016.06.21 + Do not always memmap contiguous data in page series. +2016.05.13 + Add option to specify resolution unit. + Write grayscale images with extra samples when planarconfig is specified. + Do not write RGB color images with 2 samples. + Reorder TiffWriter.save keyword arguments (backwards incompatible). +2016.04.18 + Pass 1932 tests. + TiffWriter, imread, and imsave accept open binary file streams. +2016.04.13 + Correctly handle reversed fill order in 2 and 4 bps images (bug fix). + Implement reverse_bitorder in C. +2016.03.18 + Fix saving additional ImageJ metadata. +2016.02.22 + Pass 1920 tests. + Write 8 bytes double tag values using offset if necessary (bug fix). + Add option to disable writing second image description tag. + Detect tags with incorrect counts. + Disable color mapping for LSM. +2015.11.13 + Read LSM 6 mosaics. + Add option to specify directory of memory-mapped files. + Add command line options to specify vmin and vmax values for colormapping. +2015.10.06 + New helper function to apply colormaps. + Renamed is_palette attributes to is_indexed (backwards incompatible). + Color-mapped samples are now contiguous (backwards incompatible). 
+ Do not color-map ImageJ hyperstacks (backwards incompatible). + Towards reading Leica SCN. +2015.09.25 + Read images with reversed bit order (FillOrder is LSB2MSB). +2015.09.21 + Read RGB OME-TIFF. + Warn about malformed OME-XML. +2015.09.16 + Detect some corrupted ImageJ metadata. + Better axes labels for 'shaped' files. + Do not create TiffTag for default values. + Chroma subsampling is not supported. + Memory-map data in TiffPageSeries if possible (optional). +2015.08.17 + Pass 1906 tests. + Write ImageJ hyperstacks (optional). + Read and write LZMA compressed data. + Specify datetime when saving (optional). + Save tiled and color-mapped images (optional). + Ignore void bytecounts and offsets if possible. + Ignore bogus image_depth tag created by ISS Vista software. + Decode floating point horizontal differencing (not tiled). + Save image data contiguously if possible. + Only read first IFD from ImageJ files if possible. + Read ImageJ 'raw' format (files larger than 4 GB). + TiffPageSeries class for pages with compatible shape and data type. + Try to read incomplete tiles. + Open file dialog if no filename is passed on command line. + Ignore errors when decoding OME-XML. + Rename decoder functions (backwards incompatible). +2014.08.24 + TiffWriter class for incremental writing images. + Simplify examples. +2014.08.19 + Add memmap function to FileHandle. + Add function to determine if image data in TiffPage is memory-mappable. + Do not close files if multifile_close parameter is False. +2014.08.10 + Pass 1730 tests. + Return all extrasamples by default (backwards incompatible). + Read data from series of pages into memory-mapped array (optional). + Squeeze OME dimensions (backwards incompatible). + Workaround missing EOI code in strips. + Support image and tile depth tags (SGI extension). + Better handling of STK/UIC tags (backwards incompatible). + Disable color mapping for STK. + Julian to datetime converter. + TIFF ASCII type may be NULL separated. 
+ Unwrap strip offsets for LSM files greater than 4 GB. + Correct strip byte counts in compressed LSM files. + Skip missing files in OME series. + Read embedded TIFF files. +2014.02.05 + Save rational numbers as type 5 (bug fix). +2013.12.20 + Keep other files in OME multi-file series closed. + FileHandle class to abstract binary file handle. + Disable color mapping for bad OME-TIFF produced by bio-formats. + Read bad OME-XML produced by ImageJ when cropping. +2013.11.03 + Allow zlib compress data in imsave function (optional). + Memory-map contiguous image data (optional). +2013.10.28 + Read MicroManager metadata and little-endian ImageJ tag. + Save extra tags in imsave function. + Save tags in ascending order by code (bug fix). +2012.10.18 + Accept file like objects (read from OIB files). +2012.08.21 + Rename TIFFfile to TiffFile and TIFFpage to TiffPage. + TiffSequence class for reading sequence of TIFF files. + Read UltraQuant tags. + Allow float numbers as resolution in imsave function. +2012.08.03 + Read MD GEL tags and NIH Image header. +2012.07.25 + Read ImageJ tags. + ... + +Notes +----- +The API is not stable yet and might change between revisions. + +Tested on little-endian platforms only. + +Other Python packages and modules for reading (bio) scientific TIFF files: + +* `python-bioformats `_ +* `Imread `_ +* `PyLibTiff `_ +* `ITK `_ +* `PyLSM `_ +* `PyMca.TiffIO.py `_ (same as fabio.TiffIO) +* `BioImageXD.Readers `_ +* `Cellcognition.io `_ +* `pymimage `_ +* `pytiff `_ + +Acknowledgements +---------------- +* Egor Zindy, University of Manchester, for lsm_scan_info specifics. +* Wim Lewis for a bug fix and some LSM functions. +* Hadrien Mary for help on reading MicroManager files. +* Christian Kliche for help writing tiled and color-mapped files. + +References +---------- +1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated. + http://partners.adobe.com/public/developer/tiff/ +2) TIFF File Format FAQ. 
http://www.awaresystems.be/imaging/tiff/faq.html +3) MetaMorph Stack (STK) Image File Format. + http://support.meta.moleculardevices.com/docs/t10243.pdf +4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010). + Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011 +5) The OME-TIFF format. + http://www.openmicroscopy.org/site/support/file-formats/ome-tiff +6) UltraQuant(r) Version 6.0 for Windows Start-Up Guide. + http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf +7) Micro-Manager File Formats. + http://www.micro-manager.org/wiki/Micro-Manager_File_Formats +8) Tags for TIFF and Related Specifications. Digital Preservation. + http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml +9) ScanImage BigTiff Specification - ScanImage 2016. + http://scanimage.vidriotechnologies.com/display/SI2016/ + ScanImage+BigTiff+Specification +10) CIPA DC-008-2016: Exchangeable image file format for digital still cameras: + Exif Version 2.31. + http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf + +Examples +-------- +>>> # write numpy array to TIFF file +>>> data = numpy.random.rand(4, 301, 219) +>>> imsave('temp.tif', data, photometric='minisblack') + +>>> # read numpy array from TIFF file +>>> image = imread('temp.tif') +>>> numpy.testing.assert_array_equal(image, data) + +>>> # iterate over pages and tags in TIFF file +>>> with TiffFile('temp.tif') as tif: +... images = tif.asarray() +... for page in tif.pages: +... for tag in page.tags.values(): +... _ = tag.name, tag.value +... 
image = page.asarray() + +""" + +from __future__ import division, print_function + +import sys +import os +import io +import re +import glob +import math +import zlib +import time +import json +import enum +import struct +import pathlib +import warnings +import binascii +import tempfile +import datetime +import threading +import collections +import multiprocessing +import concurrent.futures + +import numpy + +# delay imports: mmap, pprint, fractions, xml, tkinter, matplotlib, lzma, zstd, +# subprocess + +__version__ = "2018.06.15" +__docformat__ = "restructuredtext en" +__all__ = ( + "imsave", + "imread", + "imshow", + "memmap", + "TiffFile", + "TiffWriter", + "TiffSequence", + # utility functions used by oiffile or czifile + "FileHandle", + "lazyattr", + "natural_sorted", + "decode_lzw", + "stripnull", + "create_output", + "repeat_nd", + "format_size", + "product", + "xml2dict", +) + + +def imread(files, **kwargs): + """Return image data from TIFF file(s) as numpy array. + + Refer to the TiffFile class and member functions for documentation. + + Parameters + ---------- + files : str, binary stream, or sequence + File name, seekable binary stream, glob pattern, or sequence of + file names. + kwargs : dict + Parameters 'multifile' and 'is_ome' are passed to the TiffFile class. + The 'pattern' parameter is passed to the TiffSequence class. + Other parameters are passed to the asarray functions. + The first image series is returned if no arguments are provided. 
+ + Examples + -------- + >>> # get image from first page + >>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219)) + >>> im = imread('temp.tif', key=0) + >>> im.shape + (4, 301, 219) + + >>> # get images from sequence of files + >>> ims = imread(['temp.tif', 'temp.tif']) + >>> ims.shape + (2, 3, 4, 301, 219) + + """ + kwargs_file = parse_kwargs(kwargs, "multifile", "is_ome") + kwargs_seq = parse_kwargs(kwargs, "pattern") + + if isinstance(files, basestring) and any(i in files for i in "?*"): + files = glob.glob(files) + if not files: + raise ValueError("no files found") + if not hasattr(files, "seek") and len(files) == 1: + files = files[0] + + if isinstance(files, basestring) or hasattr(files, "seek"): + with TiffFile(files, **kwargs_file) as tif: + return tif.asarray(**kwargs) + else: + with TiffSequence(files, **kwargs_seq) as imseq: + return imseq.asarray(**kwargs) + + +def imsave(file, data=None, shape=None, dtype=None, bigsize=2**32 - 2**25, **kwargs): + """Write numpy array to TIFF file. + + Refer to the TiffWriter class and member functions for documentation. + + Parameters + ---------- + file : str or binary stream + File name or writable binary stream, such as an open file or BytesIO. + data : array_like + Input image. The last dimensions are assumed to be image depth, + height, width, and samples. + If None, an empty array of the specified shape and dtype is + saved to file. + Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order + is determined from the data's dtype or the dtype argument. + shape : tuple + If 'data' is None, shape of an empty array to save to the file. + dtype : numpy.dtype + If 'data' is None, data-type of an empty array to save to the file. + bigsize : int + Create a BigTIFF file if the size of data in bytes is larger than + this threshold and 'imagej' or 'truncate' are not enabled. + By default, the threshold is 4 GB minus 32 MB reserved for metadata. 
+ Use the 'bigtiff' parameter to explicitly specify the type of + file created. + kwargs : dict + Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed + to TiffWriter(). Other parameters are passed to TiffWriter.save(). + + Returns + ------- + If the image data are written contiguously, return offset and bytecount + of image data in the file. + + Examples + -------- + >>> # save a RGB image + >>> data = numpy.random.randint(0, 255, (256, 256, 3), 'uint8') + >>> imsave('temp.tif', data, photometric='rgb') + + >>> # save a random array and metadata, using compression + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> imsave('temp.tif', data, compress=6, metadata={'axes': 'TZCYX'}) + + """ + tifargs = parse_kwargs(kwargs, "append", "bigtiff", "byteorder", "imagej") + if data is None: + size = product(shape) * numpy.dtype(dtype).itemsize + byteorder = numpy.dtype(dtype).byteorder + else: + try: + size = data.nbytes + byteorder = data.dtype.byteorder + except Exception: + size = 0 + byteorder = None + if ( + size > bigsize + and "bigtiff" not in tifargs + and not (tifargs.get("imagej", False) or tifargs.get("truncate", False)) + ): + tifargs["bigtiff"] = True + if "byteorder" not in tifargs: + tifargs["byteorder"] = byteorder + + with TiffWriter(file, **tifargs) as tif: + return tif.save(data, shape, dtype, **kwargs) + + +def memmap(filename, shape=None, dtype=None, page=None, series=0, mode="r+", **kwargs): + """Return memory-mapped numpy array stored in TIFF file. + + Memory-mapping requires data stored in native byte order, without tiling, + compression, predictors, etc. + If 'shape' and 'dtype' are provided, existing files will be overwritten or + appended to depending on the 'append' parameter. + Otherwise the image data of a specified page or series in an existing + file will be memory-mapped. By default, the image data of the first page + series is memory-mapped. + Call flush() to write any changes in the array to the file. 
+ Raise ValueError if the image data in the file is not memory-mappable. + + Parameters + ---------- + filename : str + Name of the TIFF file which stores the array. + shape : tuple + Shape of the empty array. + dtype : numpy.dtype + Data-type of the empty array. + page : int + Index of the page which image data to memory-map. + series : int + Index of the page series which image data to memory-map. + mode : {'r+', 'r', 'c'}, optional + The file open mode. Default is to open existing file for reading and + writing ('r+'). + kwargs : dict + Additional parameters passed to imsave() or TiffFile(). + + Examples + -------- + >>> # create an empty TIFF file and write to memory-mapped image + >>> im = memmap('temp.tif', shape=(256, 256), dtype='float32') + >>> im[255, 255] = 1.0 + >>> im.flush() + >>> im.shape, im.dtype + ((256, 256), dtype('float32')) + >>> del im + + >>> # memory-map image data in a TIFF file + >>> im = memmap('temp.tif', page=0) + >>> im[255, 255] + 1.0 + + """ + if shape is not None and dtype is not None: + # create a new, empty array + kwargs.update( + data=None, + shape=shape, + dtype=dtype, + returnoffset=True, + align=TIFF.ALLOCATIONGRANULARITY, + ) + result = imsave(filename, **kwargs) + if result is None: + # TODO: fail before creating file or writing data + raise ValueError("image data are not memory-mappable") + offset = result[0] + else: + # use existing file + with TiffFile(filename, **kwargs) as tif: + if page is not None: + page = tif.pages[page] + if not page.is_memmappable: + raise ValueError("image data are not memory-mappable") + offset, _ = page.is_contiguous + shape = page.shape + dtype = page.dtype + else: + series = tif.series[series] + if series.offset is None: + raise ValueError("image data are not memory-mappable") + shape = series.shape + dtype = series.dtype + offset = series.offset + dtype = tif.byteorder + dtype.char + return numpy.memmap(filename, dtype, mode, offset, shape, "C") + + +class lazyattr(object): + """Attribute 
whose value is computed on first access.""" + + # TODO: help() doesn't work + __slots__ = ("func",) + + def __init__(self, func): + self.func = func + # self.__name__ = func.__name__ + # self.__doc__ = func.__doc__ + # self.lock = threading.RLock() + + def __get__(self, instance, owner): + # with self.lock: + if instance is None: + return self + try: + value = self.func(instance) + except AttributeError as e: + raise RuntimeError(e) + if value is NotImplemented: + return getattr(super(owner, instance), self.func.__name__) + setattr(instance, self.func.__name__, value) + return value + + +class TiffWriter(object): + """Write numpy arrays to TIFF file. + + TiffWriter instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + TiffWriter's main purpose is saving nD numpy array's as TIFF, + not to create any possible TIFF format. Specifically, JPEG compression, + SubIFDs, ExifIFD, or GPSIFD tags are not supported. + + Examples + -------- + >>> # successively append images to BigTIFF file + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> with TiffWriter('temp.tif', bigtiff=True) as tif: + ... for i in range(data.shape[0]): + ... tif.save(data[i], compress=6, photometric='minisblack') + + """ + + def __init__(self, file, bigtiff=False, byteorder=None, append=False, imagej=False): + """Open a TIFF file for writing. + + An empty TIFF file is created if the file does not exist, else the + file is overwritten with an empty TIFF file unless 'append' + is true. Use bigtiff=True when creating files larger than 4 GB. + + Parameters + ---------- + file : str, binary stream, or FileHandle + File name or writable binary stream, such as an open file + or BytesIO. + bigtiff : bool + If True, the BigTIFF format is used. + byteorder : {'<', '>', '=', '|'} + The endianness of the data in the file. + By default, this is the system's native byte order. 
+ append : bool + If True and 'file' is an existing standard TIFF file, image data + and tags are appended to the file. + Appending data may corrupt specifically formatted TIFF files + such as LSM, STK, ImageJ, NIH, or FluoView. + imagej : bool + If True, write an ImageJ hyperstack compatible file. + This format can handle data types uint8, uint16, or float32 and + data shapes up to 6 dimensions in TZCYXS order. + RGB images (S=3 or S=4) must be uint8. + ImageJ's default byte order is big-endian but this implementation + uses the system's native byte order by default. + ImageJ does not support BigTIFF format or LZMA compression. + The ImageJ file format is undocumented. + + """ + if append: + # determine if file is an existing TIFF file that can be extended + try: + with FileHandle(file, mode="rb", size=0) as fh: + pos = fh.tell() + try: + with TiffFile(fh) as tif: + if append != "force" and any( + getattr(tif, "is_" + a) + for a in ( + "lsm", + "stk", + "imagej", + "nih", + "fluoview", + "micromanager", + ) + ): + raise ValueError("file contains metadata") + byteorder = tif.byteorder + bigtiff = tif.is_bigtiff + self._ifdoffset = tif.pages.next_page_offset + except Exception as e: + raise ValueError("cannot append to file: %s" % str(e)) + finally: + fh.seek(pos) + except (IOError, FileNotFoundError): + append = False + + if byteorder in (None, "=", "|"): + byteorder = "<" if sys.byteorder == "little" else ">" + elif byteorder not in ("<", ">"): + raise ValueError("invalid byteorder %s" % byteorder) + if imagej and bigtiff: + warnings.warn("writing incompatible BigTIFF ImageJ") + + self._byteorder = byteorder + self._imagej = bool(imagej) + self._truncate = False + self._metadata = None + self._colormap = None + + self._descriptionoffset = 0 + self._descriptionlen = 0 + self._descriptionlenoffset = 0 + self._tags = None + self._shape = None # normalized shape of data in consecutive pages + self._datashape = None # shape of data in consecutive pages + 
self._datadtype = None # data type + self._dataoffset = None # offset to data + self._databytecounts = None # byte counts per plane + self._tagoffsets = None # strip or tile offset tag code + + if bigtiff: + self._bigtiff = True + self._offsetsize = 8 + self._tagsize = 20 + self._tagnoformat = "Q" + self._offsetformat = "Q" + self._valueformat = "8s" + else: + self._bigtiff = False + self._offsetsize = 4 + self._tagsize = 12 + self._tagnoformat = "H" + self._offsetformat = "I" + self._valueformat = "4s" + + if append: + self._fh = FileHandle(file, mode="r+b", size=0) + self._fh.seek(0, 2) + else: + self._fh = FileHandle(file, mode="wb", size=0) + self._fh.write({"<": b"II", ">": b"MM"}[byteorder]) + if bigtiff: + self._fh.write(struct.pack(byteorder + "HHH", 43, 8, 0)) + else: + self._fh.write(struct.pack(byteorder + "H", 42)) + # first IFD + self._ifdoffset = self._fh.tell() + self._fh.write(struct.pack(byteorder + self._offsetformat, 0)) + + def save( + self, + data=None, + shape=None, + dtype=None, + returnoffset=False, + photometric=None, + planarconfig=None, + tile=None, + contiguous=True, + align=16, + truncate=False, + compress=0, + rowsperstrip=None, + predictor=False, + colormap=None, + description=None, + datetime=None, + resolution=None, + software="tifffile.py", + metadata={}, + ijmetadata=None, + extratags=(), + ): + """Write numpy array and tags to TIFF file. + + The data shape's last dimensions are assumed to be image depth, + height (length), width, and samples. + If a colormap is provided, the data's dtype must be uint8 or uint16 + and the data values are indices into the last dimension of the + colormap. + If 'shape' and 'dtype' are specified, an empty array is saved. + This option cannot be used with compression or multiple tiles. + Image data are written uncompressed in one strip per plane by default. + Dimensions larger than 2 to 4 (depending on photometric mode, planar + configuration, and SGI mode) are flattened and saved as separate pages. 
+ The SampleFormat and BitsPerSample tags are derived from the data type. + + Parameters + ---------- + data : numpy.ndarray or None + Input image array. + shape : tuple or None + Shape of the empty array to save. Used only if 'data' is None. + dtype : numpy.dtype or None + Data-type of the empty array to save. Used only if 'data' is None. + returnoffset : bool + If True and the image data in the file is memory-mappable, return + the offset and number of bytes of the image data in the file. + photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} + The color space of the image data. + By default, this setting is inferred from the data shape and the + value of colormap. + For CFA images, DNG tags must be specified in 'extratags'. + planarconfig : {'CONTIG', 'SEPARATE'} + Specifies if samples are stored contiguous or in separate planes. + By default, this setting is inferred from the data shape. + If this parameter is set, extra samples are used to store grayscale + images. + 'CONTIG': last dimension contains samples. + 'SEPARATE': third last dimension contains samples. + tile : tuple of int + The shape (depth, length, width) of image tiles to write. + If None (default), image data are written in strips. + The tile length and width must be a multiple of 16. + If the tile depth is provided, the SGI ImageDepth and TileDepth + tags are used to save volume data. + Unless a single tile is used, tiles cannot be used to write + contiguous files. + Few software can read the SGI format, e.g. MeVisLab. + contiguous : bool + If True (default) and the data and parameters are compatible with + previous ones, if any, the image data are stored contiguously after + the previous one. Parameters 'photometric' and 'planarconfig' + are ignored. Parameters 'description', datetime', and 'extratags' + are written to the first page of a contiguous series only. + align : int + Byte boundary on which to align the image data in the file. + Default 16. 
Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. + Following contiguous writes are not aligned. + truncate : bool + If True, only write the first page including shape metadata if + possible (uncompressed, contiguous, not tiled). + Other TIFF readers will only be able to read part of the data. + compress : int or 'LZMA', 'ZSTD' + Values from 0 to 9 controlling the level of zlib compression. + If 0 (default), data are written uncompressed. + Compression cannot be used to write contiguous files. + If 'LZMA' or 'ZSTD', LZMA or ZSTD compression is used, which is + not available on all platforms. + rowsperstrip : int + The number of rows per strip used for compression. + Uncompressed data are written in one strip per plane. + predictor : bool + If True, apply horizontal differencing to integer type images + before compression. + colormap : numpy.ndarray + RGB color values for the corresponding data value. + Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. + description : str + The subject of the image. Must be 7-bit ASCII. Cannot be used with + the ImageJ format. Saved with the first page only. + datetime : datetime + Date and time of image creation in '%Y:%m:%d %H:%M:%S' format. + If None (default), the current date and time is used. + Saved with the first page only. + resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) + X and Y resolutions in pixels per resolution unit as float or + rational numbers. A third, optional parameter specifies the + resolution unit, which must be None (default for ImageJ), + 'INCH' (default), or 'CENTIMETER'. + software : str + Name of the software used to create the file. Must be 7-bit ASCII. + Saved with the first page only. + metadata : dict + Additional meta data to be saved along with shape information + in JSON or ImageJ formats in an ImageDescription tag. + If None, do not write a second ImageDescription tag. + Strings must be 7-bit ASCII. Saved with the first page only. 
+ ijmetadata : dict + Additional meta data to be saved in application specific + IJMetadata and IJMetadataByteCounts tags. Refer to the + imagej_metadata_tags function for valid keys and values. + Saved with the first page only. + extratags : sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. + count : int + Number of data values. Not used for string or byte string + values. + value : sequence + 'Count' values compatible with 'dtype'. + Byte strings must contain count values of dtype packed as + binary data. + writeonce : bool + If True, the tag is written to the first page only. + + """ + # TODO: refactor this function + fh = self._fh + byteorder = self._byteorder + + if data is None: + if compress: + raise ValueError("cannot save compressed empty file") + datashape = shape + datadtype = numpy.dtype(dtype).newbyteorder(byteorder) + datadtypechar = datadtype.char + else: + data = numpy.asarray(data, byteorder + data.dtype.char, "C") + if data.size == 0: + raise ValueError("cannot save empty array") + datashape = data.shape + datadtype = data.dtype + datadtypechar = data.dtype.char + + returnoffset = returnoffset and datadtype.isnative + bilevel = datadtypechar == "?" 
+ if bilevel: + index = -1 if datashape[-1] > 1 else -2 + datasize = product(datashape[:index]) + if datashape[index] % 8: + datasize *= datashape[index] // 8 + 1 + else: + datasize *= datashape[index] // 8 + else: + datasize = product(datashape) * datadtype.itemsize + + # just append contiguous data if possible + self._truncate = bool(truncate) + if self._datashape: + if ( + not contiguous + or self._datashape[1:] != datashape + or self._datadtype != datadtype + or (compress and self._tags) + or tile + or not numpy.array_equal(colormap, self._colormap) + ): + # incompatible shape, dtype, compression mode, or colormap + self._write_remaining_pages() + self._write_image_description() + self._truncate = False + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._datashape = None + self._colormap = None + if self._imagej: + raise ValueError("ImageJ does not support non-contiguous data") + else: + # consecutive mode + self._datashape = (self._datashape[0] + 1,) + datashape + if not compress: + # write contiguous data, write IFDs/tags later + offset = fh.tell() + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + if returnoffset: + return offset, datasize + return + + input_shape = datashape + tagnoformat = self._tagnoformat + valueformat = self._valueformat + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagsize = self._tagsize + + MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK + RGB = TIFF.PHOTOMETRIC.RGB + CFA = TIFF.PHOTOMETRIC.CFA + PALETTE = TIFF.PHOTOMETRIC.PALETTE + CONTIG = TIFF.PLANARCONFIG.CONTIG + SEPARATE = TIFF.PLANARCONFIG.SEPARATE + + # parse input + if photometric is not None: + photometric = enumarg(TIFF.PHOTOMETRIC, photometric) + if planarconfig: + planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) + if not compress: + compress = False + compresstag = 1 + predictor = False + else: + if isinstance(compress, (tuple, list)): + compress, compresslevel = compress + elif isinstance(compress, 
int): + compress, compresslevel = "ADOBE_DEFLATE", int(compress) + if not 0 <= compresslevel <= 9: + raise ValueError("invalid compression level %s" % compress) + else: + compresslevel = None + compress = compress.upper() + compresstag = enumarg(TIFF.COMPRESSION, compress) + + # prepare ImageJ format + if self._imagej: + if compress in ("LZMA", "ZSTD"): + raise ValueError("ImageJ cannot handle LZMA or ZSTD compression") + if description: + warnings.warn("not writing description to ImageJ file") + description = None + volume = False + if datadtypechar not in "BHhf": + raise ValueError("ImageJ does not support data type %s" % datadtypechar) + ijrgb = photometric == RGB if photometric else None + if datadtypechar not in "B": + ijrgb = False + ijshape = imagej_shape(datashape, ijrgb) + if ijshape[-1] in (3, 4): + photometric = RGB + if datadtypechar not in "B": + raise ValueError( + "ImageJ does not support data type %s " + "for RGB" % datadtypechar + ) + elif photometric is None: + photometric = MINISBLACK + planarconfig = None + if planarconfig == SEPARATE: + raise ValueError("ImageJ does not support planar images") + else: + planarconfig = CONTIG if ijrgb else None + + # define compress function + if compress: + if compresslevel is None: + compressor, compresslevel = TIFF.COMPESSORS[compresstag] + else: + compressor, _ = TIFF.COMPESSORS[compresstag] + compresslevel = int(compresslevel) + if predictor: + if datadtype.kind not in "iu": + raise ValueError("prediction not implemented for %s" % datadtype) + + def compress(data, level=compresslevel): + # horizontal differencing + diff = numpy.diff(data, axis=-2) + data = numpy.insert(diff, 0, data[..., 0, :], axis=-2) + return compressor(data, level) + + else: + + def compress(data, level=compresslevel): + return compressor(data, level) + + # verify colormap and indices + if colormap is not None: + if datadtypechar not in "BH": + raise ValueError("invalid data dtype for palette mode") + colormap = numpy.asarray(colormap, 
dtype=byteorder + "H") + if colormap.shape != (3, 2 ** (datadtype.itemsize * 8)): + raise ValueError("invalid color map shape") + self._colormap = colormap + + # verify tile shape + if tile: + tile = tuple(int(i) for i in tile[:3]) + volume = len(tile) == 3 + if ( + len(tile) < 2 + or tile[-1] % 16 + or tile[-2] % 16 + or any(i < 1 for i in tile) + ): + raise ValueError("invalid tile shape") + else: + tile = () + volume = False + + # normalize data shape to 5D or 6D, depending on volume: + # (pages, planar_samples, [depth,] height, width, contig_samples) + datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) + shape = datashape + ndim = len(datashape) + + samplesperpixel = 1 + extrasamples = 0 + if volume and ndim < 3: + volume = False + if colormap is not None: + photometric = PALETTE + planarconfig = None + if photometric is None: + photometric = MINISBLACK + if bilevel: + photometric = TIFF.PHOTOMETRIC.MINISWHITE + elif planarconfig == CONTIG: + if ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif planarconfig == SEPARATE: + if volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif self._imagej: + photometric = MINISBLACK + elif volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + if planarconfig and len(shape) <= (3 if volume else 2): + planarconfig = None + photometric = MINISBLACK + if photometric == RGB: + if len(shape) < 3: + raise ValueError("not a RGB(A) image") + if len(shape) < 4: + volume = False + if planarconfig is None: + if shape[-1] in (3, 4): + planarconfig = CONTIG + elif shape[-4 if volume else -3] in (3, 4): + planarconfig = SEPARATE + elif shape[-1] > shape[-4 if volume else -3]: + planarconfig = SEPARATE + else: + planarconfig = CONTIG + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume 
else -3) :] + samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,) + samplesperpixel = datashape[1] + if samplesperpixel > 3: + extrasamples = samplesperpixel - 3 + elif photometric == CFA: + if len(shape) != 2: + raise ValueError("invalid CFA image") + volume = False + planarconfig = None + datashape = (-1, 1) + shape[-2:] + (1,) + if 50706 not in (et[0] for et in extratags): + raise ValueError("must specify DNG tags for CFA image") + elif planarconfig and len(shape) > (3 if volume else 2): + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume else -3) :] + samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,) + samplesperpixel = datashape[1] + extrasamples = samplesperpixel - 1 + else: + planarconfig = None + # remove trailing 1s + while len(shape) > 2 and shape[-1] == 1: + shape = shape[:-1] + if len(shape) < 3: + volume = False + datashape = (-1, 1) + shape[(-3 if volume else -2) :] + (1,) + + # normalize shape to 6D + assert len(datashape) in (5, 6) + if len(datashape) == 5: + datashape = datashape[:2] + (1,) + datashape[2:] + if datashape[0] == -1: + s0 = product(input_shape) // product(datashape[1:]) + datashape = (s0,) + datashape[1:] + shape = datashape + if data is not None: + data = data.reshape(shape) + + if tile and not volume: + tile = (1, tile[-2], tile[-1]) + + if photometric == PALETTE: + if samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1: + raise ValueError("invalid data shape for palette mode") + + if photometric == RGB and samplesperpixel == 2: + raise ValueError("not a RGB image (samplesperpixel=2)") + + if bilevel: + if compress: + raise ValueError("cannot save compressed bilevel image") + if tile: + raise ValueError("cannot save tiled bilevel image") + if photometric not in (0, 1): + raise ValueError("cannot save bilevel image as %s" % str(photometric)) + datashape = list(datashape) + if datashape[-2] % 8: 
+ datashape[-2] = datashape[-2] // 8 + 1 + else: + datashape[-2] = datashape[-2] // 8 + datashape = tuple(datashape) + assert datasize == product(datashape) + if data is not None: + data = numpy.packbits(data, axis=-2) + assert datashape[-2] == data.shape[-2] + + bytestr = ( + bytes + if sys.version[0] == "2" + else (lambda x: bytes(x, "ascii") if isinstance(x, str) else x) + ) + tags = [] # list of (code, ifdentry, ifdvalue, writeonce) + + strip_or_tile = "Tile" if tile else "Strip" + tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + "ByteCounts"] + tag_offsets = TIFF.TAG_NAMES[strip_or_tile + "Offsets"] + self._tagoffsets = tag_offsets + + def pack(fmt, *val): + return struct.pack(byteorder + fmt, *val) + + def addtag(code, dtype, count, value, writeonce=False): + # Compute ifdentry & ifdvalue bytes from code, dtype, count, value + # Append (code, ifdentry, ifdvalue, writeonce) to tags list + code = int(TIFF.TAG_NAMES.get(code, code)) + try: + tifftype = TIFF.DATA_DTYPES[dtype] + except KeyError: + raise ValueError("unknown dtype %s" % dtype) + rawcount = count + + if dtype == "s": + # strings + value = bytestr(value) + b"\0" + count = rawcount = len(value) + rawcount = value.find(b"\0\0") + if rawcount < 0: + rawcount = count + else: + rawcount += 1 # length of string without buffer + value = (value,) + elif isinstance(value, bytes): + # packed binary data + dtsize = struct.calcsize(dtype) + if len(value) % dtsize: + raise ValueError("invalid packed binary data") + count = len(value) // dtsize + if len(dtype) > 1: + count *= int(dtype[:-1]) + dtype = dtype[-1] + ifdentry = [pack("HH", code, tifftype), pack(offsetformat, rawcount)] + ifdvalue = None + if struct.calcsize(dtype) * count <= offsetsize: + # value(s) can be written directly + if isinstance(value, bytes): + ifdentry.append(pack(valueformat, value)) + elif count == 1: + if isinstance(value, (tuple, list, numpy.ndarray)): + value = value[0] + ifdentry.append(pack(valueformat, pack(dtype, value))) + else: + 
ifdentry.append(pack(valueformat, pack(str(count) + dtype, *value))) + else: + # use offset to value(s) + ifdentry.append(pack(offsetformat, 0)) + if isinstance(value, bytes): + ifdvalue = value + elif isinstance(value, numpy.ndarray): + assert value.size == count + assert value.dtype.char == dtype + ifdvalue = value.tostring() + elif isinstance(value, (tuple, list)): + ifdvalue = pack(str(count) + dtype, *value) + else: + ifdvalue = pack(dtype, value) + tags.append((code, b"".join(ifdentry), ifdvalue, writeonce)) + + def rational(arg, max_denominator=1000000): + """ "Return nominator and denominator from float or two integers.""" + from fractions import Fraction # delayed import + + try: + f = Fraction.from_float(arg) + except TypeError: + f = Fraction(arg[0], arg[1]) + f = f.limit_denominator(max_denominator) + return f.numerator, f.denominator + + if description: + # user provided description + addtag("ImageDescription", "s", 0, description, writeonce=True) + + # write shape and metadata to ImageDescription + self._metadata = {} if not metadata else metadata.copy() + if self._imagej: + description = imagej_description( + input_shape, + shape[-1] in (3, 4), + self._colormap is not None, + **self._metadata + ) + elif metadata or metadata == {}: + if self._truncate: + self._metadata.update(truncated=True) + description = json_description(input_shape, **self._metadata) + else: + description = None + if description: + # add 64 bytes buffer + # the image description might be updated later with the final shape + description = str2bytes(description, "ascii") + description += b"\0" * 64 + self._descriptionlen = len(description) + addtag("ImageDescription", "s", 0, description, writeonce=True) + + if software: + addtag("Software", "s", 0, software, writeonce=True) + if datetime is None: + datetime = self._now() + addtag( + "DateTime", "s", 0, datetime.strftime("%Y:%m:%d %H:%M:%S"), writeonce=True + ) + addtag("Compression", "H", 1, compresstag) + if predictor: + 
addtag("Predictor", "H", 1, 2) + addtag("ImageWidth", "I", 1, shape[-2]) + addtag("ImageLength", "I", 1, shape[-3]) + if tile: + addtag("TileWidth", "I", 1, tile[-1]) + addtag("TileLength", "I", 1, tile[-2]) + if tile[0] > 1: + addtag("ImageDepth", "I", 1, shape[-4]) + addtag("TileDepth", "I", 1, tile[0]) + addtag("NewSubfileType", "I", 1, 0) + if not bilevel: + sampleformat = {"u": 1, "i": 2, "f": 3, "c": 6}[datadtype.kind] + addtag( + "SampleFormat", "H", samplesperpixel, (sampleformat,) * samplesperpixel + ) + addtag("PhotometricInterpretation", "H", 1, photometric.value) + if colormap is not None: + addtag("ColorMap", "H", colormap.size, colormap) + addtag("SamplesPerPixel", "H", 1, samplesperpixel) + if bilevel: + pass + elif planarconfig and samplesperpixel > 1: + addtag("PlanarConfiguration", "H", 1, planarconfig.value) + addtag( + "BitsPerSample", + "H", + samplesperpixel, + (datadtype.itemsize * 8,) * samplesperpixel, + ) + else: + addtag("BitsPerSample", "H", 1, datadtype.itemsize * 8) + if extrasamples: + if photometric == RGB and extrasamples == 1: + addtag("ExtraSamples", "H", 1, 1) # associated alpha channel + else: + addtag("ExtraSamples", "H", extrasamples, (0,) * extrasamples) + if resolution is not None: + addtag("XResolution", "2I", 1, rational(resolution[0])) + addtag("YResolution", "2I", 1, rational(resolution[1])) + if len(resolution) > 2: + unit = resolution[2] + unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) + elif self._imagej: + unit = 1 + else: + unit = 2 + addtag("ResolutionUnit", "H", 1, unit) + elif not self._imagej: + addtag("XResolution", "2I", 1, (1, 1)) + addtag("YResolution", "2I", 1, (1, 1)) + addtag("ResolutionUnit", "H", 1, 1) + if ijmetadata: + for t in imagej_metadata_tags(ijmetadata, byteorder): + addtag(*t) + + contiguous = not compress + if tile: + # one chunk per tile per plane + tiles = ( + (shape[2] + tile[0] - 1) // tile[0], + (shape[3] + tile[1] - 1) // tile[1], + (shape[4] + tile[2] - 1) // tile[2], + ) + 
numtiles = product(tiles) * shape[1] + stripbytecounts = [ + product(tile) * shape[-1] * datadtype.itemsize + ] * numtiles + addtag(tagbytecounts, offsetformat, numtiles, stripbytecounts) + addtag(tag_offsets, offsetformat, numtiles, [0] * numtiles) + contiguous = contiguous and product(tiles) == 1 + if not contiguous: + # allocate tile buffer + chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) + elif contiguous: + # one strip per plane + if bilevel: + stripbytecounts = [product(datashape[2:])] * shape[1] + else: + stripbytecounts = [product(datashape[2:]) * datadtype.itemsize] * shape[ + 1 + ] + addtag(tagbytecounts, offsetformat, shape[1], stripbytecounts) + addtag(tag_offsets, offsetformat, shape[1], [0] * shape[1]) + addtag("RowsPerStrip", "I", 1, shape[-3]) + else: + # compress rowsperstrip or ~64 KB chunks + rowsize = product(shape[-2:]) * datadtype.itemsize + if rowsperstrip is None: + rowsperstrip = 65536 // rowsize + if rowsperstrip < 1: + rowsperstrip = 1 + elif rowsperstrip > shape[-3]: + rowsperstrip = shape[-3] + addtag("RowsPerStrip", "I", 1, rowsperstrip) + + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + numstrips *= shape[1] + stripbytecounts = [0] * numstrips + addtag(tagbytecounts, offsetformat, numstrips, [0] * numstrips) + addtag(tag_offsets, offsetformat, numstrips, [0] * numstrips) + + if data is None and not contiguous: + raise ValueError("cannot write non-contiguous empty file") + + # add extra tags from user + for t in extratags: + addtag(*t) + + # TODO: check TIFFReadDirectoryCheckOrder warning in files containing + # multiple tags of same code + # the entries in an IFD must be sorted in ascending order by tag code + tags = sorted(tags, key=lambda x: x[0]) + + if not (self._bigtiff or self._imagej) and (fh.tell() + datasize > 2**31 - 1): + raise ValueError("data too large for standard TIFF file") + + # if not compressed or multi-tiled, write the first IFD and then + # all data contiguously; else, write all IFDs and 
data interleaved + for pageindex in range(1 if contiguous else shape[0]): + # update pointer at ifd_offset + pos = fh.tell() + if pos % 2: + # location of IFD must begin on a word boundary + fh.write(b"\0") + pos += 1 + fh.seek(self._ifdoffset) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + + # write ifdentries + fh.write(pack(tagnoformat, len(tags))) + tag_offset = fh.tell() + fh.write(b"".join(t[1] for t in tags)) + self._ifdoffset = fh.tell() + fh.write(pack(offsetformat, 0)) # offset to next IFD + + # write tag values and patch offsets in ifdentries, if necessary + for tagindex, tag in enumerate(tags): + if tag[2]: + pos = fh.tell() + if pos % 2: + # tag value is expected to begin on word boundary + fh.write(b"\0") + pos += 1 + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + if tag[0] == tag_offsets: + stripoffsetsoffset = pos + elif tag[0] == tagbytecounts: + strip_bytecounts_offset = pos + elif tag[0] == 270 and tag[2].endswith(b"\0\0\0\0"): + # image description buffer + self._descriptionoffset = pos + self._descriptionlenoffset = tag_offset + tagindex * tagsize + 4 + fh.write(tag[2]) + + # write image data + data_offset = fh.tell() + skip = align - data_offset % align + fh.seek(skip, 1) + data_offset += skip + if contiguous: + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + elif tile: + if data is None: + fh.write_empty(numtiles * stripbytecounts[0]) + else: + stripindex = 0 + for plane in data[pageindex]: + for tz in range(tiles[0]): + for ty in range(tiles[1]): + for tx in range(tiles[2]): + c0 = min(tile[0], shape[2] - tz * tile[0]) + c1 = min(tile[1], shape[3] - ty * tile[1]) + c2 = min(tile[2], shape[4] - tx * tile[2]) + chunk[c0:, c1:, c2:] = 0 + chunk[:c0, :c1, :c2] = plane[ + tz * tile[0] : tz * tile[0] + c0, + ty * tile[1] : ty * tile[1] + c1, + tx * tile[2] : tx * tile[2] + c2, + ] + if compress: + t = compress(chunk) + fh.write(t) + 
stripbytecounts[stripindex] = len(t) + stripindex += 1 + else: + fh.write_array(chunk) + fh.flush() + elif compress: + # write one strip per rowsperstrip + assert data.shape[2] == 1 # not handling depth + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + stripindex = 0 + for plane in data[pageindex]: + for i in range(numstrips): + strip = plane[0, i * rowsperstrip : (i + 1) * rowsperstrip] + strip = compress(strip) + fh.write(strip) + stripbytecounts[stripindex] = len(strip) + stripindex += 1 + + # update strip/tile offsets and bytecounts if necessary + pos = fh.tell() + for tagindex, tag in enumerate(tags): + if tag[0] == tag_offsets: # strip/tile offsets + if tag[2]: + fh.seek(stripoffsetsoffset) + strip_offset = data_offset + for size in stripbytecounts: + fh.write(pack(offsetformat, strip_offset)) + strip_offset += size + else: + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, data_offset)) + elif tag[0] == tagbytecounts: # strip/tile bytecounts + if compress: + if tag[2]: + fh.seek(strip_bytecounts_offset) + for size in stripbytecounts: + fh.write(pack(offsetformat, size)) + else: + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, stripbytecounts[0])) + break + fh.seek(pos) + fh.flush() + + # remove tags that should be written only once + if pageindex == 0: + tags = [tag for tag in tags if not tag[-1]] + + self._shape = shape + self._datashape = (1,) + input_shape + self._datadtype = datadtype + self._dataoffset = data_offset + self._databytecounts = stripbytecounts + + if contiguous: + # write remaining IFDs/tags later + self._tags = tags + # return offset and size of image data + if returnoffset: + return data_offset, sum(stripbytecounts) + + def _write_remaining_pages(self): + """Write outstanding IFDs and tags to file.""" + if not self._tags or self._truncate: + return + + fh = self._fh + fhpos = fh.tell() + if fhpos % 2: + fh.write(b"\0") + fhpos += 1 + byteorder = 
self._byteorder + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagnoformat = self._tagnoformat + tagsize = self._tagsize + dataoffset = self._dataoffset + pagedatasize = sum(self._databytecounts) + pageno = self._shape[0] * self._datashape[0] - 1 + + def pack(fmt, *val): + return struct.pack(byteorder + fmt, *val) + + # construct template IFD in memory + # need to patch offsets to next IFD and data before writing to disk + ifd = io.BytesIO() + ifd.write(pack(tagnoformat, len(self._tags))) + tagoffset = ifd.tell() + ifd.write(b"".join(t[1] for t in self._tags)) + ifdoffset = ifd.tell() + ifd.write(pack(offsetformat, 0)) # offset to next IFD + # tag values + for tagindex, tag in enumerate(self._tags): + offset2value = tagoffset + tagindex * tagsize + offsetsize + 4 + if tag[2]: + pos = ifd.tell() + if pos % 2: # tag value is expected to begin on word boundary + ifd.write(b"\0") + pos += 1 + ifd.seek(offset2value) + try: + ifd.write(pack(offsetformat, pos + fhpos)) + except Exception: # struct.error + if self._imagej: + warnings.warn("truncating ImageJ file") + self._truncate = True + return + raise ValueError("data too large for non-BigTIFF file") + ifd.seek(pos) + ifd.write(tag[2]) + if tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = offset2value + stripoffset2value = pos + elif tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = None + stripoffset2value = offset2value + # size to word boundary + if ifd.tell() % 2: + ifd.write(b"\0") + + # check if all IFDs fit in file + pos = fh.tell() + if not self._bigtiff and pos + ifd.tell() * pageno > 2**32 - 256: + if self._imagej: + warnings.warn("truncating ImageJ file") + self._truncate = True + return + raise ValueError("data too large for non-BigTIFF file") + + # TODO: assemble IFD chain in memory + for _ in range(pageno): + # update pointer at IFD offset + pos = fh.tell() + fh.seek(self._ifdoffset) + 
fh.write(pack(offsetformat, pos)) + fh.seek(pos) + self._ifdoffset = pos + ifdoffset + # update strip/tile offsets in IFD + dataoffset += pagedatasize # offset to image data + if stripoffset2offset is None: + ifd.seek(stripoffset2value) + ifd.write(pack(offsetformat, dataoffset)) + else: + ifd.seek(stripoffset2offset) + ifd.write(pack(offsetformat, pos + stripoffset2value)) + ifd.seek(stripoffset2value) + stripoffset = dataoffset + for size in self._databytecounts: + ifd.write(pack(offsetformat, stripoffset)) + stripoffset += size + # write IFD entry + fh.write(ifd.getvalue()) + + self._tags = None + self._datadtype = None + self._dataoffset = None + self._databytecounts = None + # do not reset _shape or _data_shape + + def _write_image_description(self): + """Write meta data to ImageDescription tag.""" + if ( + not self._datashape + or self._datashape[0] == 1 + or self._descriptionoffset <= 0 + ): + return + + colormapped = self._colormap is not None + if self._imagej: + isrgb = self._shape[-1] in (3, 4) + description = imagej_description( + self._datashape, isrgb, colormapped, **self._metadata + ) + else: + description = json_description(self._datashape, **self._metadata) + + # rewrite description and its length to file + description = description.encode("utf-8") + description = description[: self._descriptionlen - 1] + pos = self._fh.tell() + self._fh.seek(self._descriptionoffset) + self._fh.write(description) + self._fh.seek(self._descriptionlenoffset) + self._fh.write( + struct.pack(self._byteorder + self._offsetformat, len(description) + 1) + ) + self._fh.seek(pos) + + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._descriptionlen = 0 + + def _now(self): + """Return current date and time.""" + return datetime.datetime.now() + + def close(self): + """Write remaining pages and close file handle.""" + if not self._truncate: + self._write_remaining_pages() + self._write_image_description() + self._fh.close() + + def __enter__(self): + return 
self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +class TiffFile(object): + """Read image and metadata from TIFF file. + + TiffFile instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + Attributes + ---------- + pages : TiffPages + Sequence of TIFF pages in file. + series : list of TiffPageSeries + Sequences of closely related TIFF pages. These are computed + from OME, LSM, ImageJ, etc. metadata or based on similarity + of page properties such as shape, dtype, and compression. + byteorder : '>', '<' + The endianness of data in the file. + '>': big-endian (Motorola). + '>': little-endian (Intel). + is_flag : bool + If True, file is of a certain format. + Flags are: bigtiff, movie, shaped, ome, imagej, stk, lsm, fluoview, + nih, vista, 'micromanager, metaseries, mdgel, mediacy, tvips, fei, + sem, scn, svs, scanimage, andor, epics, pilatus, qptiff. + + All attributes are read-only. + + Examples + -------- + >>> # read image array from TIFF file + >>> imsave('temp.tif', numpy.random.rand(5, 301, 219)) + >>> with TiffFile('temp.tif') as tif: + ... data = tif.asarray() + >>> data.shape + (5, 301, 219) + + """ + + def __init__( + self, + arg, + name=None, + offset=None, + size=None, + multifile=True, + movie=None, + **kwargs + ): + """Initialize instance from file. + + Parameters + ---------- + arg : str or open file + Name of file or open file object. + The file objects are closed in TiffFile.close(). + name : str + Optional name of file in case 'arg' is a file handle. + offset : int + Optional start position of embedded file. By default, this is + the current file position. + size : int + Optional size of embedded file. By default, this is the number + of bytes from the 'offset' to the end of the file. + multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. 
+ movie : bool + If True, assume that later pages differ from first page only by + data offsets and byte counts. Significantly increases speed and + reduces memory usage when reading movies with thousands of pages. + Enabling this for non-movie files will result in data corruption + or crashes. Python 3 only. + kwargs : bool + 'is_ome': If False, disable processing of OME-XML metadata. + + """ + if "fastij" in kwargs: + del kwargs["fastij"] + raise DeprecationWarning("the fastij option will be removed") + for key, value in kwargs.items(): + if key[:3] == "is_" and key[3:] in TIFF.FILE_FLAGS: + if value is not None and not value: + setattr(self, key, bool(value)) + else: + raise TypeError("unexpected keyword argument: %s" % key) + + fh = FileHandle(arg, mode="rb", name=name, offset=offset, size=size) + self._fh = fh + self._multifile = bool(multifile) + self._files = {fh.name: self} # cache of TiffFiles + try: + fh.seek(0) + try: + byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)] + except KeyError: + raise ValueError("not a TIFF file") + sys_byteorder = {"big": ">", "little": "<"}[sys.byteorder] + self.isnative = byteorder == sys_byteorder + + version = struct.unpack(byteorder + "H", fh.read(2))[0] + if version == 43: + # BigTiff + self.is_bigtiff = True + offsetsize, zero = struct.unpack(byteorder + "HH", fh.read(4)) + if zero or offsetsize != 8: + raise ValueError("invalid BigTIFF file") + self.byteorder = byteorder + self.offsetsize = 8 + self.offsetformat = byteorder + "Q" + self.tagnosize = 8 + self.tagnoformat = byteorder + "Q" + self.tagsize = 20 + self.tagformat1 = byteorder + "HH" + self.tagformat2 = byteorder + "Q8s" + elif version == 42: + self.is_bigtiff = False + self.byteorder = byteorder + self.offsetsize = 4 + self.offsetformat = byteorder + "I" + self.tagnosize = 2 + self.tagnoformat = byteorder + "H" + self.tagsize = 12 + self.tagformat1 = byteorder + "HH" + self.tagformat2 = byteorder + "I4s" + else: + raise ValueError("invalid TIFF file") + + # 
file handle is at offset to offset to first page + self.pages = TiffPages(self) + + if self.is_lsm and ( + self.filehandle.size >= 2**32 + or self.pages[0].compression != 1 + or self.pages[1].compression != 1 + ): + self._lsm_load_pages() + self._lsm_fix_strip_offsets() + self._lsm_fix_strip_bytecounts() + elif movie: + self.pages.useframes = True + + except Exception: + fh.close() + raise + + @property + def filehandle(self): + """Return file handle.""" + return self._fh + + @property + def filename(self): + """Return name of file handle.""" + return self._fh.name + + @lazyattr + def fstat(self): + """Return status of file handle as stat_result object.""" + try: + return os.fstat(self._fh.fileno()) + except Exception: # io.UnsupportedOperation + return None + + def close(self): + """Close open file handle(s).""" + for tif in self._files.values(): + tif.filehandle.close() + self._files = {} + + def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=1): + """Return image data from multiple TIFF pages as numpy array. + + By default, the data from the first series is returned. + + Parameters + ---------- + key : int, slice, or sequence of page indices + Defines which pages to return as array. + series : int or TiffPageSeries + Defines which series of pages to return as array. + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + validate : bool + If True (default), validate various tags. + Passed to TiffPage.asarray(). + maxworkers : int + Maximum number of threads to concurrently get data from pages. + Default is 1. 
If None, up to half the CPU cores are used. + Reading data from file is limited to a single thread. + Using multiple threads can significantly speed up this function + if the bottleneck is decoding compressed data, e.g. in case of + large LZW compressed LSM files. + If the bottleneck is I/O or pure Python code, using multiple + threads might be detrimental. + + """ + if not self.pages: + return numpy.array([]) + if key is None and series is None: + series = 0 + if series is not None: + try: + series = self.series[series] + except (KeyError, TypeError): + pass + pages = series._pages + else: + pages = self.pages + + if key is None: + pass + elif isinstance(key, inttypes): + pages = [pages[key]] + elif isinstance(key, slice): + pages = pages[key] + elif isinstance(key, collections.Iterable): + pages = [pages[k] for k in key] + else: + raise TypeError("key must be an int, slice, or sequence") + + if not pages: + raise ValueError("no pages selected") + + if self.is_nih: + result = stack_pages(pages, out=out, maxworkers=maxworkers, squeeze=False) + elif key is None and series and series.offset: + typecode = self.byteorder + series.dtype.char + if out == "memmap" and pages[0].is_memmappable: + result = self.filehandle.memmap_array( + typecode, series.shape, series.offset + ) + else: + if out is not None: + out = create_output(out, series.shape, series.dtype) + self.filehandle.seek(series.offset) + result = self.filehandle.read_array( + typecode, product(series.shape), out=out, native=True + ) + elif len(pages) == 1: + result = pages[0].asarray(out=out, validate=validate) + else: + result = stack_pages(pages, out=out, maxworkers=maxworkers) + + if result is None: + return + + if key is None: + try: + result.shape = series.shape + except ValueError: + try: + warnings.warn( + "failed to reshape %s to %s" % (result.shape, series.shape) + ) + # try series of expected shapes + result.shape = (-1,) + series.shape + except ValueError: + # revert to generic shape + result.shape = 
(-1,) + pages[0].shape + elif len(pages) == 1: + result.shape = pages[0].shape + else: + result.shape = (-1,) + pages[0].shape + return result + + @lazyattr + def series(self): + """Return related pages as TiffPageSeries. + + Side effect: after calling this function, TiffFile.pages might contain + TiffPage and TiffFrame instances. + + """ + if not self.pages: + return [] + + useframes = self.pages.useframes + keyframe = self.pages.keyframe + series = [] + for name in "ome imagej lsm fluoview nih mdgel shaped".split(): + if getattr(self, "is_" + name, False): + series = getattr(self, "_%s_series" % name)() + break + self.pages.useframes = useframes + self.pages.keyframe = keyframe + if not series: + series = self._generic_series() + + # remove empty series, e.g. in MD Gel files + series = [s for s in series if sum(s.shape) > 0] + + for i, s in enumerate(series): + s.index = i + return series + + def _generic_series(self): + """Return image series in file.""" + if self.pages.useframes: + # movie mode + page = self.pages[0] + shape = page.shape + axes = page.axes + if len(self.pages) > 1: + shape = (len(self.pages),) + shape + axes = "I" + axes + return [ + TiffPageSeries(self.pages[:], shape, page.dtype, axes, stype="movie") + ] + + self.pages.clear(False) + self.pages.load() + result = [] + keys = [] + series = {} + compressions = TIFF.DECOMPESSORS + for page in self.pages: + if not page.shape: + continue + key = page.shape + (page.axes, page.compression in compressions) + if key in series: + series[key].append(page) + else: + keys.append(key) + series[key] = [page] + for key in keys: + pages = series[key] + page = pages[0] + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = "I" + axes + result.append( + TiffPageSeries(pages, shape, page.dtype, axes, stype="Generic") + ) + + return result + + def _shaped_series(self): + """Return image series in "shaped" file.""" + pages = self.pages + pages.useframes = True + 
lenpages = len(pages) + + def append_series(series, pages, axes, shape, reshape, name, truncated): + page = pages[0] + if not axes: + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = "Q" + axes + size = product(shape) + resize = product(reshape) + if page.is_contiguous and resize > size and resize % size == 0: + if truncated is None: + truncated = True + axes = "Q" + axes + shape = (resize // size,) + shape + try: + axes = reshape_axes(axes, shape, reshape) + shape = reshape + except ValueError as e: + warnings.warn(str(e)) + series.append( + TiffPageSeries( + pages, + shape, + page.dtype, + axes, + name=name, + stype="Shaped", + truncated=truncated, + ) + ) + + keyframe = axes = shape = reshape = name = None + series = [] + index = 0 + while True: + if index >= lenpages: + break + # new keyframe; start of new series + pages.keyframe = index + keyframe = pages[index] + if not keyframe.is_shaped: + warnings.warn("invalid shape metadata or corrupted file") + return + # read metadata + axes = None + shape = None + metadata = json_description_metadata(keyframe.is_shaped) + name = metadata.get("name", "") + reshape = metadata["shape"] + truncated = metadata.get("truncated", None) + if "axes" in metadata: + axes = metadata["axes"] + if len(axes) == len(reshape): + shape = reshape + else: + axes = "" + warnings.warn("axes do not match shape") + # skip pages if possible + spages = [keyframe] + size = product(reshape) + npages, mod = divmod(size, product(keyframe.shape)) + if mod: + warnings.warn("series shape does not match page shape") + return + if 1 < npages <= lenpages - index: + size *= keyframe._dtype.itemsize + if truncated: + npages = 1 + elif ( + keyframe.is_final + and keyframe.offset + size < pages[index + 1].offset + ): + truncated = False + else: + # need to read all pages for series + truncated = False + for j in range(index + 1, index + npages): + page = pages[j] + page.keyframe = keyframe + 
spages.append(page) + append_series(series, spages, axes, shape, reshape, name, truncated) + index += npages + + return series + + def _imagej_series(self): + """Return image series in ImageJ file.""" + # ImageJ's dimension order is always TZCYXS + # TODO: fix loading of color, composite, or palette images + self.pages.useframes = True + self.pages.keyframe = 0 + + ij = self.imagej_metadata + pages = self.pages + page = pages[0] + + def is_hyperstack(): + # ImageJ hyperstack store all image metadata in the first page and + # image data are stored contiguously before the second page, if any + if not page.is_final: + return False + images = ij.get("images", 0) + if images <= 1: + return False + offset, count = page.is_contiguous + if ( + count != product(page.shape) * page.bitspersample // 8 + or offset + count * images > self.filehandle.size + ): + raise ValueError() + # check that next page is stored after data + if len(pages) > 1 and offset + count * images > pages[1].offset: + return False + return True + + try: + hyperstack = is_hyperstack() + except ValueError: + warnings.warn("invalid ImageJ metadata or corrupted file") + return + if hyperstack: + # no need to read other pages + pages = [page] + else: + self.pages.load() + + shape = [] + axes = [] + if "frames" in ij: + shape.append(ij["frames"]) + axes.append("T") + if "slices" in ij: + shape.append(ij["slices"]) + axes.append("Z") + if "channels" in ij and not ( + page.photometric == 2 and not ij.get("hyperstack", False) + ): + shape.append(ij["channels"]) + axes.append("C") + remain = ij.get("images", len(pages)) // (product(shape) if shape else 1) + if remain > 1: + shape.append(remain) + axes.append("I") + if page.axes[0] == "I": + # contiguous multiple images + shape.extend(page.shape[1:]) + axes.extend(page.axes[1:]) + elif page.axes[:2] == "SI": + # color-mapped contiguous multiple images + shape = page.shape[0:1] + tuple(shape) + page.shape[2:] + axes = list(page.axes[0]) + axes + list(page.axes[2:]) 
+ else: + shape.extend(page.shape) + axes.extend(page.axes) + + truncated = ( + hyperstack + and len(self.pages) == 1 + and page.is_contiguous[1] != product(shape) * page.bitspersample // 8 + ) + + return [ + TiffPageSeries( + pages, shape, page.dtype, axes, stype="ImageJ", truncated=truncated + ) + ] + + def _fluoview_series(self): + """Return image series in FluoView file.""" + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + mm = self.fluoview_metadata + mmhd = list(reversed(mm["Dimensions"])) + axes = "".join( + TIFF.MM_DIMENSIONS.get(i[0].upper(), "Q") for i in mmhd if i[1] > 1 + ) + shape = tuple(int(i[1]) for i in mmhd if i[1] > 1) + return [ + TiffPageSeries( + self.pages, + shape, + self.pages[0].dtype, + axes, + name=mm["ImageName"], + stype="FluoView", + ) + ] + + def _mdgel_series(self): + """Return image series in MD Gel file.""" + # only a single page, scaled according to metadata in second page + self.pages.useframes = False + self.pages.keyframe = 0 + self.pages.load() + md = self.mdgel_metadata + if md["FileTag"] in (2, 128): + dtype = numpy.dtype("float32") + scale = md["ScalePixel"] + scale = scale[0] / scale[1] # rational + if md["FileTag"] == 2: + # squary root data format + def transform(a): + return a.astype("float32") ** 2 * scale + + else: + + def transform(a): + return a.astype("float32") * scale + + else: + transform = None + page = self.pages[0] + return [ + TiffPageSeries( + [page], page.shape, dtype, page.axes, transform=transform, stype="MDGel" + ) + ] + + def _nih_series(self): + """Return image series in NIH file.""" + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + page0 = self.pages[0] + if len(self.pages) == 1: + shape = page0.shape + axes = page0.axes + else: + shape = (len(self.pages),) + page0.shape + axes = "I" + page0.axes + return [TiffPageSeries(self.pages, shape, page0.dtype, axes, stype="NIH")] + + def _ome_series(self): + """Return image series in OME-TIFF 
file(s).""" + from xml.etree import cElementTree as etree # delayed import + + omexml = self.pages[0].description + try: + root = etree.fromstring(omexml) + except etree.ParseError as e: + # TODO: test badly encoded OME-XML + warnings.warn("ome-xml: %s" % e) + try: + # might work on Python 2 + omexml = omexml.decode("utf-8", "ignore").encode("utf-8") + root = etree.fromstring(omexml) + except Exception: + return + + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + + uuid = root.attrib.get("UUID", None) + self._files = {uuid: self} + dirname = self._fh.dirname + modulo = {} + series = [] + for element in root: + if element.tag.endswith("BinaryOnly"): + # TODO: load OME-XML from master or companion file + warnings.warn("ome-xml: not an ome-tiff master file") + break + if element.tag.endswith("StructuredAnnotations"): + for annot in element: + if not annot.attrib.get("Namespace", "").endswith("modulo"): + continue + for value in annot: + for modul in value: + for along in modul: + if not along.tag[:-1].endswith("Along"): + continue + axis = along.tag[-1] + newaxis = along.attrib.get("Type", "other") + newaxis = TIFF.AXES_LABELS[newaxis] + if "Start" in along.attrib: + step = float(along.attrib.get("Step", 1)) + start = float(along.attrib["Start"]) + stop = float(along.attrib["End"]) + step + labels = numpy.arange(start, stop, step) + else: + labels = [ + label.text + for label in along + if label.tag.endswith("Label") + ] + modulo[axis] = (newaxis, labels) + + if not element.tag.endswith("Image"): + continue + + attr = element.attrib + name = attr.get("Name", None) + + for pixels in element: + if not pixels.tag.endswith("Pixels"): + continue + attr = pixels.attrib + dtype = attr.get("PixelType", None) + axes = "".join(reversed(attr["DimensionOrder"])) + shape = list(int(attr["Size" + ax]) for ax in axes) + size = product(shape[:-2]) + ifds = None + spp = 1 # samples per pixel + # FIXME: this implementation assumes the last two + # dimensions 
are stored in tiff pages (shape[:-2]). + # Apparently that is not always the case. + for data in pixels: + if data.tag.endswith("Channel"): + attr = data.attrib + if ifds is None: + spp = int(attr.get("SamplesPerPixel", spp)) + ifds = [None] * (size // spp) + elif int(attr.get("SamplesPerPixel", 1)) != spp: + raise ValueError("cannot handle differing SamplesPerPixel") + continue + if ifds is None: + ifds = [None] * (size // spp) + if not data.tag.endswith("TiffData"): + continue + attr = data.attrib + ifd = int(attr.get("IFD", 0)) + num = int(attr.get("NumPlanes", 1 if "IFD" in attr else 0)) + num = int(attr.get("PlaneCount", num)) + idx = [int(attr.get("First" + ax, 0)) for ax in axes[:-2]] + try: + idx = numpy.ravel_multi_index(idx, shape[:-2]) + except ValueError: + # ImageJ produces invalid ome-xml when cropping + warnings.warn("ome-xml: invalid TiffData index") + continue + for uuid in data: + if not uuid.tag.endswith("UUID"): + continue + if uuid.text not in self._files: + if not self._multifile: + # abort reading multifile OME series + # and fall back to generic series + return [] + fname = uuid.attrib["FileName"] + try: + tif = TiffFile(os.path.join(dirname, fname)) + tif.pages.useframes = True + tif.pages.keyframe = 0 + tif.pages.load() + except (IOError, FileNotFoundError, ValueError): + warnings.warn("ome-xml: failed to read '%s'" % fname) + break + self._files[uuid.text] = tif + tif.close() + pages = self._files[uuid.text].pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + # only process first UUID + break + else: + pages = self.pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + + if all(i is None for i in ifds): + # skip images without data + continue + + # set a keyframe on all IFDs + keyframe = None + for i in ifds: + # try find a 
TiffPage + if i and i == i.keyframe: + keyframe = i + break + if not keyframe: + # reload a TiffPage from file + for i, keyframe in enumerate(ifds): + if keyframe: + keyframe.parent.pages.keyframe = keyframe.index + keyframe = keyframe.parent.pages[keyframe.index] + ifds[i] = keyframe + break + for i in ifds: + if i is not None: + i.keyframe = keyframe + + dtype = keyframe.dtype + series.append( + TiffPageSeries( + ifds, shape, dtype, axes, parent=self, name=name, stype="OME" + ) + ) + for serie in series: + shape = list(serie.shape) + for axis, (newaxis, labels) in modulo.items(): + i = serie.axes.index(axis) + size = len(labels) + if shape[i] == size: + serie.axes = serie.axes.replace(axis, newaxis, 1) + else: + shape[i] //= size + shape.insert(i + 1, size) + serie.axes = serie.axes.replace(axis, axis + newaxis, 1) + serie.shape = tuple(shape) + # squeeze dimensions + for serie in series: + serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes) + return series + + def _lsm_series(self): + """Return main image series in LSM file. 
Skip thumbnails.""" + lsmi = self.lsm_metadata + axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi["ScanType"]] + if self.pages[0].photometric == 2: # RGB; more than one channel + axes = axes.replace("C", "").replace("XY", "XYC") + if lsmi.get("DimensionP", 0) > 1: + axes += "P" + if lsmi.get("DimensionM", 0) > 1: + axes += "M" + axes = axes[::-1] + shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes) + name = lsmi.get("Name", "") + self.pages.keyframe = 0 + pages = self.pages[::2] + dtype = pages[0].dtype + series = [TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSM")] + + if self.pages[1].is_reduced: + self.pages.keyframe = 1 + pages = self.pages[1::2] + dtype = pages[0].dtype + cp, i = 1, 0 + while cp < len(pages) and i < len(shape) - 2: + cp *= shape[i] + i += 1 + shape = shape[:i] + pages[0].shape + axes = axes[:i] + "CYX" + series.append( + TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSMreduced") + ) + + return series + + def _lsm_load_pages(self): + """Load all pages from LSM file.""" + self.pages.cache = True + self.pages.useframes = True + # second series: thumbnails + self.pages.keyframe = 1 + keyframe = self.pages[1] + for page in self.pages[1::2]: + page.keyframe = keyframe + # first series: data + self.pages.keyframe = 0 + keyframe = self.pages[0] + for page in self.pages[::2]: + page.keyframe = keyframe + + def _lsm_fix_strip_offsets(self): + """Unwrap strip offsets for LSM files greater than 4 GB. + + Each series and position require separate unwrapping (undocumented). 
+ + """ + if self.filehandle.size < 2**32: + return + + pages = self.pages + npages = len(pages) + series = self.series[0] + axes = series.axes + + # find positions + positions = 1 + for i in 0, 1: + if series.axes[i] in "PM": + positions *= series.shape[i] + + # make time axis first + if positions > 1: + ntimes = 0 + for i in 1, 2: + if axes[i] == "T": + ntimes = series.shape[i] + break + if ntimes: + div, mod = divmod(npages, 2 * positions * ntimes) + assert mod == 0 + shape = (positions, ntimes, div, 2) + indices = numpy.arange(product(shape)).reshape(shape) + indices = numpy.moveaxis(indices, 1, 0) + else: + indices = numpy.arange(npages).reshape(-1, 2) + + # images of reduced page might be stored first + if pages[0].dataoffsets[0] > pages[1].dataoffsets[0]: + indices = indices[..., ::-1] + + # unwrap offsets + wrap = 0 + previousoffset = 0 + for i in indices.flat: + page = pages[i] + dataoffsets = [] + for currentoffset in page.dataoffsets: + if currentoffset < previousoffset: + wrap += 2**32 + dataoffsets.append(currentoffset + wrap) + previousoffset = currentoffset + page.dataoffsets = tuple(dataoffsets) + + def _lsm_fix_strip_bytecounts(self): + """Set databytecounts to size of compressed data. + + The StripByteCounts tag in LSM files contains the number of bytes + for the uncompressed data. 
+ + """ + pages = self.pages + if pages[0].compression == 1: + return + # sort pages by first strip offset + pages = sorted(pages, key=lambda p: p.dataoffsets[0]) + npages = len(pages) - 1 + for i, page in enumerate(pages): + if page.index % 2: + continue + offsets = page.dataoffsets + bytecounts = page.databytecounts + if i < npages: + lastoffset = pages[i + 1].dataoffsets[0] + else: + # LZW compressed strips might be longer than uncompressed + lastoffset = min(offsets[-1] + 2 * bytecounts[-1], self._fh.size) + offsets = offsets + (lastoffset,) + page.databytecounts = tuple( + offsets[j + 1] - offsets[j] for j in range(len(bytecounts)) + ) + + def __getattr__(self, name): + """Return 'is_flag' attributes from first page.""" + if name[3:] in TIFF.FILE_FLAGS: + if not self.pages: + return False + value = bool(getattr(self.pages[0], name)) + setattr(self, name, value) + return value + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __str__(self, detail=0, width=79): + """Return string containing information about file. + + The detail parameter specifies the level of detail returned: + + 0: file only. + 1: all series, first page of series and its tags. + 2: large tag values and file metadata. + 3: all pages. 
+ + """ + info = [ + "TiffFile '%s'", + format_size(self._fh.size), + {"<": "LittleEndian", ">": "BigEndian"}[self.byteorder], + ] + if self.is_bigtiff: + info.append("BigTiff") + info.append("|".join(f.upper() for f in self.flags)) + if len(self.pages) > 1: + info.append("%i Pages" % len(self.pages)) + if len(self.series) > 1: + info.append("%i Series" % len(self.series)) + if len(self._files) > 1: + info.append("%i Files" % (len(self._files))) + info = " ".join(info) + info = info.replace(" ", " ").replace(" ", " ") + info = info % snipstr(self._fh.name, max(12, width + 2 - len(info))) + if detail <= 0: + return info + info = [info] + info.append("\n".join(str(s) for s in self.series)) + if detail >= 3: + info.extend( + ( + TiffPage.__str__(p, detail=detail, width=width) + for p in self.pages + if p is not None + ) + ) + else: + info.extend( + ( + TiffPage.__str__(s.pages[0], detail=detail, width=width) + for s in self.series + if s.pages[0] is not None + ) + ) + if detail >= 2: + for name in sorted(self.flags): + if hasattr(self, name + "_metadata"): + m = getattr(self, name + "_metadata") + if m: + info.append( + "%s_METADATA\n%s" + % ( + name.upper(), + pformat(m, width=width, height=detail * 12), + ) + ) + return "\n\n".join(info).replace("\n\n\n", "\n\n") + + @lazyattr + def flags(self): + """Return set of file flags.""" + return set( + name.lower() + for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, "is_" + name) + ) + + @lazyattr + def is_mdgel(self): + """File has MD Gel format.""" + try: + return self.pages[0].is_mdgel or self.pages[1].is_mdgel + except IndexError: + return False + + @property + def is_movie(self): + """Return if file is a movie.""" + return self.pages.useframes + + @lazyattr + def shaped_metadata(self): + """Return Tifffile metadata from JSON descriptions as dicts.""" + if not self.is_shaped: + return + return tuple( + json_description_metadata(s.pages[0].is_shaped) + for s in self.series + if s.stype.lower() == "shaped" + ) + + 
@lazyattr
def ome_metadata(self):
    """Return OME XML as dict."""
    # TODO: remove this or return XML?
    if not self.is_ome:
        return
    return xml2dict(self.pages[0].description)["OME"]


@lazyattr
def qptiff_metadata(self):
    """Return PerkinElmer-QPI-ImageDescription XML element as dict."""
    if not self.is_qptiff:
        return
    root = "PerkinElmer-QPI-ImageDescription"
    # drop the spaces surrounding the root tag so the XML parses
    xml = self.pages[0].description.replace(" " + root + " ", root)
    return xml2dict(xml)[root]


@lazyattr
def lsm_metadata(self):
    """Return LSM metadata from CZ_LSMINFO tag as dict."""
    if not self.is_lsm:
        return
    return self.pages[0].tags["CZ_LSMINFO"].value


@lazyattr
def stk_metadata(self):
    """Return STK metadata from UIC tags as dict."""
    if not self.is_stk:
        return
    page = self.pages[0]
    tags = page.tags
    meta = {}
    meta["NumberPlanes"] = tags["UIC2tag"].count
    if page.description:
        meta["PlaneDescriptions"] = page.description.split("\0")
    if "UIC1tag" in tags:
        meta.update(tags["UIC1tag"].value)
    if "UIC3tag" in tags:
        meta.update(tags["UIC3tag"].value)  # wavelengths
    if "UIC4tag" in tags:
        meta.update(tags["UIC4tag"].value)  # override uic1 tags
    uic2tag = tags["UIC2tag"].value
    meta["ZDistance"] = uic2tag["ZDistance"]
    meta["TimeCreated"] = uic2tag["TimeCreated"]
    meta["TimeModified"] = uic2tag["TimeModified"]
    try:
        # convert Julian date/time pairs to numpy datetimes
        meta["DatetimeCreated"] = numpy.array(
            [julian_datetime(*dt)
             for dt in zip(uic2tag["DateCreated"], uic2tag["TimeCreated"])],
            dtype="datetime64[ns]")
        meta["DatetimeModified"] = numpy.array(
            [julian_datetime(*dt)
             for dt in zip(uic2tag["DateModified"], uic2tag["TimeModified"])],
            dtype="datetime64[ns]")
    except ValueError as e:
        warnings.warn("stk_metadata: %s" % e)
    return meta


@lazyattr
def imagej_metadata(self):
    """Return consolidated ImageJ metadata as dict."""
    if not self.is_imagej:
        return
    page = self.pages[0]
    meta = imagej_description_metadata(page.is_imagej)
    if "IJMetadata" in page.tags:
        try:
            meta.update(page.tags["IJMetadata"].value)
        except Exception:
            pass
    return meta


@lazyattr
def fluoview_metadata(self):
    """Return consolidated FluoView metadata as dict."""
    if not self.is_fluoview:
        return
    meta = {}
    page = self.pages[0]
    meta.update(page.tags["MM_Header"].value)
    # TODO: read stamps from all pages
    meta["Stamp"] = page.tags["MM_Stamp"].value
    # parsing the image description is skipped; it is not reliable
    return meta


@lazyattr
def nih_metadata(self):
    """Return NIH Image metadata from NIHImageHeader tag as dict."""
    if not self.is_nih:
        return
    return self.pages[0].tags["NIHImageHeader"].value


@lazyattr
def fei_metadata(self):
    """Return FEI metadata from SFEG or HELIOS tags as dict."""
    if not self.is_fei:
        return
    tags = self.pages[0].tags
    if "FEI_SFEG" in tags:
        return tags["FEI_SFEG"].value
    if "FEI_HELIOS" in tags:
        return tags["FEI_HELIOS"].value


@lazyattr
def sem_metadata(self):
    """Return SEM metadata from CZ_SEM tag as dict."""
    if not self.is_sem:
        return
    return self.pages[0].tags["CZ_SEM"].value


@lazyattr
def mdgel_metadata(self):
    """Return consolidated metadata from MD GEL tags as dict."""
    for page in self.pages[:2]:
        if "MDFileTag" in page.tags:
            tags = page.tags
            break
    else:
        return
    meta = {}
    # MD GEL tag codes 33445..33452; strip the 'MD' prefix from names
    for code in range(33445, 33453):
        name = TIFF.TAGS[code]
        if name not in tags:
            continue
        meta[name[2:]] = tags[name].value
    return meta


@lazyattr
def andor_metadata(self):
    """Return Andor tags as dict."""
    return self.pages[0].andor_tags


@lazyattr
def epics_metadata(self):
    """Return EPICS areaDetector tags as dict."""
    return self.pages[0].epics_tags


@lazyattr
def tvips_metadata(self):
    """Return TVIPS tag as dict."""
    if not self.is_tvips:
        return
    return self.pages[0].tags["TVIPS"].value


@lazyattr
def metaseries_metadata(self):
    """Return MetaSeries metadata from image description as dict."""
    if not self.is_metaseries:
        return
    return metaseries_description_metadata(self.pages[0].description)


@lazyattr
def pilatus_metadata(self):
    """Return Pilatus metadata from image description as dict."""
    if not self.is_pilatus:
        return
    return pilatus_description_metadata(self.pages[0].description)


@lazyattr
def micromanager_metadata(self):
    """Return consolidated MicroManager metadata as dict."""
    if not self.is_micromanager:
        return
    # from file header
    meta = read_micromanager_metadata(self._fh)
    # from tag
    meta.update(self.pages[0].tags["MicroManagerMetadata"].value)
    return meta


@lazyattr
def scanimage_metadata(self):
    """Return ScanImage non-varying frame and ROI metadata as dict."""
    if not self.is_scanimage:
        return
    meta = {}
    try:
        framedata, roidata = read_scanimage_metadata(self._fh)
        meta["FrameData"] = framedata
        meta.update(roidata)
    except ValueError:
        pass
    # TODO: scanimage_artist_metadata
    try:
        meta["Description"] = scanimage_description_metadata(
            self.pages[0].description)
    except Exception as e:
        warnings.warn("scanimage_description_metadata failed: %s" % e)
    return meta


@property
def geotiff_metadata(self):
    """Return GeoTIFF metadata from first page as dict."""
    if not self.is_geotiff:
        return
    return self.pages[0].geotiff_tags


class TiffPages(object):
    """Sequence of TIFF image file directories."""

    def __init__(self, parent):
        """Initialize instance from file. Read first TiffPage from file.

        The file position must be at an offset to an offset to a TiffPage.

        """
+ + """ + self.parent = parent + self.pages = [] # cache of TiffPages, TiffFrames, or their offsets + self.complete = False # True if offsets to all pages were read + self._tiffpage = TiffPage # class for reading tiff pages + self._keyframe = None + self._cache = True + + # read offset to first page + fh = parent.filehandle + self._nextpageoffset = fh.tell() + offset = struct.unpack(parent.offsetformat, fh.read(parent.offsetsize))[0] + + if offset == 0: + # warnings.warn('file contains no pages') + self.complete = True + return + if offset >= fh.size: + warnings.warn("invalid page offset (%i)" % offset) + self.complete = True + return + + # always read and cache first page + fh.seek(offset) + page = TiffPage(parent, index=0) + self.pages.append(page) + self._keyframe = page + + @property + def cache(self): + """Return if pages/frames are currently being cached.""" + return self._cache + + @cache.setter + def cache(self, value): + """Enable or disable caching of pages/frames. Clear cache if False.""" + value = bool(value) + if self._cache and not value: + self.clear() + self._cache = value + + @property + def useframes(self): + """Return if currently using TiffFrame (True) or TiffPage (False).""" + return self._tiffpage == TiffFrame and TiffFrame is not TiffPage + + @useframes.setter + def useframes(self, value): + """Set to use TiffFrame (True) or TiffPage (False).""" + self._tiffpage = TiffFrame if value else TiffPage + + @property + def keyframe(self): + """Return index of current keyframe.""" + return self._keyframe.index + + @keyframe.setter + def keyframe(self, index): + """Set current keyframe. 
Load TiffPage from file if necessary.""" + if self._keyframe.index == index: + return + if self.complete or 0 <= index < len(self.pages): + page = self.pages[index] + if isinstance(page, TiffPage): + self._keyframe = page + return + elif isinstance(page, TiffFrame): + # remove existing frame + self.pages[index] = page.offset + # load TiffPage from file + useframes = self.useframes + self._tiffpage = TiffPage + self._keyframe = self[index] + self.useframes = useframes + + @property + def next_page_offset(self): + """Return offset where offset to a new page can be stored.""" + if not self.complete: + self._seek(-1) + return self._nextpageoffset + + def load(self): + """Read all remaining pages from file.""" + fh = self.parent.filehandle + keyframe = self._keyframe + pages = self.pages + if not self.complete: + self._seek(-1) + for i, page in enumerate(pages): + if isinstance(page, inttypes): + fh.seek(page) + page = self._tiffpage(self.parent, index=i, keyframe=keyframe) + pages[i] = page + + def clear(self, fully=True): + """Delete all but first page from cache. 
Set keyframe to first page.""" + pages = self.pages + if not self._cache or len(pages) < 1: + return + self._keyframe = pages[0] + if fully: + # delete all but first TiffPage/TiffFrame + for i, page in enumerate(pages[1:]): + if not isinstance(page, inttypes): + pages[i + 1] = page.offset + elif TiffFrame is not TiffPage: + # delete only TiffFrames + for i, page in enumerate(pages): + if isinstance(page, TiffFrame): + pages[i] = page.offset + + def _seek(self, index, maxpages=2**22): + """Seek file to offset of specified page.""" + pages = self.pages + if not pages: + return + + fh = self.parent.filehandle + if fh.closed: + raise RuntimeError("FileHandle is closed") + + if self.complete or 0 <= index < len(pages): + page = pages[index] + offset = page if isinstance(page, inttypes) else page.offset + fh.seek(offset) + return + + offsetformat = self.parent.offsetformat + offsetsize = self.parent.offsetsize + tagnoformat = self.parent.tagnoformat + tagnosize = self.parent.tagnosize + tagsize = self.parent.tagsize + unpack = struct.unpack + + page = pages[-1] + offset = page if isinstance(page, inttypes) else page.offset + + while len(pages) < maxpages: + # read offsets to pages from file until index is reached + fh.seek(offset) + # skip tags + try: + tagno = unpack(tagnoformat, fh.read(tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + warnings.warn("corrupted tag list at offset %i" % offset) + del pages[-1] + self.complete = True + break + self._nextpageoffset = offset + tagnosize + tagno * tagsize + fh.seek(self._nextpageoffset) + + # read offset to next page + offset = unpack(offsetformat, fh.read(offsetsize))[0] + if offset == 0: + self.complete = True + break + if offset >= fh.size: + warnings.warn("invalid page offset (%i)" % offset) + self.complete = True + break + + pages.append(offset) + if 0 <= index < len(pages): + break + + if index >= len(pages): + raise IndexError("list index out of range") + + page = 
pages[index] + fh.seek(page if isinstance(page, inttypes) else page.offset) + + def __bool__(self): + """Return True if file contains any pages.""" + return len(self.pages) > 0 + + def __len__(self): + """Return number of pages in file.""" + if not self.complete: + self._seek(-1) + return len(self.pages) + + def __getitem__(self, key): + """Return specified page(s) from cache or file.""" + pages = self.pages + if not pages: + raise IndexError("list index out of range") + if key == 0: + return pages[key] + + if isinstance(key, slice): + start, stop, _ = key.indices(2**31 - 1) + if not self.complete and max(stop, start) > len(pages): + self._seek(-1) + return [self[i] for i in range(*key.indices(len(pages)))] + + if self.complete and key >= len(pages): + raise IndexError("list index out of range") + + try: + page = pages[key] + except IndexError: + page = 0 + if not isinstance(page, inttypes): + return page + + self._seek(key) + page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) + if self._cache: + pages[key] = page + return page + + def __iter__(self): + """Return iterator over all pages.""" + i = 0 + while True: + try: + yield self[i] + i += 1 + except IndexError: + break + + +class TiffPage(object): + """TIFF image file directory (IFD). + + Attributes + ---------- + index : int + Index of page in file. + dtype : numpy.dtype or None + Data type (native byte order) of the image in IFD. + shape : tuple + Dimensions of the image in IFD. + axes : str + Axes label codes: + 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane, + 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, + 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime, + 'L' exposure, 'V' event, 'Q' unknown, '_' missing + tags : dict + Dictionary of tags in IFD. {tag.name: TiffTag} + colormap : numpy.ndarray + Color look up table, if exists. + + All attributes are read-only. 
+ + Notes + ----- + The internal, normalized '_shape' attribute is 6 dimensional: + + 0 : number planes/images (stk, ij). + 1 : planar samplesperpixel. + 2 : imagedepth Z (sgi). + 3 : imagelength Y. + 4 : imagewidth X. + 5 : contig samplesperpixel. + + """ + + # default properties; will be updated from tags + imagewidth = 0 + imagelength = 0 + imagedepth = 1 + tilewidth = 0 + tilelength = 0 + tiledepth = 1 + bitspersample = 1 + samplesperpixel = 1 + sampleformat = 1 + rowsperstrip = 2**32 - 1 + compression = 1 + planarconfig = 1 + fillorder = 1 + photometric = 0 + predictor = 1 + extrasamples = 1 + colormap = None + software = "" + description = "" + description1 = "" + + def __init__(self, parent, index, keyframe=None): + """Initialize instance from file. + + The file handle position must be at offset to a valid IFD. + + """ + self.parent = parent + self.index = index + self.shape = () + self._shape = () + self.dtype = None + self._dtype = None + self.axes = "" + self.tags = {} + + self.dataoffsets = () + self.databytecounts = () + + # read TIFF IFD structure and its tags from file + fh = parent.filehandle + self.offset = fh.tell() # offset to this IFD + try: + tagno = struct.unpack(parent.tagnoformat, fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + raise ValueError("corrupted tag list at offset %i" % self.offset) + + tagsize = parent.tagsize + data = fh.read(tagsize * tagno) + tags = self.tags + index = -tagsize + for _ in range(tagno): + index += tagsize + try: + tag = TiffTag(self.parent, data[index : index + tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + tagname = tag.name + if tagname not in tags: + name = tagname + tags[name] = tag + else: + # some files contain multiple tags with same code + # e.g. 
MicroManager files contain two ImageDescription tags + i = 1 + while True: + name = "%s%i" % (tagname, i) + if name not in tags: + tags[name] = tag + break + name = TIFF.TAG_ATTRIBUTES.get(name, "") + if name: + if name[:3] in "sof des" and not isinstance(tag.value, str): + pass # wrong string type for software, description + else: + setattr(self, name, tag.value) + + if not tags: + return # found in FIBICS + + # consolidate private tags; remove them from self.tags + if self.is_andor: + self.andor_tags + elif self.is_epics: + self.epics_tags + + if self.is_lsm or (self.index and self.parent.is_lsm): + # correct non standard LSM bitspersample tags + self.tags["BitsPerSample"]._fix_lsm_bitspersample(self) + + if self.is_vista or (self.index and self.parent.is_vista): + # ISS Vista writes wrong ImageDepth tag + self.imagedepth = 1 + + if self.is_stk and "UIC1tag" in tags and not tags["UIC1tag"].value: + # read UIC1tag now that plane count is known + uic1tag = tags["UIC1tag"] + fh.seek(uic1tag.valueoffset) + tags["UIC1tag"].value = read_uic1tag( + fh, + self.parent.byteorder, + uic1tag.dtype, + uic1tag.count, + None, + tags["UIC2tag"].count, + ) + + if "IJMetadata" in tags: + # decode IJMetadata tag + try: + tags["IJMetadata"].value = imagej_metadata( + tags["IJMetadata"].value, + tags["IJMetadataByteCounts"].value, + self.parent.byteorder, + ) + except Exception as e: + warnings.warn(str(e)) + + if "BitsPerSample" in tags: + tag = tags["BitsPerSample"] + if tag.count == 1: + self.bitspersample = tag.value + else: + # LSM might list more items than samplesperpixel + value = tag.value[: self.samplesperpixel] + if any((v - value[0] for v in value)): + self.bitspersample = value + else: + self.bitspersample = value[0] + + if "SampleFormat" in tags: + tag = tags["SampleFormat"] + if tag.count == 1: + self.sampleformat = tag.value + else: + value = tag.value[: self.samplesperpixel] + if any((v - value[0] for v in value)): + self.sampleformat = value + else: + 
self.sampleformat = value[0] + + if "ImageLength" in tags: + if "RowsPerStrip" not in tags or tags["RowsPerStrip"].count > 1: + self.rowsperstrip = self.imagelength + # self.stripsperimage = int(math.floor( + # float(self.imagelength + self.rowsperstrip - 1) / + # self.rowsperstrip)) + + # determine dtype + dtype = self.sampleformat, self.bitspersample + dtype = TIFF.SAMPLE_DTYPES.get(dtype, None) + if dtype is not None: + dtype = numpy.dtype(dtype) + self.dtype = self._dtype = dtype + + # determine shape of data + imagelength = self.imagelength + imagewidth = self.imagewidth + imagedepth = self.imagedepth + samplesperpixel = self.samplesperpixel + + if self.is_stk: + assert self.imagedepth == 1 + uictag = tags["UIC2tag"].value + planes = tags["UIC2tag"].count + if self.planarconfig == 1: + self._shape = (planes, 1, 1, imagelength, imagewidth, samplesperpixel) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (planes, imagelength, imagewidth, samplesperpixel) + self.axes = "YXS" + else: + self._shape = (planes, samplesperpixel, 1, imagelength, imagewidth, 1) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (planes, samplesperpixel, imagelength, imagewidth) + self.axes = "SYX" + # detect type of series + if planes == 1: + self.shape = self.shape[1:] + elif numpy.all(uictag["ZDistance"] != 0): + self.axes = "Z" + self.axes + elif numpy.all(numpy.diff(uictag["TimeCreated"]) != 0): + self.axes = "T" + self.axes + else: + self.axes = "I" + self.axes + elif self.photometric == 2 or samplesperpixel > 1: # PHOTOMETRIC.RGB + if self.planarconfig == 1: + self._shape = ( + 1, + 1, + imagedepth, + imagelength, + imagewidth, + samplesperpixel, + ) + if imagedepth == 1: + self.shape = (imagelength, imagewidth, samplesperpixel) + self.axes = "YXS" + else: + self.shape = (imagedepth, imagelength, imagewidth, samplesperpixel) + self.axes = "ZYXS" 
+ else: + self._shape = ( + 1, + samplesperpixel, + imagedepth, + imagelength, + imagewidth, + 1, + ) + if imagedepth == 1: + self.shape = (samplesperpixel, imagelength, imagewidth) + self.axes = "SYX" + else: + self.shape = (samplesperpixel, imagedepth, imagelength, imagewidth) + self.axes = "SZYX" + else: + self._shape = (1, 1, imagedepth, imagelength, imagewidth, 1) + if imagedepth == 1: + self.shape = (imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (imagedepth, imagelength, imagewidth) + self.axes = "ZYX" + + # dataoffsets and databytecounts + if "TileOffsets" in tags: + self.dataoffsets = tags["TileOffsets"].value + elif "StripOffsets" in tags: + self.dataoffsets = tags["StripOffsets"].value + else: + self.dataoffsets = (0,) + + if "TileByteCounts" in tags: + self.databytecounts = tags["TileByteCounts"].value + elif "StripByteCounts" in tags: + self.databytecounts = tags["StripByteCounts"].value + else: + self.databytecounts = (product(self.shape) * (self.bitspersample // 8),) + if self.compression != 1: + warnings.warn("required ByteCounts tag is missing") + + assert len(self.shape) == len(self.axes) + + def asarray( + self, + out=None, + squeeze=True, + lock=None, + reopen=True, + maxsize=2**44, + validate=True, + ): + """Read image data from file and return as numpy array. + + Raise ValueError if format is unsupported. + + Parameters + ---------- + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + squeeze : bool + If True, all length-1 dimensions (except X and Y) are + squeezed out from the array. 
+ If False, the shape of the returned array might be different from + the page.shape. + lock : {RLock, NullContext} + A reentrant lock used to synchronize reads from file. + If None (default), the lock of the parent's filehandle is used. + reopen : bool + If True (default) and the parent file handle is closed, the file + is temporarily re-opened and closed if no exception occurs. + maxsize: int or None + Maximum size of data before a ValueError is raised. + Can be used to catch DOS. Default: 16 TB. + validate : bool + If True (default), validate various parameters. + If None, only validate parameters and return None. + + """ + self_ = self + self = self.keyframe # self or keyframe + + if not self._shape or product(self._shape) == 0: + return + + tags = self.tags + + if validate or validate is None: + if maxsize and product(self._shape) > maxsize: + raise ValueError("data are too large %s" % str(self._shape)) + if self.dtype is None: + raise ValueError( + "data type not supported: %s%i" + % (self.sampleformat, self.bitspersample) + ) + if self.compression not in TIFF.DECOMPESSORS: + raise ValueError("cannot decompress %s" % self.compression.name) + if "SampleFormat" in tags: + tag = tags["SampleFormat"] + if tag.count != 1 and any((i - tag.value[0] for i in tag.value)): + raise ValueError("sample formats do not match %s" % tag.value) + if self.is_chroma_subsampled and ( + self.compression != 7 or self.planarconfig == 2 + ): + raise NotImplementedError("chroma subsampling not supported") + if validate is None: + return + + fh = self_.parent.filehandle + lock = fh.lock if lock is None else lock + with lock: + closed = fh.closed + if closed: + if reopen: + fh.open() + else: + raise IOError("file handle is closed") + + dtype = self._dtype + shape = self._shape + imagewidth = self.imagewidth + imagelength = self.imagelength + imagedepth = self.imagedepth + bitspersample = self.bitspersample + typecode = self.parent.byteorder + dtype.char + lsb2msb = self.fillorder == 2 + 
offsets, bytecounts = self_.offsets_bytecounts + istiled = self.is_tiled + + if istiled: + tilewidth = self.tilewidth + tilelength = self.tilelength + tiledepth = self.tiledepth + tw = (imagewidth + tilewidth - 1) // tilewidth + tl = (imagelength + tilelength - 1) // tilelength + td = (imagedepth + tiledepth - 1) // tiledepth + shape = ( + shape[0], + shape[1], + td * tiledepth, + tl * tilelength, + tw * tilewidth, + shape[-1], + ) + tileshape = (tiledepth, tilelength, tilewidth, shape[-1]) + runlen = tilewidth + else: + runlen = imagewidth + + if self.planarconfig == 1: + runlen *= self.samplesperpixel + + if out == "memmap" and self.is_memmappable: + with lock: + result = fh.memmap_array(typecode, shape, offset=offsets[0]) + elif self.is_contiguous: + if out is not None: + out = create_output(out, shape, dtype) + with lock: + fh.seek(offsets[0]) + result = fh.read_array(typecode, product(shape), out=out) + if out is None and not result.dtype.isnative: + # swap byte order and dtype without copy + result.byteswap(True) + result = result.newbyteorder() + if lsb2msb: + reverse_bitorder(result) + else: + result = create_output(out, shape, dtype) + + decompress = TIFF.DECOMPESSORS[self.compression] + + if self.compression == 7: # COMPRESSION.JPEG + if bitspersample not in (8, 12): + raise ValueError("unsupported JPEG precision %i" % bitspersample) + if "JPEGTables" in tags: + table = tags["JPEGTables"].value + else: + table = b"" + unpack = identityfunc + colorspace = TIFF.PHOTOMETRIC(self.photometric).name + + def decompress( + x, + func=decompress, + table=table, + bitspersample=bitspersample, + colorspace=colorspace, + ): + return func(x, table, bitspersample, colorspace).reshape(-1) + + elif bitspersample in (8, 16, 32, 64, 128): + if (bitspersample * runlen) % 8: + raise ValueError("data and sample size mismatch") + + def unpack(x, typecode=typecode): + if self.predictor == 3: # PREDICTOR.FLOATINGPOINT + # the floating point horizontal differencing decoder + # 
needs the raw byte order + typecode = dtype.char + try: + # read only numpy array + return numpy.frombuffer(x, typecode) + except ValueError: + # strips may be missing EOI + # warnings.warn('unpack: %s' % e) + xlen = (len(x) // (bitspersample // 8)) * (bitspersample // 8) + return numpy.frombuffer(x[:xlen], typecode) + + elif isinstance(bitspersample, tuple): + + def unpack(x, typecode=typecode, bitspersample=bitspersample): + return unpack_rgb(x, typecode, bitspersample) + + else: + + def unpack( + x, typecode=typecode, bitspersample=bitspersample, runlen=runlen + ): + return unpack_ints(x, typecode, bitspersample, runlen) + + if istiled: + writable = None + tw, tl, td, pl = 0, 0, 0, 0 + for tile in buffered_read(fh, lock, offsets, bytecounts): + if lsb2msb: + tile = reverse_bitorder(tile) + tile = decompress(tile) + tile = unpack(tile) + try: + tile.shape = tileshape + except ValueError: + # incomplete tiles; see gdal issue #1179 + warnings.warn("invalid tile data") + t = numpy.zeros(tileshape, dtype).reshape(-1) + s = min(tile.size, t.size) + t[:s] = tile[:s] + tile = t.reshape(tileshape) + if self.predictor == 2: # PREDICTOR.HORIZONTAL + if writable is None: + writable = tile.flags["WRITEABLE"] + if writable: + numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) + else: + tile = numpy.cumsum(tile, axis=-2, dtype=dtype) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + raise NotImplementedError() + result[ + 0, + pl, + td : td + tiledepth, + tl : tl + tilelength, + tw : tw + tilewidth, + :, + ] = tile + del tile + tw += tilewidth + if tw >= shape[4]: + tw, tl = 0, tl + tilelength + if tl >= shape[3]: + tl, td = 0, td + tiledepth + if td >= shape[2]: + td, pl = 0, pl + 1 + result = result[..., :imagedepth, :imagelength, :imagewidth, :] + else: + strip_size = self.rowsperstrip * self.imagewidth + if self.planarconfig == 1: + strip_size *= self.samplesperpixel + result = result.reshape(-1) + index = 0 + for strip in buffered_read(fh, lock, offsets, 
bytecounts): + if lsb2msb: + strip = reverse_bitorder(strip) + strip = decompress(strip) + strip = unpack(strip) + size = min(result.size, strip.size, strip_size, result.size - index) + result[index : index + size] = strip[:size] + del strip + index += size + + result.shape = self._shape + + if self.predictor != 1 and not (istiled and not self.is_contiguous): + if self.parent.is_lsm and self.compression == 1: + pass # work around bug in LSM510 software + elif self.predictor == 2: # PREDICTOR.HORIZONTAL + numpy.cumsum(result, axis=-2, dtype=dtype, out=result) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + result = decode_floats(result) + + if squeeze: + try: + result.shape = self.shape + except ValueError: + warnings.warn( + "failed to reshape from %s to %s" + % (str(result.shape), str(self.shape)) + ) + + if closed: + # TODO: file should remain open if an exception occurred above + fh.close() + return result + + def asrgb( + self, + uint8=False, + alpha=None, + colormap=None, + dmin=None, + dmax=None, + *args, + **kwargs + ): + """Return image data as RGB(A). + + Work in progress. 
+ + """ + data = self.asarray(*args, **kwargs) + self = self.keyframe # self or keyframe + photometric = self.photometric + PHOTOMETRIC = TIFF.PHOTOMETRIC + + if photometric == PHOTOMETRIC.PALETTE: + colormap = self.colormap + if colormap.shape[1] < 2**self.bitspersample or self.dtype.char not in "BH": + raise ValueError("cannot apply colormap") + if uint8: + if colormap.max() > 255: + colormap >>= 8 + colormap = colormap.astype("uint8") + if "S" in self.axes: + data = data[..., 0] if self.planarconfig == 1 else data[0] + data = apply_colormap(data, colormap) + + elif photometric == PHOTOMETRIC.RGB: + if "ExtraSamples" in self.tags: + if alpha is None: + alpha = TIFF.EXTRASAMPLE + extrasamples = self.extrasamples + if self.tags["ExtraSamples"].count == 1: + extrasamples = (extrasamples,) + for i, exs in enumerate(extrasamples): + if exs in alpha: + if self.planarconfig == 1: + data = data[..., [0, 1, 2, 3 + i]] + else: + data = data[:, [0, 1, 2, 3 + i]] + break + else: + if self.planarconfig == 1: + data = data[..., :3] + else: + data = data[:, :3] + # TODO: convert to uint8? + + elif photometric == PHOTOMETRIC.MINISBLACK: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.MINISWHITE: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.SEPARATED: + raise NotImplementedError() + else: + raise NotImplementedError() + return data + + def aspage(self): + return self + + @property + def keyframe(self): + return self + + @keyframe.setter + def keyframe(self, index): + return + + @lazyattr + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.is_contiguous: + offset, byte_count = self.is_contiguous + return [offset], [byte_count] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @lazyattr + def is_contiguous(self): + """Return offset and size of contiguous data, else None. + + Excludes prediction and fill_order. 
+ + """ + if self.compression != 1 or self.bitspersample not in (8, 16, 32, 64): + return + if "TileWidth" in self.tags: + if ( + self.imagewidth != self.tilewidth + or self.imagelength % self.tilelength + or self.tilewidth % 16 + or self.tilelength % 16 + ): + return + if ( + "ImageDepth" in self.tags + and "TileDepth" in self.tags + and ( + self.imagelength != self.tilelength + or self.imagedepth % self.tiledepth + ) + ): + return + + offsets = self.dataoffsets + bytecounts = self.databytecounts + if len(offsets) == 1: + return offsets[0], bytecounts[0] + if self.is_stk or all( + ( + offsets[i] + bytecounts[i] == offsets[i + 1] or bytecounts[i + 1] == 0 + ) # no data/ignore offset + for i in range(len(offsets) - 1) + ): + return offsets[0], sum(bytecounts) + + @lazyattr + def is_final(self): + """Return if page's image data are stored in final form. + + Excludes byte-swapping. + + """ + return ( + self.is_contiguous + and self.fillorder == 1 + and self.predictor == 1 + and not self.is_chroma_subsampled + ) + + @lazyattr + def is_memmappable(self): + """Return if page's image data in file can be memory-mapped.""" + return ( + self.parent.filehandle.is_file + and self.is_final + and + # (self.bitspersample == 8 or self.parent.isnative) and + self.is_contiguous[0] % self.dtype.itemsize == 0 + ) # aligned? 
+ + def __str__(self, detail=0, width=79): + """Return string containing information about page.""" + if self.keyframe != self: + return TiffFrame.__str__(self, detail) + attr = "" + for name in ("memmappable", "final", "contiguous"): + attr = getattr(self, "is_" + name) + if attr: + attr = name.upper() + break + info = " ".join( + s + for s in ( + "x".join(str(i) for i in self.shape), + "%s%s" + % (TIFF.SAMPLEFORMAT(self.sampleformat).name, self.bitspersample), + "|".join( + i + for i in ( + TIFF.PHOTOMETRIC(self.photometric).name, + "TILED" if self.is_tiled else "", + self.compression.name if self.compression != 1 else "", + self.planarconfig.name if self.planarconfig != 1 else "", + self.predictor.name if self.predictor != 1 else "", + self.fillorder.name if self.fillorder != 1 else "", + ) + if i + ), + attr, + "|".join((f.upper() for f in self.flags)), + ) + if s + ) + info = "TiffPage %i @%i %s" % (self.index, self.offset, info) + if detail <= 0: + return info + info = [info] + tags = self.tags + tlines = [] + vlines = [] + for tag in sorted(tags.values(), key=lambda x: x.code): + value = tag.__str__(width=width + 1) + tlines.append(value[:width].strip()) + if detail > 1 and len(value) > width: + name = tag.name.upper() + if detail <= 2 and ("COUNTS" in name or "OFFSETS" in name): + value = pformat(tag.value, width=width, height=detail * 4) + else: + value = pformat(tag.value, width=width, height=detail * 12) + vlines.append("%s\n%s" % (tag.name, value)) + info.append("\n".join(tlines)) + if detail > 1: + info.append("\n\n".join(vlines)) + if detail > 3: + try: + info.append( + "DATA\n%s" % pformat(self.asarray(), width=width, height=detail * 8) + ) + except Exception: + pass + return "\n\n".join(info) + + @lazyattr + def flags(self): + """Return set of flags.""" + return set( + ( + name.lower() + for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, "is_" + name) + ) + ) + + @property + def ndim(self): + """Return number of array dimensions.""" + return 
len(self.shape) + + @property + def size(self): + """Return number of elements in array.""" + return product(self.shape) + + @lazyattr + def andor_tags(self): + """Return consolidated metadata from Andor tags as dict. + + Remove Andor tags from self.tags. + + """ + if not self.is_andor: + return + tags = self.tags + result = {"Id": tags["AndorId"].value} + for tag in list(self.tags.values()): + code = tag.code + if not 4864 < code < 5031: + continue + value = tag.value + name = tag.name[5:] if len(tag.name) > 5 else tag.name + result[name] = value + del tags[tag.name] + return result + + @lazyattr + def epics_tags(self): + """Return consolidated metadata from EPICS areaDetector tags as dict. + + Remove areaDetector tags from self.tags. + + """ + if not self.is_epics: + return + result = {} + tags = self.tags + for tag in list(self.tags.values()): + code = tag.code + if not 65000 <= code < 65500: + continue + value = tag.value + if code == 65000: + result["timeStamp"] = datetime.datetime.fromtimestamp(float(value)) + elif code == 65001: + result["uniqueID"] = int(value) + elif code == 65002: + result["epicsTSSec"] = int(value) + elif code == 65003: + result["epicsTSNsec"] = int(value) + else: + key, value = value.split(":", 1) + result[key] = astype(value) + del tags[tag.name] + return result + + @lazyattr + def geotiff_tags(self): + """Return consolidated metadata from GeoTIFF tags as dict.""" + if not self.is_geotiff: + return + tags = self.tags + + gkd = tags["GeoKeyDirectoryTag"].value + if gkd[0] != 1: + warnings.warn("invalid GeoKeyDirectoryTag") + return {} + + result = { + "KeyDirectoryVersion": gkd[0], + "KeyRevision": gkd[1], + "KeyRevisionMinor": gkd[2], + # 'NumberOfKeys': gkd[3], + } + # deltags = ['GeoKeyDirectoryTag'] + geokeys = TIFF.GEO_KEYS + geocodes = TIFF.GEO_CODES + for index in range(gkd[3]): + keyid, tagid, count, offset = gkd[4 + index * 4 : index * 4 + 8] + keyid = geokeys.get(keyid, keyid) + if tagid == 0: + value = offset + else: + 
tagname = TIFF.TAGS[tagid] + # deltags.append(tagname) + value = tags[tagname].value[offset : offset + count] + if tagid == 34737 and count > 1 and value[-1] == "|": + value = value[:-1] + value = value if count > 1 else value[0] + if keyid in geocodes: + try: + value = geocodes[keyid](value) + except Exception: + pass + result[keyid] = value + + if "IntergraphMatrixTag" in tags: + value = tags["IntergraphMatrixTag"].value + value = numpy.array(value) + if len(value) == 16: + value = value.reshape((4, 4)).tolist() + result["IntergraphMatrix"] = value + if "ModelPixelScaleTag" in tags: + value = numpy.array(tags["ModelPixelScaleTag"].value).tolist() + result["ModelPixelScale"] = value + if "ModelTiepointTag" in tags: + value = tags["ModelTiepointTag"].value + value = numpy.array(value).reshape((-1, 6)).squeeze().tolist() + result["ModelTiepoint"] = value + if "ModelTransformationTag" in tags: + value = tags["ModelTransformationTag"].value + value = numpy.array(value).reshape((4, 4)).tolist() + result["ModelTransformation"] = value + elif False: + # if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags: + sx, sy, sz = tags["ModelPixelScaleTag"].value + tiepoints = tags["ModelTiepointTag"].value + transforms = [] + for tp in range(0, len(tiepoints), 6): + i, j, k, x, y, z = tiepoints[tp : tp + 6] + transforms.append( + [ + [sx, 0.0, 0.0, x - i * sx], + [0.0, -sy, 0.0, y + j * sy], + [0.0, 0.0, sz, z - k * sz], + [0.0, 0.0, 0.0, 1.0], + ] + ) + if len(tiepoints) == 6: + transforms = transforms[0] + result["ModelTransformation"] = transforms + + if "RPCCoefficientTag" in tags: + rpcc = tags["RPCCoefficientTag"].value + result["RPCCoefficient"] = { + "ERR_BIAS": rpcc[0], + "ERR_RAND": rpcc[1], + "LINE_OFF": rpcc[2], + "SAMP_OFF": rpcc[3], + "LAT_OFF": rpcc[4], + "LONG_OFF": rpcc[5], + "HEIGHT_OFF": rpcc[6], + "LINE_SCALE": rpcc[7], + "SAMP_SCALE": rpcc[8], + "LAT_SCALE": rpcc[9], + "LONG_SCALE": rpcc[10], + "HEIGHT_SCALE": rpcc[11], + "LINE_NUM_COEFF": 
rpcc[12:33], + "LINE_DEN_COEFF ": rpcc[33:53], + "SAMP_NUM_COEFF": rpcc[53:73], + "SAMP_DEN_COEFF": rpcc[73:], + } + + return result + + @property + def is_tiled(self): + """Page contains tiled image.""" + return "TileWidth" in self.tags + + @property + def is_reduced(self): + """Page is reduced image of another image.""" + return "NewSubfileType" in self.tags and self.tags["NewSubfileType"].value & 1 + + @property + def is_chroma_subsampled(self): + """Page contains chroma subsampled image.""" + return "YCbCrSubSampling" in self.tags and self.tags[ + "YCbCrSubSampling" + ].value != (1, 1) + + @lazyattr + def is_imagej(self): + """Return ImageJ description if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:7] == "ImageJ=": + return description + + @lazyattr + def is_shaped(self): + """Return description containing array shape if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:1] == "{" and '"shape":' in description: + return description + if description[:6] == "shape=": + return description + + @property + def is_mdgel(self): + """Page contains MDFileTag tag.""" + return "MDFileTag" in self.tags + + @property + def is_mediacy(self): + """Page contains Media Cybernetics Id tag.""" + return "MC_Id" in self.tags and self.tags["MC_Id"].value[:7] == b"MC TIFF" + + @property + def is_stk(self): + """Page contains UIC2Tag tag.""" + return "UIC2tag" in self.tags + + @property + def is_lsm(self): + """Page contains CZ_LSMINFO tag.""" + return "CZ_LSMINFO" in self.tags + + @property + def is_fluoview(self): + """Page contains FluoView MM_STAMP tag.""" + return "MM_Stamp" in self.tags + + @property + def is_nih(self): + """Page contains NIH image header.""" + return "NIHImageHeader" in self.tags + + @property + def is_sgi(self): + """Page contains SGI image and tile depth tags.""" + return "ImageDepth" in 
self.tags and "TileDepth" in self.tags + + @property + def is_vista(self): + """Software tag is 'ISS Vista'.""" + return self.software == "ISS Vista" + + @property + def is_metaseries(self): + """Page contains MDS MetaSeries metadata in ImageDescription tag.""" + if self.index > 1 or self.software != "MetaSeries": + return False + d = self.description + return d.startswith("") and d.endswith("") + + @property + def is_ome(self): + """Page contains OME-XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == "" + + @property + def is_scn(self): + """Page contains Leica SCN XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == "" + + @property + def is_micromanager(self): + """Page contains Micro-Manager metadata.""" + return "MicroManagerMetadata" in self.tags + + @property + def is_andor(self): + """Page contains Andor Technology tags.""" + return "AndorId" in self.tags + + @property + def is_pilatus(self): + """Page contains Pilatus tags.""" + return self.software[:8] == "TVX TIFF" and self.description[:2] == "# " + + @property + def is_epics(self): + """Page contains EPICS areaDetector tags.""" + return ( + self.description == "EPICS areaDetector" + or self.software == "EPICS areaDetector" + ) + + @property + def is_tvips(self): + """Page contains TVIPS metadata.""" + return "TVIPS" in self.tags + + @property + def is_fei(self): + """Page contains SFEG or HELIOS metadata.""" + return "FEI_SFEG" in self.tags or "FEI_HELIOS" in self.tags + + @property + def is_sem(self): + """Page contains Zeiss SEM metadata.""" + return "CZ_SEM" in self.tags + + @property + def is_svs(self): + """Page contains Aperio metadata.""" + return self.description[:20] == "Aperio Image Library" + + @property + def is_scanimage(self): + """Page contains ScanImage metadata.""" + return ( + self.description[:12] == "state.config" + or 
self.software[:22] == "SI.LINE_FORMAT_VERSION" + or "scanimage.SI." in self.description[-256:] + ) + + @property + def is_qptiff(self): + """Page contains PerkinElmer tissue images metadata.""" + # The ImageDescription tag contains XML with a top-level + # element + return self.software[:15] == "PerkinElmer-QPI" + + @property + def is_geotiff(self): + """Page contains GeoTIFF metadata.""" + return "GeoKeyDirectoryTag" in self.tags + + +class TiffFrame(object): + """Lightweight TIFF image file directory (IFD). + + Only a limited number of tag values are read from file, e.g. StripOffsets, + and StripByteCounts. Other tag values are assumed to be identical with a + specified TiffPage instance, the keyframe. + + TiffFrame is intended to reduce resource usage and speed up reading data + from file, not for introspection of metadata. + + Not compatible with Python 2. + + """ + + __slots__ = ( + "keyframe", + "parent", + "index", + "offset", + "dataoffsets", + "databytecounts", + ) + + is_mdgel = False + tags = {} + + def __init__(self, parent, index, keyframe): + """Read specified tags from file. + + The file handle position must be at the offset to a valid IFD. 
+ + """ + self.keyframe = keyframe + self.parent = parent + self.index = index + self.dataoffsets = None + self.databytecounts = None + + unpack = struct.unpack + fh = parent.filehandle + self.offset = fh.tell() + try: + tagno = unpack(parent.tagnoformat, fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + raise ValueError("corrupted page list at offset %i" % self.offset) + + # tags = {} + tagcodes = {273, 279, 324, 325} # TIFF.FRAME_TAGS + tagsize = parent.tagsize + codeformat = parent.tagformat1[:2] + + data = fh.read(tagsize * tagno) + index = -tagsize + for _ in range(tagno): + index += tagsize + code = unpack(codeformat, data[index : index + 2])[0] + if code not in tagcodes: + continue + try: + tag = TiffTag(parent, data[index : index + tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + if code == 273 or code == 324: + setattr(self, "dataoffsets", tag.value) + elif code == 279 or code == 325: + setattr(self, "databytecounts", tag.value) + # elif code == 270: + # tagname = tag.name + # if tagname not in tags: + # tags[tagname] = bytes2str(tag.value) + # elif 'ImageDescription1' not in tags: + # tags['ImageDescription1'] = bytes2str(tag.value) + # else: + # tags[tag.name] = tag.value + + def aspage(self): + """Return TiffPage from file.""" + self.parent.filehandle.seek(self.offset) + return TiffPage(self.parent, index=self.index, keyframe=None) + + def asarray(self, *args, **kwargs): + """Read image data from file and return as numpy array.""" + # TODO: fix TypeError on Python 2 + # "TypeError: unbound method asarray() must be called with TiffPage + # instance as first argument (got TiffFrame instance instead)" + kwargs["validate"] = False + return TiffPage.asarray(self, *args, **kwargs) + + def asrgb(self, *args, **kwargs): + """Read image data from file and return RGB image as numpy array.""" + kwargs["validate"] = False + return TiffPage.asrgb(self, *args, **kwargs) + 
+ @property + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[:1], self.keyframe.is_contiguous[1:] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @property + def is_contiguous(self): + """Return offset and size of contiguous data, else None.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[0], self.keyframe.is_contiguous[1] + + @property + def is_memmappable(self): + """Return if page's image data in file can be memory-mapped.""" + return self.keyframe.is_memmappable + + def __getattr__(self, name): + """Return attribute from keyframe.""" + if name in TIFF.FRAME_ATTRS: + return getattr(self.keyframe, name) + # this error could be raised because an AttributeError was + # raised inside a @property function + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) + + def __str__(self, detail=0): + """Return string containing information about frame.""" + info = " ".join( + s for s in ("x".join(str(i) for i in self.shape), str(self.dtype)) + ) + return "TiffFrame %i @%i %s" % (self.index, self.offset, info) + + +class TiffTag(object): + """TIFF tag structure. + + Attributes + ---------- + name : string + Name of tag. + code : int + Decimal code of tag. + dtype : str + Datatype of tag data. One of TIFF DATA_FORMATS. + count : int + Number of values. + value : various types + Tag data as Python object. + ImageSourceData : int + Location of value in file. + + All attributes are read-only. 
+ + """ + + __slots__ = ("code", "count", "dtype", "value", "valueoffset") + + class Error(Exception): + pass + + def __init__(self, parent, tagheader, **kwargs): + """Initialize instance from tag header.""" + fh = parent.filehandle + byteorder = parent.byteorder + unpack = struct.unpack + offsetsize = parent.offsetsize + + self.valueoffset = fh.tell() + offsetsize + 4 + code, type_ = unpack(parent.tagformat1, tagheader[:4]) + count, value = unpack(parent.tagformat2, tagheader[4:]) + + try: + dtype = TIFF.DATA_FORMATS[type_] + except KeyError: + raise TiffTag.Error("unknown tag data type %i" % type_) + + fmt = "%s%i%s" % (byteorder, count * int(dtype[0]), dtype[1]) + size = struct.calcsize(fmt) + if size > offsetsize or code in TIFF.TAG_READERS: + self.valueoffset = offset = unpack(parent.offsetformat, value)[0] + if offset < 8 or offset > fh.size - size: + raise TiffTag.Error("invalid tag value offset") + # if offset % 2: + # warnings.warn('tag value does not begin on word boundary') + fh.seek(offset) + if code in TIFF.TAG_READERS: + readfunc = TIFF.TAG_READERS[code] + value = readfunc(fh, byteorder, dtype, count, offsetsize) + elif type_ == 7 or (count > 1 and dtype[-1] == "B"): + value = read_bytes(fh, byteorder, dtype, count, offsetsize) + elif code in TIFF.TAGS or dtype[-1] == "s": + value = unpack(fmt, fh.read(size)) + else: + value = read_numpy(fh, byteorder, dtype, count, offsetsize) + elif dtype[-1] == "B" or type_ == 7: + value = value[:size] + else: + value = unpack(fmt, value[:size]) + + process = ( + code not in TIFF.TAG_READERS and code not in TIFF.TAG_TUPLE and type_ != 7 + ) + if process and dtype[-1] == "s" and isinstance(value[0], bytes): + # TIFF ASCII fields can contain multiple strings, + # each terminated with a NUL + value = value[0] + try: + value = bytes2str(stripascii(value).strip()) + except UnicodeDecodeError: + warnings.warn("tag %i: coercing invalid ASCII to bytes" % code) + dtype = "1B" + else: + if code in TIFF.TAG_ENUM: + t = 
TIFF.TAG_ENUM[code] + try: + value = tuple(t(v) for v in value) + except ValueError as e: + warnings.warn(str(e)) + if process: + if len(value) == 1: + value = value[0] + + self.code = code + self.dtype = dtype + self.count = count + self.value = value + + @property + def name(self): + return TIFF.TAGS.get(self.code, str(self.code)) + + def _fix_lsm_bitspersample(self, parent): + """Correct LSM bitspersample tag. + + Old LSM writers may use a separate region for two 16-bit values, + although they fit into the tag value element of the tag. + + """ + if self.code == 258 and self.count == 2: + # TODO: test this case; need example file + warnings.warn("correcting LSM bitspersample tag") + tof = parent.offsetformat[parent.offsetsize] + self.valueoffset = struct.unpack(tof, self._value)[0] + parent.filehandle.seek(self.valueoffset) + self.value = struct.unpack(">> # read image stack from sequence of TIFF files + >>> imsave('temp_C001T001.tif', numpy.random.rand(64, 64)) + >>> imsave('temp_C001T002.tif', numpy.random.rand(64, 64)) + >>> tifs = TiffSequence('temp_C001*.tif') + >>> tifs.shape + (1, 2) + >>> tifs.axes + 'CT' + >>> data = tifs.asarray() + >>> data.shape + (1, 2, 64, 64) + + """ + + _patterns = { + "axes": r""" + # matches Olympus OIF and Leica TIFF series + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + """ + } + + class ParseError(Exception): + pass + + def __init__(self, files, imread=TiffFile, pattern="axes", *args, **kwargs): + """Initialize instance from multiple files. + + Parameters + ---------- + files : str, pathlib.Path, or sequence thereof + Glob pattern or sequence of file names. + Binary streams are not supported. 
        imread : function or class
            Image read function or class with asarray function returning numpy
            array from single file.
        pattern : str
            Regular expression pattern that matches axes names and sequence
            indices in file names.
            By default, the pattern matches Olympus OIF and Leica TIFF series.

        """
        if isinstance(files, pathlib.Path):
            files = str(files)
        if isinstance(files, basestring):
            # A string is treated as a glob pattern.
            files = natural_sorted(glob.glob(files))
        files = list(files)
        if not files:
            raise ValueError("no files found")
        if isinstance(files[0], pathlib.Path):
            files = [str(pathlib.Path(f)) for f in files]
        elif not isinstance(files[0], basestring):
            raise ValueError("not a file name")
        self.files = files

        if hasattr(imread, "asarray"):
            # redefine imread: wrap a file-reader class (e.g. TiffFile) so
            # imread(fname) opens the file and returns its asarray() result
            _imread = imread

            def imread(fname, *args, **kwargs):
                with _imread(fname) as im:
                    return im.asarray(*args, **kwargs)

        self.imread = imread

        self.pattern = self._patterns.get(pattern, pattern)
        try:
            self._parse()
            if not self.axes:
                self.axes = "I"
        except self.ParseError:
            # File names don't match the pattern: fall back to a flat
            # one-dimensional series.
            self.axes = "I"
            self.shape = (len(files),)
            self._startindex = (0,)
            self._indices = tuple((i,) for i in range(len(files)))

    def __str__(self):
        """Return string with information about image sequence."""
        return "\n".join(
            [
                self.files[0],
                " size: %i" % len(self.files),
                " axes: %s" % self.axes,
                " shape: %s" % str(self.shape),
            ]
        )

    def __len__(self):
        return len(self.files)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # No persistent resources are held; files are opened per read.
        pass

    def asarray(self, out=None, *args, **kwargs):
        """Read image data from all files and return as numpy array.

        The args and kwargs parameters are passed to the imread function.

        Raise IndexError or ValueError if image shapes do not match.

        """
        # Read the first file to determine per-image shape and dtype.
        im = self.imread(self.files[0], *args, **kwargs)
        shape = self.shape + im.shape
        result = create_output(out, shape, dtype=im.dtype)
        result = result.reshape(-1, *im.shape)
        for index, fname in zip(self._indices, self.files):
            # Map the parsed multi-dimensional index to a flat position.
            index = [i - j for i, j in zip(index, self._startindex)]
            index = numpy.ravel_multi_index(index, self.shape)
            im = self.imread(fname, *args, **kwargs)
            result[index] = im
        result.shape = shape
        return result

    def _parse(self):
        """Get axes and shape from file names."""
        if not self.pattern:
            raise self.ParseError("invalid pattern")
        pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
        matches = pattern.findall(self.files[0])
        if not matches:
            raise self.ParseError("pattern does not match file names")
        # Use the last match in the name (closest to the extension).
        matches = matches[-1]
        if len(matches) % 2:
            raise self.ParseError("pattern does not match axis name and index")
        axes = "".join(m for m in matches[::2] if m)
        if not axes:
            raise self.ParseError("pattern does not match file names")

        indices = []
        for fname in self.files:
            matches = pattern.findall(fname)[-1]
            if axes != "".join(m for m in matches[::2] if m):
                raise ValueError("axes do not match within the image sequence")
            indices.append([int(m) for m in matches[1::2] if m])
        shape = tuple(numpy.max(indices, axis=0))
        startindex = tuple(numpy.min(indices, axis=0))
        # Normalize so indices need not start at zero.
        shape = tuple(i - j + 1 for i, j in zip(shape, startindex))
        if product(shape) != len(self.files):
            warnings.warn("files are missing. Missing data are zeroed")

        self.axes = axes.upper()
        self.shape = shape
        self._indices = indices
        self._startindex = startindex


class FileHandle(object):
    """Binary file handle.

    A limited, special purpose file handler that can:

    * handle embedded files (for CZI within CZI files)
    * re-open closed files (for multi-file formats, such as OME-TIFF)
    * read and write numpy arrays and records from file like objects

    Only 'rb' and 'wb' modes are supported.
    Concurrently reading and writing of the same stream is untested.

    When initialized from another file handle, do not use it unless this
    FileHandle is closed.

    Attributes
    ----------
    name : str
        Name of the file.
    path : str
        Absolute path to file.
    size : int
        Size of file in bytes.
    is_file : bool
        If True, file has a filno and can be memory-mapped.

    All attributes are read-only.

    """

    __slots__ = (
        "_fh",
        "_file",
        "_mode",
        "_name",
        "_dir",
        "_lock",
        "_offset",
        "_size",
        "_close",
        "is_file",
    )

    def __init__(self, file, mode="rb", name=None, offset=None, size=None):
        """Initialize file handle from file name or another file handle.

        Parameters
        ----------
        file : str, pathlib.Path, binary stream, or FileHandle
            File name or seekable binary stream, such as an open file
            or BytesIO.
        mode : str
            File open mode in case 'file' is a file name. Must be 'rb' or 'wb'.
        name : str
            Optional name of file in case 'file' is a binary stream.
        offset : int
            Optional start position of embedded file. By default, this is
            the current file position.
        size : int
            Optional size of embedded file. By default, this is the number
            of bytes from the 'offset' to the end of the file.

        """
        self._file = file
        self._fh = None
        self._mode = mode
        self._name = name
        self._dir = ""
        self._offset = offset
        self._size = size
        self._close = True
        self.is_file = False
        self._lock = NullContext()
        self.open()

    def open(self):
        """Open or re-open file."""
        if self._fh:
            return  # file is open

        if isinstance(self._file, pathlib.Path):
            self._file = str(self._file)
        if isinstance(self._file, basestring):
            # file name
            self._file = os.path.realpath(self._file)
            self._dir, self._name = os.path.split(self._file)
            self._fh = open(self._file, self._mode)
            self._close = True
            if self._offset is None:
                self._offset = 0
        elif isinstance(self._file, FileHandle):
            # FileHandle: share the underlying stream (embedded file);
            # do not close it when this handle is closed.
            self._fh = self._file._fh
            if self._offset is None:
                self._offset = 0
            self._offset += self._file._offset
            self._close = False
            if not self._name:
                if self._offset:
                    name, ext = os.path.splitext(self._file._name)
                    self._name = "%s@%i%s" % (name, self._offset, ext)
                else:
                    self._name = self._file._name
            if self._mode and self._mode != self._file._mode:
                raise ValueError("FileHandle has wrong mode")
            self._mode = self._file._mode
            self._dir = self._file._dir
        elif hasattr(self._file, "seek"):
            # binary stream: open file, BytesIO
            try:
                self._file.tell()
            except Exception:
                raise ValueError("binary stream is not seekable")
            self._fh = self._file
            if self._offset is None:
                self._offset = self._file.tell()
            self._close = False
            if not self._name:
                try:
                    self._dir, self._name = os.path.split(self._fh.name)
                except AttributeError:
                    self._name = "Unnamed binary stream"
            try:
                self._mode = self._fh.mode
            except AttributeError:
                pass
        else:
            raise ValueError(
                "The first parameter must be a file name, "
                "seekable binary stream, or FileHandle"
            )

        if self._offset:
            self._fh.seek(self._offset)

        if self._size is None:
            # NOTE(review): seek(self._offset, 2) lands at EOF + offset, so
            # for a nonzero offset this yields file_size + offset rather than
            # file_size - offset as the docstring implies — verify against
            # upstream tifffile before changing.
            pos = self._fh.tell()
            self._fh.seek(self._offset, 2)
            self._size = self._fh.tell()
            self._fh.seek(pos)

        try:
            self._fh.fileno()
            self.is_file = True
        except Exception:
            self.is_file = False

    def read(self, size=-1):
        """Read 'size' bytes from file, or until EOF is reached."""
        # For embedded files, cap an unbounded read at the embedded size.
        if size < 0 and self._offset:
            size = self._size
        return self._fh.read(size)

    def write(self, bytestring):
        """Write bytestring to file."""
        return self._fh.write(bytestring)

    def flush(self):
        """Flush write buffers if applicable."""
        return self._fh.flush()

    def memmap_array(self, dtype, shape, offset=0, mode="r", order="C"):
        """Return numpy.memmap of data stored in file."""
        if not self.is_file:
            raise ValueError("Cannot memory-map file without fileno")
        return numpy.memmap(
            self._fh,
            dtype=dtype,
            mode=mode,
            offset=self._offset + offset,
            shape=shape,
            order=order,
        )

    def read_array(
        self, dtype, count=-1, sep="", chunksize=2**25, out=None, native=False
    ):
        """Return numpy array from file.

        Work around numpy issue #2230, "numpy.fromfile does not accept
        StringIO object" https://github.com/numpy/numpy/issues/2230.

        """
        fh = self._fh
        dtype = numpy.dtype(dtype)
        size = self._size if count < 0 else count * dtype.itemsize

        if out is None:
            try:
                result = numpy.fromfile(fh, dtype, count, sep)
            except IOError:
                # ByteIO
                data = fh.read(size)
                result = numpy.frombuffer(data, dtype, count).copy()
            if native and not result.dtype.isnative:
                # swap byte order and dtype without copy
                result.byteswap(True)
                result = result.newbyteorder()
            return result

        # Read data from file in chunks and copy to output array
        shape = out.shape
        size = min(out.nbytes, size)
        out = out.reshape(-1)
        index = 0
        while size > 0:
            data = fh.read(min(chunksize, size))
            datasize = len(data)
            if datasize == 0:
                break
            size -= datasize
            data = numpy.frombuffer(data, dtype)
            out[index : index + data.size] = data
            index += data.size

        if hasattr(out, "flush"):
            out.flush()
        return out.reshape(shape)

    def read_record(self, dtype, shape=1, byteorder=None):
        """Return numpy record from file."""
        rec = numpy.rec
        try:
            record = rec.fromfile(self._fh, dtype, shape, byteorder=byteorder)
        except Exception:
            # Fall back for streams numpy.rec.fromfile cannot handle.
            dtype = numpy.dtype(dtype)
            if shape is None:
                shape = self._size // dtype.itemsize
            size = product(sequence(shape)) * dtype.itemsize
            data = self._fh.read(size)
            record = rec.fromstring(data, dtype, shape, byteorder=byteorder)
        return record[0] if shape == 1 else record

    def write_empty(self, size):
        """Append size bytes to file. Position must be at end of file."""
        if size < 1:
            return
        # Seek past the hole and write one byte so the file is extended.
        self._fh.seek(size - 1, 1)
        self._fh.write(b"\x00")

    def write_array(self, data):
        """Write numpy array to binary file."""
        try:
            data.tofile(self._fh)
        except Exception:
            # BytesIO
            self._fh.write(data.tostring())

    def tell(self):
        """Return file's current position."""
        # Positions are reported relative to the embedded-file offset.
        return self._fh.tell() - self._offset

    def seek(self, offset, whence=0):
        """Set file's current position."""
        if self._offset:
            # Translate embedded-file positions to absolute positions.
            if whence == 0:
                self._fh.seek(self._offset + offset, whence)
                return
            elif whence == 2 and self._size > 0:
                self._fh.seek(self._offset + self._size + offset, 0)
                return
        self._fh.seek(offset, whence)

    def close(self):
        """Close file."""
        if self._close and self._fh:
            self._fh.close()
            self._fh = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __getattr__(self, name):
        """Return attribute from underlying file object."""
        if self._offset:
            warnings.warn("FileHandle: '%s' not implemented for embedded files" % name)
        return getattr(self._fh, name)

    @property
    def name(self):
        return self._name

    @property
    def dirname(self):
        return self._dir

    @property
    def path(self):
        return os.path.join(self._dir, self._name)

    @property
    def size(self):
        return self._size

    @property
    def closed(self):
        return self._fh is None

    @property
    def lock(self):
        return self._lock

    @lock.setter
    def lock(self, value):
        # Any truthy value installs a real lock; falsy restores a no-op.
        self._lock = threading.RLock() if value else NullContext()


class NullContext(object):
    """Null context manager.

    >>> with NullContext():
    ...     pass
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass


class OpenFileCache(object):
    """Keep files open."""

    __slots__ = ("files", "past", "lock", "size")

    def __init__(self, size, lock=None):
        """Initialize open file cache."""
        self.past = []  # FIFO of opened files
        self.files = {}  # refcounts of opened files
        self.lock = NullContext() if lock is None else lock
        self.size = int(size)

    def open(self, filehandle):
        """Re-open file if necessary."""
        with self.lock:
            if filehandle in self.files:
                self.files[filehandle] += 1
            elif filehandle.closed:
                filehandle.open()
                self.files[filehandle] = 1
                self.past.append(filehandle)

    def close(self, filehandle):
        """Close opened file if no longer used."""
        with self.lock:
            if filehandle in self.files:
                self.files[filehandle] -= 1
            # trim the file cache
            # NOTE(review): the loop below reuses the name 'filehandle',
            # shadowing the parameter; intentional here since the parameter
            # is no longer needed, but easy to misread.
            index = 0
            size = len(self.past)
            while size > self.size and index < size:
                filehandle = self.past[index]
                if self.files[filehandle] == 0:
                    filehandle.close()
                    del self.files[filehandle]
                    del self.past[index]
                    size -= 1
                else:
                    index += 1

    def clear(self):
        """Close all opened files if not in use."""
        with self.lock:
            for filehandle, refcount in list(self.files.items()):
                if refcount == 0:
                    filehandle.close()
                    del self.files[filehandle]
                    del self.past[self.past.index(filehandle)]


class LazyConst(object):
    """Class whose attributes are computed on first access from its methods."""

    def __init__(self, cls):
        self._cls = cls
        self.__doc__ = getattr(cls, "__doc__")

    def __getattr__(self, name):
        # Call the zero-argument method once and cache its result on the
        # instance so subsequent accesses bypass __getattr__.
        func = getattr(self._cls, name)
        if not callable(func):
            return func
        try:
            value = func()
        except TypeError:
            # Python 2 unbound method
            value = func.__func__()
        setattr(self, name, value)
        return value


@LazyConst
class TIFF(object):
    """Namespace for module constants."""

    # Methods below take no arguments: LazyConst calls them once and caches
    # the result as a constant attribute.
    def TAGS():
        # TIFF tag codes and names from TIFF6, TIFF/EP, EXIF,
and other specs + return { + 11: "ProcessingSoftware", + 254: "NewSubfileType", + 255: "SubfileType", + 256: "ImageWidth", + 257: "ImageLength", + 258: "BitsPerSample", + 259: "Compression", + 262: "PhotometricInterpretation", + 263: "Thresholding", + 264: "CellWidth", + 265: "CellLength", + 266: "FillOrder", + 269: "DocumentName", + 270: "ImageDescription", + 271: "Make", + 272: "Model", + 273: "StripOffsets", + 274: "Orientation", + 277: "SamplesPerPixel", + 278: "RowsPerStrip", + 279: "StripByteCounts", + 280: "MinSampleValue", + 281: "MaxSampleValue", + 282: "XResolution", + 283: "YResolution", + 284: "PlanarConfiguration", + 285: "PageName", + 286: "XPosition", + 287: "YPosition", + 288: "FreeOffsets", + 289: "FreeByteCounts", + 290: "GrayResponseUnit", + 291: "GrayResponseCurve", + 292: "T4Options", + 293: "T6Options", + 296: "ResolutionUnit", + 297: "PageNumber", + 300: "ColorResponseUnit", + 301: "TransferFunction", + 305: "Software", + 306: "DateTime", + 315: "Artist", + 316: "HostComputer", + 317: "Predictor", + 318: "WhitePoint", + 319: "PrimaryChromaticities", + 320: "ColorMap", + 321: "HalftoneHints", + 322: "TileWidth", + 323: "TileLength", + 324: "TileOffsets", + 325: "TileByteCounts", + 326: "BadFaxLines", + 327: "CleanFaxData", + 328: "ConsecutiveBadFaxLines", + 330: "SubIFDs", + 332: "InkSet", + 333: "InkNames", + 334: "NumberOfInks", + 336: "DotRange", + 337: "TargetPrinter", + 338: "ExtraSamples", + 339: "SampleFormat", + 340: "SMinSampleValue", + 341: "SMaxSampleValue", + 342: "TransferRange", + 343: "ClipPath", + 344: "XClipPathUnits", + 345: "YClipPathUnits", + 346: "Indexed", + 347: "JPEGTables", + 351: "OPIProxy", + 400: "GlobalParametersIFD", + 401: "ProfileType", + 402: "FaxProfile", + 403: "CodingMethods", + 404: "VersionYear", + 405: "ModeNumber", + 433: "Decode", + 434: "DefaultImageColor", + 435: "T82Options", + 437: "JPEGTables_", # 347 + 512: "JPEGProc", + 513: "JPEGInterchangeFormat", + 514: "JPEGInterchangeFormatLength", + 515: 
"JPEGRestartInterval", + 517: "JPEGLosslessPredictors", + 518: "JPEGPointTransforms", + 519: "JPEGQTables", + 520: "JPEGDCTables", + 521: "JPEGACTables", + 529: "YCbCrCoefficients", + 530: "YCbCrSubSampling", + 531: "YCbCrPositioning", + 532: "ReferenceBlackWhite", + 559: "StripRowCounts", + 700: "XMP", # XMLPacket + 769: "GDIGamma", # GDI+ + 770: "ICCProfileDescriptor", # GDI+ + 771: "SRGBRenderingIntent", # GDI+ + 800: "ImageTitle", # GDI+ + 999: "USPTO_Miscellaneous", + 4864: "AndorId", # TODO: Andor Technology 4864 - 5030 + 4869: "AndorTemperature", + 4876: "AndorExposureTime", + 4878: "AndorKineticCycleTime", + 4879: "AndorAccumulations", + 4881: "AndorAcquisitionCycleTime", + 4882: "AndorReadoutTime", + 4884: "AndorPhotonCounting", + 4885: "AndorEmDacLevel", + 4890: "AndorFrames", + 4896: "AndorHorizontalFlip", + 4897: "AndorVerticalFlip", + 4898: "AndorClockwise", + 4899: "AndorCounterClockwise", + 4904: "AndorVerticalClockVoltage", + 4905: "AndorVerticalShiftSpeed", + 4907: "AndorPreAmpSetting", + 4908: "AndorCameraSerial", + 4911: "AndorActualTemperature", + 4912: "AndorBaselineClamp", + 4913: "AndorPrescans", + 4914: "AndorModel", + 4915: "AndorChipSizeX", + 4916: "AndorChipSizeY", + 4944: "AndorBaselineOffset", + 4966: "AndorSoftwareVersion", + 18246: "Rating", + 18247: "XP_DIP_XML", + 18248: "StitchInfo", + 18249: "RatingPercent", + 20481: "ResolutionXUnit", # GDI+ + 20482: "ResolutionYUnit", # GDI+ + 20483: "ResolutionXLengthUnit", # GDI+ + 20484: "ResolutionYLengthUnit", # GDI+ + 20485: "PrintFlags", # GDI+ + 20486: "PrintFlagsVersion", # GDI+ + 20487: "PrintFlagsCrop", # GDI+ + 20488: "PrintFlagsBleedWidth", # GDI+ + 20489: "PrintFlagsBleedWidthScale", # GDI+ + 20490: "HalftoneLPI", # GDI+ + 20491: "HalftoneLPIUnit", # GDI+ + 20492: "HalftoneDegree", # GDI+ + 20493: "HalftoneShape", # GDI+ + 20494: "HalftoneMisc", # GDI+ + 20495: "HalftoneScreen", # GDI+ + 20496: "JPEGQuality", # GDI+ + 20497: "GridSize", # GDI+ + 20498: "ThumbnailFormat", # GDI+ + 
20499: "ThumbnailWidth", # GDI+ + 20500: "ThumbnailHeight", # GDI+ + 20501: "ThumbnailColorDepth", # GDI+ + 20502: "ThumbnailPlanes", # GDI+ + 20503: "ThumbnailRawBytes", # GDI+ + 20504: "ThumbnailSize", # GDI+ + 20505: "ThumbnailCompressedSize", # GDI+ + 20506: "ColorTransferFunction", # GDI+ + 20507: "ThumbnailData", + 20512: "ThumbnailImageWidth", # GDI+ + 20513: "ThumbnailImageHeight", # GDI+ + 20514: "ThumbnailBitsPerSample", # GDI+ + 20515: "ThumbnailCompression", + 20516: "ThumbnailPhotometricInterp", # GDI+ + 20517: "ThumbnailImageDescription", # GDI+ + 20518: "ThumbnailEquipMake", # GDI+ + 20519: "ThumbnailEquipModel", # GDI+ + 20520: "ThumbnailStripOffsets", # GDI+ + 20521: "ThumbnailOrientation", # GDI+ + 20522: "ThumbnailSamplesPerPixel", # GDI+ + 20523: "ThumbnailRowsPerStrip", # GDI+ + 20524: "ThumbnailStripBytesCount", # GDI+ + 20525: "ThumbnailResolutionX", + 20526: "ThumbnailResolutionY", + 20527: "ThumbnailPlanarConfig", # GDI+ + 20528: "ThumbnailResolutionUnit", + 20529: "ThumbnailTransferFunction", + 20530: "ThumbnailSoftwareUsed", # GDI+ + 20531: "ThumbnailDateTime", # GDI+ + 20532: "ThumbnailArtist", # GDI+ + 20533: "ThumbnailWhitePoint", # GDI+ + 20534: "ThumbnailPrimaryChromaticities", # GDI+ + 20535: "ThumbnailYCbCrCoefficients", # GDI+ + 20536: "ThumbnailYCbCrSubsampling", # GDI+ + 20537: "ThumbnailYCbCrPositioning", + 20538: "ThumbnailRefBlackWhite", # GDI+ + 20539: "ThumbnailCopyRight", # GDI+ + 20545: "InteroperabilityIndex", + 20546: "InteroperabilityVersion", + 20624: "LuminanceTable", + 20625: "ChrominanceTable", + 20736: "FrameDelay", # GDI+ + 20737: "LoopCount", # GDI+ + 20738: "GlobalPalette", # GDI+ + 20739: "IndexBackground", # GDI+ + 20740: "IndexTransparent", # GDI+ + 20752: "PixelUnit", # GDI+ + 20753: "PixelPerUnitX", # GDI+ + 20754: "PixelPerUnitY", # GDI+ + 20755: "PaletteHistogram", # GDI+ + 28672: "SonyRawFileType", # Sony ARW + 28722: "VignettingCorrParams", # Sony ARW + 28725: "ChromaticAberrationCorrParams", # Sony 
ARW + 28727: "DistortionCorrParams", # Sony ARW + # Private tags >= 32768 + 32781: "ImageID", + 32931: "WangTag1", + 32932: "WangAnnotation", + 32933: "WangTag3", + 32934: "WangTag4", + 32953: "ImageReferencePoints", + 32954: "RegionXformTackPoint", + 32955: "WarpQuadrilateral", + 32956: "AffineTransformMat", + 32995: "Matteing", + 32996: "DataType", + 32997: "ImageDepth", + 32998: "TileDepth", + 33300: "ImageFullWidth", + 33301: "ImageFullLength", + 33302: "TextureFormat", + 33303: "TextureWrapModes", + 33304: "FieldOfViewCotangent", + 33305: "MatrixWorldToScreen", + 33306: "MatrixWorldToCamera", + 33405: "Model2", + 33421: "CFARepeatPatternDim", + 33422: "CFAPattern", + 33423: "BatteryLevel", + 33424: "KodakIFD", + 33434: "ExposureTime", + 33437: "FNumber", + 33432: "Copyright", + 33445: "MDFileTag", + 33446: "MDScalePixel", + 33447: "MDColorTable", + 33448: "MDLabName", + 33449: "MDSampleInfo", + 33450: "MDPrepDate", + 33451: "MDPrepTime", + 33452: "MDFileUnits", + 33550: "ModelPixelScaleTag", + 33589: "AdventScale", + 33590: "AdventRevision", + 33628: "UIC1tag", # Metamorph Universal Imaging Corp STK + 33629: "UIC2tag", + 33630: "UIC3tag", + 33631: "UIC4tag", + 33723: "IPTCNAA", + 33858: "ExtendedTagsOffset", # DEFF points IFD with private tags + 33918: "IntergraphPacketData", # INGRPacketDataTag + 33919: "IntergraphFlagRegisters", # INGRFlagRegisters + 33920: "IntergraphMatrixTag", # IrasBTransformationMatrix + 33921: "INGRReserved", + 33922: "ModelTiepointTag", + 33923: "LeicaMagic", + 34016: "Site", + 34017: "ColorSequence", + 34018: "IT8Header", + 34019: "RasterPadding", + 34020: "BitsPerRunLength", + 34021: "BitsPerExtendedRunLength", + 34022: "ColorTable", + 34023: "ImageColorIndicator", + 34024: "BackgroundColorIndicator", + 34025: "ImageColorValue", + 34026: "BackgroundColorValue", + 34027: "PixelIntensityRange", + 34028: "TransparencyIndicator", + 34029: "ColorCharacterization", + 34030: "HCUsage", + 34031: "TrapIndicator", + 34032: "CMYKEquivalent", + 
34118: "CZ_SEM", # Zeiss SEM + 34152: "AFCP_IPTC", + 34232: "PixelMagicJBIGOptions", + 34263: "JPLCartoIFD", + 34122: "IPLAB", # number of images + 34264: "ModelTransformationTag", + 34306: "WB_GRGBLevels", # Leaf MOS + 34310: "LeafData", + 34361: "MM_Header", + 34362: "MM_Stamp", + 34363: "MM_Unknown", + 34377: "ImageResources", # Photoshop + 34386: "MM_UserBlock", + 34412: "CZ_LSMINFO", + 34665: "ExifTag", + 34675: "InterColorProfile", # ICCProfile + 34680: "FEI_SFEG", # + 34682: "FEI_HELIOS", # + 34683: "FEI_TITAN", # + 34687: "FXExtensions", + 34688: "MultiProfiles", + 34689: "SharedData", + 34690: "T88Options", + 34710: "MarCCD", # offset to MarCCD header + 34732: "ImageLayer", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: "GeoAsciiParamsTag", + 34750: "JBIGOptions", + 34821: "PIXTIFF", # ? Pixel Translations Inc + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34853: "GPSTag", # GPSIFD + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34857: "Interlace", + 34858: "TimeZoneOffset", + 34859: "SelfTimerMode", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAXFaxRecvParams", + 34909: "HylaFAXFaxSubAddress", + 34910: "HylaFAXFaxRecvTime", + 34911: "FaxDcs", + 34929: "FedexEDR", + 34954: "LeafSubIFD", + 34959: "Aphelion1", + 34960: "Aphelion2", + 34961: "AphelionInternal", # ADCIS + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTimeDigitized", + 36873: "GooglePlusUploadCode", + 36880: "OffsetTime", + 36881: "OffsetTimeOriginal", + 36882: "OffsetTimeDigitized", + # TODO: Pilatus/CHESS/TV6 36864..37120 conflicting with Exif tags + # 36864: 'TVX ?', + # 36865: 'TVX_NumExposure', + # 36866: 'TVX_NumBackground', + # 36867: 'TVX_ExposureTime', + # 36868: 'TVX_BackgroundTime', + # 36870: 'TVX ?', + # 36873: 'TVX_SubBpp', + # 36874: 'TVX_SubWide', + # 36875: 
'TVX_SubHigh', + # 36876: 'TVX_BlackLevel', + # 36877: 'TVX_DarkCurrent', + # 36878: 'TVX_ReadNoise', + # 36879: 'TVX_DarkCurrentNoise', + # 36880: 'TVX_BeamMonitor', + # 37120: 'TVX_UserVariables', # A/D values + 37121: "ComponentsConfiguration", + 37122: "CompressedBitsPerPixel", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37387: "FlashEnergy_", # 37387 + 37388: "SpatialFrequencyResponse_", # 37388 + 37389: "Noise", + 37390: "FocalPlaneXResolution", + 37391: "FocalPlaneYResolution", + 37392: "FocalPlaneResolutionUnit", + 37393: "ImageNumber", + 37394: "SecurityClassification", + 37395: "ImageHistory", + 37396: "SubjectLocation", + 37397: "ExposureIndex", + 37398: "TIFFEPStandardID", + 37399: "SensingMethod", + 37434: "CIP3DataFile", + 37435: "CIP3Sheet", + 37436: "CIP3Side", + 37439: "StoNits", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubsecTime", + 37521: "SubsecTimeOriginal", + 37522: "SubsecTimeDigitized", + 37679: "MODIText", # Microsoft Office Document Imaging + 37680: "MODIOLEPropertySetStorage", + 37681: "MODIPositioning", + 37706: "TVIPS", # offset to TemData structure + 37707: "TVIPS1", + 37708: "TVIPS2", # same TemData structure as undefined + 37724: "ImageSourceData", # Photoshop + 37888: "Temperature", + 37889: "Humidity", + 37890: "Pressure", + 37891: "WaterDepth", + 37892: "Acceleration", + 37893: "CameraElevationAngle", + 40001: "MC_IpWinScal", # Media Cybernetics + 40100: "MC_IdOld", + 40965: "InteroperabilityTag", # InteropOffset + 40091: "XPTitle", + 40092: "XPComment", + 40093: "XPAuthor", + 40094: "XPKeywords", + 40095: "XPSubject", + 40960: "FlashpixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40976: "SamsungRawPointersOffset", + 40977: 
"SamsungRawPointersLength", + 41217: "SamsungRawByteOrder", + 41218: "SamsungRawUnknown", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41485: "Noise_", # 37389 + 41486: "FocalPlaneXResolution_", # 37390 + 41487: "FocalPlaneYResolution_", # 37391 + 41488: "FocalPlaneResolutionUnit_", # 37392 + 41489: "ImageNumber_", # 37393 + 41490: "SecurityClassification_", # 37394 + 41491: "ImageHistory_", # 37395 + 41492: "SubjectLocation_", # 37395 + 41493: "ExposureIndex_ ", # 37397 + 41494: "TIFF-EPStandardID", + 41495: "SensingMethod_", # 37399 + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern_", # 33422 + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: "LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 43314: "NIHImageHeader", + 44992: "ExpandSoftware", + 44993: "ExpandLens", + 44994: "ExpandFilm", + 44995: "ExpandFilterLens", + 44996: "ExpandScanner", + 44997: "ExpandFlashLamp", + 48129: "PixelFormat", # HDP and WDP + 48130: "Transformation", + 48131: "Uncompressed", + 48132: "ImageType", + 48256: "ImageWidth_", # 256 + 48257: "ImageHeight_", + 48258: "WidthResolution", + 48259: "HeightResolution", + 48320: "ImageOffset", + 48321: "ImageByteCount", + 48322: "AlphaOffset", + 48323: "AlphaByteCount", + 48324: "ImageDataDiscard", + 48325: "AlphaDataDiscard", + 50215: "OceScanjobDescription", + 50216: "OceApplicationSelector", + 50217: "OceIdentificationNumber", + 50218: "OceImageLogicCharacteristics", + 50255: "Annotations", + 50288: "MC_Id", # Media Cybernetics + 
50289: "MC_XYPosition", + 50290: "MC_ZPosition", + 50291: "MC_XYCalibration", + 50292: "MC_LensCharacteristics", + 50293: "MC_ChannelName", + 50294: "MC_ExcitationWavelength", + 50295: "MC_TimeStamp", + 50296: "MC_FrameProperties", + 50341: "PrintImageMatching", + 50495: "PCO_RAW", # TODO: PCO CamWare + 50547: "OriginalFileName", + 50560: "USPTO_OriginalContentType", # US Patent Office + 50561: "USPTO_RotationCode", + 50656: "CR2CFAPattern", + 50706: "DNGVersion", # DNG 50706 .. 51112 + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: "BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50739: "ShadowScale", + 50740: "DNGPrivateData", + 50741: "MakerNoteSafety", + 50752: "RawImageSegmentation", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50780: "BestQualityScale", + 50781: "RawDataUniqueID", + 50784: "AliasLayerMetadata", + 50827: "OriginalRawFileName", + 50828: "OriginalRawFileData", + 50829: "ActiveArea", + 50830: "MaskedAreas", + 50831: "AsShotICCProfile", + 50832: "AsShotPreProfileMatrix", + 50833: "CurrentICCProfile", + 50834: "CurrentPreProfileMatrix", + 50838: "IJMetadataByteCounts", + 50839: "IJMetadata", + 50844: "RPCCoefficientTag", + 50879: 
"ColorimetricReference", + 50885: "SRawType", + 50898: "PanasonicTitle", + 50899: "PanasonicTitle2", + 50931: "CameraCalibrationSignature", + 50932: "ProfileCalibrationSignature", + 50933: "ProfileIFD", + 50934: "AsShotProfileName", + 50935: "NoiseReductionApplied", + 50936: "ProfileName", + 50937: "ProfileHueSatMapDims", + 50938: "ProfileHueSatMapData1", + 50939: "ProfileHueSatMapData2", + 50940: "ProfileToneCurve", + 50941: "ProfileEmbedPolicy", + 50942: "ProfileCopyright", + 50964: "ForwardMatrix1", + 50965: "ForwardMatrix2", + 50966: "PreviewApplicationName", + 50967: "PreviewApplicationVersion", + 50968: "PreviewSettingsName", + 50969: "PreviewSettingsDigest", + 50970: "PreviewColorSpace", + 50971: "PreviewDateTime", + 50972: "RawImageDigest", + 50973: "OriginalRawFileDigest", + 50974: "SubTileBlockSize", + 50975: "RowInterleaveFactor", + 50981: "ProfileLookTableDims", + 50982: "ProfileLookTableData", + 51008: "OpcodeList1", + 51009: "OpcodeList2", + 51022: "OpcodeList3", + 51023: "FibicsXML", # + 51041: "NoiseProfile", + 51043: "TimeCodes", + 51044: "FrameRate", + 51058: "TStop", + 51081: "ReelName", + 51089: "OriginalDefaultFinalSize", + 51090: "OriginalBestQualitySize", + 51091: "OriginalDefaultCropSize", + 51105: "CameraLabel", + 51107: "ProfileHueSatMapEncoding", + 51108: "ProfileLookTableEncoding", + 51109: "BaselineExposureOffset", + 51110: "DefaultBlackRender", + 51111: "NewRawImageDigest", + 51112: "RawToPreviewGain", + 51125: "DefaultUserCrop", + 51123: "MicroManagerMetadata", + 59932: "Padding", + 59933: "OffsetSchema", + # Reusable Tags 65000-65535 + # 65000: Dimap_Document XML + # 65000-65112: Photoshop Camera RAW EXIF tags + # 65000: 'OwnerName', + # 65001: 'SerialNumber', + # 65002: 'Lens', + # 65024: 'KDC_IFD', + # 65100: 'RawFile', + # 65101: 'Converter', + # 65102: 'WhiteBalance', + # 65105: 'Exposure', + # 65106: 'Shadows', + # 65107: 'Brightness', + # 65108: 'Contrast', + # 65109: 'Saturation', + # 65110: 'Sharpness', + # 65111: 
'Smoothness', + # 65112: 'MoireFilter', + 65200: "FlexXML", # + 65563: "PerSample", + } + + def TAG_NAMES(): + return {v: c for c, v in TIFF.TAGS.items()} + + def TAG_READERS(): + # Map TIFF tag codes to import functions + return { + 320: read_colormap, + # 700: read_bytes, # read_utf8, + # 34377: read_bytes, + 33723: read_bytes, + # 34675: read_bytes, + 33628: read_uic1tag, # Universal Imaging Corp STK + 33629: read_uic2tag, + 33630: read_uic3tag, + 33631: read_uic4tag, + 34118: read_cz_sem, # Carl Zeiss SEM + 34361: read_mm_header, # Olympus FluoView + 34362: read_mm_stamp, + 34363: read_numpy, # MM_Unknown + 34386: read_numpy, # MM_UserBlock + 34412: read_cz_lsminfo, # Carl Zeiss LSM + 34680: read_fei_metadata, # S-FEG + 34682: read_fei_metadata, # Helios NanoLab + 37706: read_tvips_header, # TVIPS EMMENU + 37724: read_bytes, # ImageSourceData + 33923: read_bytes, # read_leica_magic + 43314: read_nih_image_header, + # 40001: read_bytes, + 40100: read_bytes, + 50288: read_bytes, + 50296: read_bytes, + 50839: read_bytes, + 51123: read_json, + 34665: read_exif_ifd, + 34853: read_gps_ifd, + 40965: read_interoperability_ifd, + } + + def TAG_TUPLE(): + # Tags whose values must be stored as tuples + return frozenset((273, 279, 324, 325, 530, 531, 34736)) + + def TAG_ATTRIBUTES(): + # Map tag codes to TiffPage attribute names + return { + "ImageWidth": "imagewidth", + "ImageLength": "imagelength", + "BitsPerSample": "bitspersample", + "Compression": "compression", + "PlanarConfiguration": "planarconfig", + "FillOrder": "fillorder", + "PhotometricInterpretation": "photometric", + "ColorMap": "colormap", + "ImageDescription": "description", + "ImageDescription1": "description1", + "SamplesPerPixel": "samplesperpixel", + "RowsPerStrip": "rowsperstrip", + "Software": "software", + "Predictor": "predictor", + "TileWidth": "tilewidth", + "TileLength": "tilelength", + "ExtraSamples": "extrasamples", + "SampleFormat": "sampleformat", + "ImageDepth": "imagedepth", + "TileDepth": 
"tiledepth", + } + + def TAG_ENUM(): + return { + # 254: TIFF.FILETYPE, + 255: TIFF.OFILETYPE, + 259: TIFF.COMPRESSION, + 262: TIFF.PHOTOMETRIC, + 263: TIFF.THRESHHOLD, + 266: TIFF.FILLORDER, + 274: TIFF.ORIENTATION, + 284: TIFF.PLANARCONFIG, + 290: TIFF.GRAYRESPONSEUNIT, + # 292: TIFF.GROUP3OPT, + # 293: TIFF.GROUP4OPT, + 296: TIFF.RESUNIT, + 300: TIFF.COLORRESPONSEUNIT, + 317: TIFF.PREDICTOR, + 338: TIFF.EXTRASAMPLE, + 339: TIFF.SAMPLEFORMAT, + # 512: TIFF.JPEGPROC, + # 531: TIFF.YCBCRPOSITION, + } + + def FILETYPE(): + class FILETYPE(enum.IntFlag): + # Python 3.6 only + UNDEFINED = 0 + REDUCEDIMAGE = 1 + PAGE = 2 + MASK = 4 + + return FILETYPE + + def OFILETYPE(): + class OFILETYPE(enum.IntEnum): + UNDEFINED = 0 + IMAGE = 1 + REDUCEDIMAGE = 2 + PAGE = 3 + + return OFILETYPE + + def COMPRESSION(): + class COMPRESSION(enum.IntEnum): + NONE = 1 # Uncompressed + CCITTRLE = 2 # CCITT 1D + CCITT_T4 = 3 # 'T4/Group 3 Fax', + CCITT_T6 = 4 # 'T6/Group 4 Fax', + LZW = 5 + OJPEG = 6 # old-style JPEG + JPEG = 7 + ADOBE_DEFLATE = 8 + JBIG_BW = 9 + JBIG_COLOR = 10 + JPEG_99 = 99 + KODAK_262 = 262 + NEXT = 32766 + SONY_ARW = 32767 + PACKED_RAW = 32769 + SAMSUNG_SRW = 32770 + CCIRLEW = 32771 + SAMSUNG_SRW2 = 32772 + PACKBITS = 32773 + THUNDERSCAN = 32809 + IT8CTPAD = 32895 + IT8LW = 32896 + IT8MP = 32897 + IT8BL = 32898 + PIXARFILM = 32908 + PIXARLOG = 32909 + DEFLATE = 32946 + DCS = 32947 + APERIO_JP2000_YCBC = 33003 # Leica Aperio + APERIO_JP2000_RGB = 33005 # Leica Aperio + JBIG = 34661 + SGILOG = 34676 + SGILOG24 = 34677 + JPEG2000 = 34712 + NIKON_NEF = 34713 + JBIG2 = 34715 + MDI_BINARY = 34718 # 'Microsoft Document Imaging + MDI_PROGRESSIVE = 34719 # 'Microsoft Document Imaging + MDI_VECTOR = 34720 # 'Microsoft Document Imaging + JPEG_LOSSY = 34892 + LZMA = 34925 + ZSTD = 34926 + OPS_PNG = 34933 # Objective Pathology Services + OPS_JPEGXR = 34934 # Objective Pathology Services + PIXTIFF = 50013 + KODAK_DCR = 65000 + PENTAX_PEF = 65535 + # def __bool__(self): return self 
!= 1 # Python 3.6 only + + return COMPRESSION + + def PHOTOMETRIC(): + class PHOTOMETRIC(enum.IntEnum): + MINISWHITE = 0 + MINISBLACK = 1 + RGB = 2 + PALETTE = 3 + MASK = 4 + SEPARATED = 5 # CMYK + YCBCR = 6 + CIELAB = 8 + ICCLAB = 9 + ITULAB = 10 + CFA = 32803 # Color Filter Array + LOGL = 32844 + LOGLUV = 32845 + LINEAR_RAW = 34892 + + return PHOTOMETRIC + + def THRESHHOLD(): + class THRESHHOLD(enum.IntEnum): + BILEVEL = 1 + HALFTONE = 2 + ERRORDIFFUSE = 3 + + return THRESHHOLD + + def FILLORDER(): + class FILLORDER(enum.IntEnum): + MSB2LSB = 1 + LSB2MSB = 2 + + return FILLORDER + + def ORIENTATION(): + class ORIENTATION(enum.IntEnum): + TOPLEFT = 1 + TOPRIGHT = 2 + BOTRIGHT = 3 + BOTLEFT = 4 + LEFTTOP = 5 + RIGHTTOP = 6 + RIGHTBOT = 7 + LEFTBOT = 8 + + return ORIENTATION + + def PLANARCONFIG(): + class PLANARCONFIG(enum.IntEnum): + CONTIG = 1 + SEPARATE = 2 + + return PLANARCONFIG + + def GRAYRESPONSEUNIT(): + class GRAYRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + + return GRAYRESPONSEUNIT + + def GROUP4OPT(): + class GROUP4OPT(enum.IntEnum): + UNCOMPRESSED = 2 + + return GROUP4OPT + + def RESUNIT(): + class RESUNIT(enum.IntEnum): + NONE = 1 + INCH = 2 + CENTIMETER = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + + return RESUNIT + + def COLORRESPONSEUNIT(): + class COLORRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + + return COLORRESPONSEUNIT + + def PREDICTOR(): + class PREDICTOR(enum.IntEnum): + NONE = 1 + HORIZONTAL = 2 + FLOATINGPOINT = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + + return PREDICTOR + + def EXTRASAMPLE(): + class EXTRASAMPLE(enum.IntEnum): + UNSPECIFIED = 0 + ASSOCALPHA = 1 + UNASSALPHA = 2 + + return EXTRASAMPLE + + def SAMPLEFORMAT(): + class SAMPLEFORMAT(enum.IntEnum): + UINT = 1 + INT = 2 + IEEEFP = 3 + VOID = 4 + COMPLEXINT = 5 + COMPLEXIEEEFP = 6 + + return SAMPLEFORMAT + + def DATATYPES(): + class 
DATATYPES(enum.IntEnum): + NOTYPE = 0 + BYTE = 1 + ASCII = 2 + SHORT = 3 + LONG = 4 + RATIONAL = 5 + SBYTE = 6 + UNDEFINED = 7 + SSHORT = 8 + SLONG = 9 + SRATIONAL = 10 + FLOAT = 11 + DOUBLE = 12 + IFD = 13 + UNICODE = 14 + COMPLEX = 15 + LONG8 = 16 + SLONG8 = 17 + IFD8 = 18 + + return DATATYPES + + def DATA_FORMATS(): + # Map TIFF DATATYPES to Python struct formats + return { + 1: "1B", # BYTE 8-bit unsigned integer. + 2: "1s", # ASCII 8-bit byte that contains a 7-bit ASCII code; + # the last byte must be NULL (binary zero). + 3: "1H", # SHORT 16-bit (2-byte) unsigned integer + 4: "1I", # LONG 32-bit (4-byte) unsigned integer. + 5: "2I", # RATIONAL Two LONGs: the first represents the numerator + # of a fraction; the second, the denominator. + 6: "1b", # SBYTE An 8-bit signed (twos-complement) integer. + 7: "1B", # UNDEFINED An 8-bit byte that may contain anything, + # depending on the definition of the field. + 8: "1h", # SSHORT A 16-bit (2-byte) signed (twos-complement) + # integer. + 9: "1i", # SLONG A 32-bit (4-byte) signed (twos-complement) + # integer. + 10: "2i", # SRATIONAL Two SLONGs: the first represents the + # numerator of a fraction, the second the denominator. + 11: "1f", # FLOAT Single precision (4-byte) IEEE format. + 12: "1d", # DOUBLE Double precision (8-byte) IEEE format. + 13: "1I", # IFD unsigned 4 byte IFD offset. 
+ # 14: '', # UNICODE + # 15: '', # COMPLEX + 16: "1Q", # LONG8 unsigned 8 byte integer (BigTiff) + 17: "1q", # SLONG8 signed 8 byte integer (BigTiff) + 18: "1Q", # IFD8 unsigned 8 byte IFD offset (BigTiff) + } + + def DATA_DTYPES(): + # Map numpy dtypes to TIFF DATATYPES + return { + "B": 1, + "s": 2, + "H": 3, + "I": 4, + "2I": 5, + "b": 6, + "h": 8, + "i": 9, + "2i": 10, + "f": 11, + "d": 12, + "Q": 16, + "q": 17, + } + + def SAMPLE_DTYPES(): + # Map TIFF SampleFormats and BitsPerSample to numpy dtype + return { + (1, 1): "?", # bitmap + (1, 2): "B", + (1, 3): "B", + (1, 4): "B", + (1, 5): "B", + (1, 6): "B", + (1, 7): "B", + (1, 8): "B", + (1, 9): "H", + (1, 10): "H", + (1, 11): "H", + (1, 12): "H", + (1, 13): "H", + (1, 14): "H", + (1, 15): "H", + (1, 16): "H", + (1, 17): "I", + (1, 18): "I", + (1, 19): "I", + (1, 20): "I", + (1, 21): "I", + (1, 22): "I", + (1, 23): "I", + (1, 24): "I", + (1, 25): "I", + (1, 26): "I", + (1, 27): "I", + (1, 28): "I", + (1, 29): "I", + (1, 30): "I", + (1, 31): "I", + (1, 32): "I", + (1, 64): "Q", + (2, 8): "b", + (2, 16): "h", + (2, 32): "i", + (2, 64): "q", + (3, 16): "e", + (3, 32): "f", + (3, 64): "d", + (6, 64): "F", + (6, 128): "D", + (1, (5, 6, 5)): "B", + } + + def COMPESSORS(): + # Map COMPRESSION to compress functions and default compression levels + + class Compressors(object): + """Delay import compressor functions.""" + + def __init__(self): + self._compressors = {8: (zlib.compress, 6), 32946: (zlib.compress, 6)} + + def __getitem__(self, key): + if key in self._compressors: + return self._compressors[key] + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + + def lzma_compress(x, level): + return lzma.compress(x) + + self._compressors[key] = lzma_compress, 0 + return lzma_compress, 0 + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + 
self._compressors[key] = zstd.compress, 9 + return zstd.compress, 9 + + raise KeyError + + def __contains__(self, key): + try: + self[key] + return True + except KeyError: + return False + + return Compressors() + + def DECOMPESSORS(): + # Map COMPRESSION to decompress functions + + class Decompressors(object): + """Delay import decompressor functions.""" + + def __init__(self): + self._decompressors = { + None: identityfunc, + 1: identityfunc, + 5: decode_lzw, + 8: zlib.decompress, + 32773: decode_packbits, + 32946: zlib.decompress, + } + + def __getitem__(self, key): + if key in self._decompressors: + return self._decompressors[key] + + if key == 7: + try: + from imagecodecs import jpeg, jpeg_12 + except ImportError: + raise KeyError + + def decode_jpeg(x, table, bps, colorspace=None): + if bps == 8: + return jpeg.decode_jpeg(x, table, colorspace) + elif bps == 12: + return jpeg_12.decode_jpeg_12(x, table, colorspace) + else: + raise ValueError("bitspersample not supported") + + self._decompressors[key] = decode_jpeg + return decode_jpeg + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = lzma.decompress + return lzma.decompress + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = zstd.decompress + return zstd.decompress + raise KeyError + + def __contains__(self, item): + try: + self[item] + return True + except KeyError: + return False + + return Decompressors() + + def FRAME_ATTRS(): + # Attributes that a TiffFrame shares with its keyframe + return set("shape ndim size dtype axes is_final".split()) + + def FILE_FLAGS(): + # TiffFile and TiffPage 'is_\*' attributes + exclude = set( + "reduced final memmappable contiguous tiled " "chroma_subsampled".split() + ) + return set( + a[3:] for a in dir(TiffPage) if a[:3] == "is_" and a[3:] not in 
exclude + ) + + def FILE_EXTENSIONS(): + # TIFF file extensions + return tuple( + "tif tiff ome.tif lsm stk qptiff pcoraw " + "gel seq svs bif tf8 tf2 btf".split() + ) + + def FILEOPEN_FILTER(): + # String for use in Windows File Open box + return [ + ("%s files" % ext.upper(), "*.%s" % ext) for ext in TIFF.FILE_EXTENSIONS + ] + [("allfiles", "*")] + + def AXES_LABELS(): + # TODO: is there a standard for character axes labels? + axes = { + "X": "width", + "Y": "height", + "Z": "depth", + "S": "sample", # rgb(a) + "I": "series", # general sequence, plane, page, IFD + "T": "time", + "C": "channel", # color, emission wavelength + "A": "angle", + "P": "phase", # formerly F # P is Position in LSM! + "R": "tile", # region, point, mosaic + "H": "lifetime", # histogram + "E": "lambda", # excitation wavelength + "L": "exposure", # lux + "V": "event", + "Q": "other", + "M": "mosaic", # LSM 6 + } + axes.update(dict((v, k) for k, v in axes.items())) + return axes + + def ANDOR_TAGS(): + # Andor Technology tags #4864 - 5030 + return set(range(4864, 5030)) + + def EXIF_TAGS(): + tags = { + # 65000 - 65112 Photoshop Camera RAW EXIF tags + 65000: "OwnerName", + 65001: "SerialNumber", + 65002: "Lens", + 65100: "RawFile", + 65101: "Converter", + 65102: "WhiteBalance", + 65105: "Exposure", + 65106: "Shadows", + 65107: "Brightness", + 65108: "Contrast", + 65109: "Saturation", + 65110: "Sharpness", + 65111: "Smoothness", + 65112: "MoireFilter", + } + tags.update(TIFF.TAGS) + return tags + + def GPS_TAGS(): + return { + 0: "GPSVersionID", + 1: "GPSLatitudeRef", + 2: "GPSLatitude", + 3: "GPSLongitudeRef", + 4: "GPSLongitude", + 5: "GPSAltitudeRef", + 6: "GPSAltitude", + 7: "GPSTimeStamp", + 8: "GPSSatellites", + 9: "GPSStatus", + 10: "GPSMeasureMode", + 11: "GPSDOP", + 12: "GPSSpeedRef", + 13: "GPSSpeed", + 14: "GPSTrackRef", + 15: "GPSTrack", + 16: "GPSImgDirectionRef", + 17: "GPSImgDirection", + 18: "GPSMapDatum", + 19: "GPSDestLatitudeRef", + 20: "GPSDestLatitude", + 21: 
"GPSDestLongitudeRef", + 22: "GPSDestLongitude", + 23: "GPSDestBearingRef", + 24: "GPSDestBearing", + 25: "GPSDestDistanceRef", + 26: "GPSDestDistance", + 27: "GPSProcessingMethod", + 28: "GPSAreaInformation", + 29: "GPSDateStamp", + 30: "GPSDifferential", + 31: "GPSHPositioningError", + } + + def IOP_TAGS(): + return { + 1: "InteroperabilityIndex", + 2: "InteroperabilityVersion", + 4096: "RelatedImageFileFormat", + 4097: "RelatedImageWidth", + 4098: "RelatedImageLength", + } + + def GEO_KEYS(): + return { + 1024: "GTModelTypeGeoKey", + 1025: "GTRasterTypeGeoKey", + 1026: "GTCitationGeoKey", + 2048: "GeographicTypeGeoKey", + 2049: "GeogCitationGeoKey", + 2050: "GeogGeodeticDatumGeoKey", + 2051: "GeogPrimeMeridianGeoKey", + 2052: "GeogLinearUnitsGeoKey", + 2053: "GeogLinearUnitSizeGeoKey", + 2054: "GeogAngularUnitsGeoKey", + 2055: "GeogAngularUnitsSizeGeoKey", + 2056: "GeogEllipsoidGeoKey", + 2057: "GeogSemiMajorAxisGeoKey", + 2058: "GeogSemiMinorAxisGeoKey", + 2059: "GeogInvFlatteningGeoKey", + 2060: "GeogAzimuthUnitsGeoKey", + 2061: "GeogPrimeMeridianLongGeoKey", + 2062: "GeogTOWGS84GeoKey", + 3059: "ProjLinearUnitsInterpCorrectGeoKey", # GDAL + 3072: "ProjectedCSTypeGeoKey", + 3073: "PCSCitationGeoKey", + 3074: "ProjectionGeoKey", + 3075: "ProjCoordTransGeoKey", + 3076: "ProjLinearUnitsGeoKey", + 3077: "ProjLinearUnitSizeGeoKey", + 3078: "ProjStdParallel1GeoKey", + 3079: "ProjStdParallel2GeoKey", + 3080: "ProjNatOriginLongGeoKey", + 3081: "ProjNatOriginLatGeoKey", + 3082: "ProjFalseEastingGeoKey", + 3083: "ProjFalseNorthingGeoKey", + 3084: "ProjFalseOriginLongGeoKey", + 3085: "ProjFalseOriginLatGeoKey", + 3086: "ProjFalseOriginEastingGeoKey", + 3087: "ProjFalseOriginNorthingGeoKey", + 3088: "ProjCenterLongGeoKey", + 3089: "ProjCenterLatGeoKey", + 3090: "ProjCenterEastingGeoKey", + 3091: "ProjFalseOriginNorthingGeoKey", + 3092: "ProjScaleAtNatOriginGeoKey", + 3093: "ProjScaleAtCenterGeoKey", + 3094: "ProjAzimuthAngleGeoKey", + 3095: 
"ProjStraightVertPoleLongGeoKey", + 3096: "ProjRectifiedGridAngleGeoKey", + 4096: "VerticalCSTypeGeoKey", + 4097: "VerticalCitationGeoKey", + 4098: "VerticalDatumGeoKey", + 4099: "VerticalUnitsGeoKey", + } + + def GEO_CODES(): + try: + from .tifffile_geodb import GEO_CODES # delayed import + except (ImportError, ValueError): + try: + from tifffile_geodb import GEO_CODES # delayed import + except (ImportError, ValueError): + GEO_CODES = {} + return GEO_CODES + + def CZ_LSMINFO(): + return [ + ("MagicNumber", "u4"), + ("StructureSize", "i4"), + ("DimensionX", "i4"), + ("DimensionY", "i4"), + ("DimensionZ", "i4"), + ("DimensionChannels", "i4"), + ("DimensionTime", "i4"), + ("DataType", "i4"), # DATATYPES + ("ThumbnailX", "i4"), + ("ThumbnailY", "i4"), + ("VoxelSizeX", "f8"), + ("VoxelSizeY", "f8"), + ("VoxelSizeZ", "f8"), + ("OriginX", "f8"), + ("OriginY", "f8"), + ("OriginZ", "f8"), + ("ScanType", "u2"), + ("SpectralScan", "u2"), + ("TypeOfData", "u4"), # TYPEOFDATA + ("OffsetVectorOverlay", "u4"), + ("OffsetInputLut", "u4"), + ("OffsetOutputLut", "u4"), + ("OffsetChannelColors", "u4"), + ("TimeIntervall", "f8"), + ("OffsetChannelDataTypes", "u4"), + ("OffsetScanInformation", "u4"), # SCANINFO + ("OffsetKsData", "u4"), + ("OffsetTimeStamps", "u4"), + ("OffsetEventList", "u4"), + ("OffsetRoi", "u4"), + ("OffsetBleachRoi", "u4"), + ("OffsetNextRecording", "u4"), + # LSM 2.0 ends here + ("DisplayAspectX", "f8"), + ("DisplayAspectY", "f8"), + ("DisplayAspectZ", "f8"), + ("DisplayAspectTime", "f8"), + ("OffsetMeanOfRoisOverlay", "u4"), + ("OffsetTopoIsolineOverlay", "u4"), + ("OffsetTopoProfileOverlay", "u4"), + ("OffsetLinescanOverlay", "u4"), + ("ToolbarFlags", "u4"), + ("OffsetChannelWavelength", "u4"), + ("OffsetChannelFactors", "u4"), + ("ObjectiveSphereCorrection", "f8"), + ("OffsetUnmixParameters", "u4"), + # LSM 3.2, 4.0 end here + ("OffsetAcquisitionParameters", "u4"), + ("OffsetCharacteristics", "u4"), + ("OffsetPalette", "u4"), + ("TimeDifferenceX", "f8"), + 
("TimeDifferenceY", "f8"), + ("TimeDifferenceZ", "f8"), + ("InternalUse1", "u4"), + ("DimensionP", "i4"), + ("DimensionM", "i4"), + ("DimensionsReserved", "16i4"), + ("OffsetTilePositions", "u4"), + ("", "9u4"), # Reserved + ("OffsetPositions", "u4"), + # ('', '21u4'), # must be 0 + ] + + def CZ_LSMINFO_READERS(): + # Import functions for CZ_LSMINFO sub-records + # TODO: read more CZ_LSMINFO sub-records + return { + "ScanInformation": read_lsm_scaninfo, + "TimeStamps": read_lsm_timestamps, + "EventList": read_lsm_eventlist, + "ChannelColors": read_lsm_channelcolors, + "Positions": read_lsm_floatpairs, + "TilePositions": read_lsm_floatpairs, + "VectorOverlay": None, + "InputLut": None, + "OutputLut": None, + "TimeIntervall": None, + "ChannelDataTypes": None, + "KsData": None, + "Roi": None, + "BleachRoi": None, + "NextRecording": None, + "MeanOfRoisOverlay": None, + "TopoIsolineOverlay": None, + "TopoProfileOverlay": None, + "ChannelWavelength": None, + "SphereCorrection": None, + "ChannelFactors": None, + "UnmixParameters": None, + "AcquisitionParameters": None, + "Characteristics": None, + } + + def CZ_LSMINFO_SCANTYPE(): + # Map CZ_LSMINFO.ScanType to dimension order + return { + 0: "XYZCT", # 'Stack' normal x-y-z-scan + 1: "XYZCT", # 'Z-Scan' x-z-plane Y=1 + 2: "XYZCT", # 'Line' + 3: "XYTCZ", # 'Time Series Plane' time series x-y XYCTZ ? 
Z=1 + 4: "XYZTC", # 'Time Series z-Scan' time series x-z + 5: "XYTCZ", # 'Time Series Mean-of-ROIs' + 6: "XYZTC", # 'Time Series Stack' time series x-y-z + 7: "XYCTZ", # Spline Scan + 8: "XYCZT", # Spline Plane x-z + 9: "XYTCZ", # Time Series Spline Plane x-z + 10: "XYZCT", # 'Time Series Point' point mode + } + + def CZ_LSMINFO_DIMENSIONS(): + # Map dimension codes to CZ_LSMINFO attribute + return { + "X": "DimensionX", + "Y": "DimensionY", + "Z": "DimensionZ", + "C": "DimensionChannels", + "T": "DimensionTime", + "P": "DimensionP", + "M": "DimensionM", + } + + def CZ_LSMINFO_DATATYPES(): + # Description of CZ_LSMINFO.DataType + return { + 0: "varying data types", + 1: "8 bit unsigned integer", + 2: "12 bit unsigned integer", + 5: "32 bit float", + } + + def CZ_LSMINFO_TYPEOFDATA(): + # Description of CZ_LSMINFO.TypeOfData + return { + 0: "Original scan data", + 1: "Calculated data", + 2: "3D reconstruction", + 3: "Topography height map", + } + + def CZ_LSMINFO_SCANINFO_ARRAYS(): + return { + 0x20000000: "Tracks", + 0x30000000: "Lasers", + 0x60000000: "DetectionChannels", + 0x80000000: "IlluminationChannels", + 0xA0000000: "BeamSplitters", + 0xC0000000: "DataChannels", + 0x11000000: "Timers", + 0x13000000: "Markers", + } + + def CZ_LSMINFO_SCANINFO_STRUCTS(): + return { + # 0x10000000: 'Recording', + 0x40000000: "Track", + 0x50000000: "Laser", + 0x70000000: "DetectionChannel", + 0x90000000: "IlluminationChannel", + 0xB0000000: "BeamSplitter", + 0xD0000000: "DataChannel", + 0x12000000: "Timer", + 0x14000000: "Marker", + } + + def CZ_LSMINFO_SCANINFO_ATTRIBUTES(): + return { + # Recording + 0x10000001: "Name", + 0x10000002: "Description", + 0x10000003: "Notes", + 0x10000004: "Objective", + 0x10000005: "ProcessingSummary", + 0x10000006: "SpecialScanMode", + 0x10000007: "ScanType", + 0x10000008: "ScanMode", + 0x10000009: "NumberOfStacks", + 0x1000000A: "LinesPerPlane", + 0x1000000B: "SamplesPerLine", + 0x1000000C: "PlanesPerVolume", + 0x1000000D: "ImagesWidth", + 
0x1000000E: "ImagesHeight", + 0x1000000F: "ImagesNumberPlanes", + 0x10000010: "ImagesNumberStacks", + 0x10000011: "ImagesNumberChannels", + 0x10000012: "LinscanXySize", + 0x10000013: "ScanDirection", + 0x10000014: "TimeSeries", + 0x10000015: "OriginalScanData", + 0x10000016: "ZoomX", + 0x10000017: "ZoomY", + 0x10000018: "ZoomZ", + 0x10000019: "Sample0X", + 0x1000001A: "Sample0Y", + 0x1000001B: "Sample0Z", + 0x1000001C: "SampleSpacing", + 0x1000001D: "LineSpacing", + 0x1000001E: "PlaneSpacing", + 0x1000001F: "PlaneWidth", + 0x10000020: "PlaneHeight", + 0x10000021: "VolumeDepth", + 0x10000023: "Nutation", + 0x10000034: "Rotation", + 0x10000035: "Precession", + 0x10000036: "Sample0time", + 0x10000037: "StartScanTriggerIn", + 0x10000038: "StartScanTriggerOut", + 0x10000039: "StartScanEvent", + 0x10000040: "StartScanTime", + 0x10000041: "StopScanTriggerIn", + 0x10000042: "StopScanTriggerOut", + 0x10000043: "StopScanEvent", + 0x10000044: "StopScanTime", + 0x10000045: "UseRois", + 0x10000046: "UseReducedMemoryRois", + 0x10000047: "User", + 0x10000048: "UseBcCorrection", + 0x10000049: "PositionBcCorrection1", + 0x10000050: "PositionBcCorrection2", + 0x10000051: "InterpolationY", + 0x10000052: "CameraBinning", + 0x10000053: "CameraSupersampling", + 0x10000054: "CameraFrameWidth", + 0x10000055: "CameraFrameHeight", + 0x10000056: "CameraOffsetX", + 0x10000057: "CameraOffsetY", + 0x10000059: "RtBinning", + 0x1000005A: "RtFrameWidth", + 0x1000005B: "RtFrameHeight", + 0x1000005C: "RtRegionWidth", + 0x1000005D: "RtRegionHeight", + 0x1000005E: "RtOffsetX", + 0x1000005F: "RtOffsetY", + 0x10000060: "RtZoom", + 0x10000061: "RtLinePeriod", + 0x10000062: "Prescan", + 0x10000063: "ScanDirectionZ", + # Track + 0x40000001: "MultiplexType", # 0 After Line; 1 After Frame + 0x40000002: "MultiplexOrder", + 0x40000003: "SamplingMode", # 0 Sample; 1 Line Avg; 2 Frame Avg + 0x40000004: "SamplingMethod", # 1 Mean; 2 Sum + 0x40000005: "SamplingNumber", + 0x40000006: "Acquire", + 0x40000007: 
"SampleObservationTime", + 0x4000000B: "TimeBetweenStacks", + 0x4000000C: "Name", + 0x4000000D: "Collimator1Name", + 0x4000000E: "Collimator1Position", + 0x4000000F: "Collimator2Name", + 0x40000010: "Collimator2Position", + 0x40000011: "IsBleachTrack", + 0x40000012: "IsBleachAfterScanNumber", + 0x40000013: "BleachScanNumber", + 0x40000014: "TriggerIn", + 0x40000015: "TriggerOut", + 0x40000016: "IsRatioTrack", + 0x40000017: "BleachCount", + 0x40000018: "SpiCenterWavelength", + 0x40000019: "PixelTime", + 0x40000021: "CondensorFrontlens", + 0x40000023: "FieldStopValue", + 0x40000024: "IdCondensorAperture", + 0x40000025: "CondensorAperture", + 0x40000026: "IdCondensorRevolver", + 0x40000027: "CondensorFilter", + 0x40000028: "IdTransmissionFilter1", + 0x40000029: "IdTransmission1", + 0x40000030: "IdTransmissionFilter2", + 0x40000031: "IdTransmission2", + 0x40000032: "RepeatBleach", + 0x40000033: "EnableSpotBleachPos", + 0x40000034: "SpotBleachPosx", + 0x40000035: "SpotBleachPosy", + 0x40000036: "SpotBleachPosz", + 0x40000037: "IdTubelens", + 0x40000038: "IdTubelensPosition", + 0x40000039: "TransmittedLight", + 0x4000003A: "ReflectedLight", + 0x4000003B: "SimultanGrabAndBleach", + 0x4000003C: "BleachPixelTime", + # Laser + 0x50000001: "Name", + 0x50000002: "Acquire", + 0x50000003: "Power", + # DetectionChannel + 0x70000001: "IntegrationMode", + 0x70000002: "SpecialMode", + 0x70000003: "DetectorGainFirst", + 0x70000004: "DetectorGainLast", + 0x70000005: "AmplifierGainFirst", + 0x70000006: "AmplifierGainLast", + 0x70000007: "AmplifierOffsFirst", + 0x70000008: "AmplifierOffsLast", + 0x70000009: "PinholeDiameter", + 0x7000000A: "CountingTrigger", + 0x7000000B: "Acquire", + 0x7000000C: "PointDetectorName", + 0x7000000D: "AmplifierName", + 0x7000000E: "PinholeName", + 0x7000000F: "FilterSetName", + 0x70000010: "FilterName", + 0x70000013: "IntegratorName", + 0x70000014: "ChannelName", + 0x70000015: "DetectorGainBc1", + 0x70000016: "DetectorGainBc2", + 0x70000017: 
"AmplifierGainBc1", + 0x70000018: "AmplifierGainBc2", + 0x70000019: "AmplifierOffsetBc1", + 0x70000020: "AmplifierOffsetBc2", + 0x70000021: "SpectralScanChannels", + 0x70000022: "SpiWavelengthStart", + 0x70000023: "SpiWavelengthStop", + 0x70000026: "DyeName", + 0x70000027: "DyeFolder", + # IlluminationChannel + 0x90000001: "Name", + 0x90000002: "Power", + 0x90000003: "Wavelength", + 0x90000004: "Aquire", + 0x90000005: "DetchannelName", + 0x90000006: "PowerBc1", + 0x90000007: "PowerBc2", + # BeamSplitter + 0xB0000001: "FilterSet", + 0xB0000002: "Filter", + 0xB0000003: "Name", + # DataChannel + 0xD0000001: "Name", + 0xD0000003: "Acquire", + 0xD0000004: "Color", + 0xD0000005: "SampleType", + 0xD0000006: "BitsPerSample", + 0xD0000007: "RatioType", + 0xD0000008: "RatioTrack1", + 0xD0000009: "RatioTrack2", + 0xD000000A: "RatioChannel1", + 0xD000000B: "RatioChannel2", + 0xD000000C: "RatioConst1", + 0xD000000D: "RatioConst2", + 0xD000000E: "RatioConst3", + 0xD000000F: "RatioConst4", + 0xD0000010: "RatioConst5", + 0xD0000011: "RatioConst6", + 0xD0000012: "RatioFirstImages1", + 0xD0000013: "RatioFirstImages2", + 0xD0000014: "DyeName", + 0xD0000015: "DyeFolder", + 0xD0000016: "Spectrum", + 0xD0000017: "Acquire", + # Timer + 0x12000001: "Name", + 0x12000002: "Description", + 0x12000003: "Interval", + 0x12000004: "TriggerIn", + 0x12000005: "TriggerOut", + 0x12000006: "ActivationTime", + 0x12000007: "ActivationNumber", + # Marker + 0x14000001: "Name", + 0x14000002: "Description", + 0x14000003: "TriggerIn", + 0x14000004: "TriggerOut", + } + + def NIH_IMAGE_HEADER(): + return [ + ("FileID", "a8"), + ("nLines", "i2"), + ("PixelsPerLine", "i2"), + ("Version", "i2"), + ("OldLutMode", "i2"), + ("OldnColors", "i2"), + ("Colors", "u1", (3, 32)), + ("OldColorStart", "i2"), + ("ColorWidth", "i2"), + ("ExtraColors", "u2", (6, 3)), + ("nExtraColors", "i2"), + ("ForegroundIndex", "i2"), + ("BackgroundIndex", "i2"), + ("XScale", "f8"), + ("Unused2", "i2"), + ("Unused3", "i2"), + ("UnitsID", 
"i2"), # NIH_UNITS_TYPE + ("p1", [("x", "i2"), ("y", "i2")]), + ("p2", [("x", "i2"), ("y", "i2")]), + ("CurveFitType", "i2"), # NIH_CURVEFIT_TYPE + ("nCoefficients", "i2"), + ("Coeff", "f8", 6), + ("UMsize", "u1"), + ("UM", "a15"), + ("UnusedBoolean", "u1"), + ("BinaryPic", "b1"), + ("SliceStart", "i2"), + ("SliceEnd", "i2"), + ("ScaleMagnification", "f4"), + ("nSlices", "i2"), + ("SliceSpacing", "f4"), + ("CurrentSlice", "i2"), + ("FrameInterval", "f4"), + ("PixelAspectRatio", "f4"), + ("ColorStart", "i2"), + ("ColorEnd", "i2"), + ("nColors", "i2"), + ("Fill1", "3u2"), + ("Fill2", "3u2"), + ("Table", "u1"), # NIH_COLORTABLE_TYPE + ("LutMode", "u1"), # NIH_LUTMODE_TYPE + ("InvertedTable", "b1"), + ("ZeroClip", "b1"), + ("XUnitSize", "u1"), + ("XUnit", "a11"), + ("StackType", "i2"), # NIH_STACKTYPE_TYPE + # ('UnusedBytes', 'u1', 200) + ] + + def NIH_COLORTABLE_TYPE(): + return ( + "CustomTable", + "AppleDefault", + "Pseudo20", + "Pseudo32", + "Rainbow", + "Fire1", + "Fire2", + "Ice", + "Grays", + "Spectrum", + ) + + def NIH_LUTMODE_TYPE(): + return ( + "PseudoColor", + "OldAppleDefault", + "OldSpectrum", + "GrayScale", + "ColorLut", + "CustomGrayscale", + ) + + def NIH_CURVEFIT_TYPE(): + return ( + "StraightLine", + "Poly2", + "Poly3", + "Poly4", + "Poly5", + "ExpoFit", + "PowerFit", + "LogFit", + "RodbardFit", + "SpareFit1", + "Uncalibrated", + "UncalibratedOD", + ) + + def NIH_UNITS_TYPE(): + return ( + "Nanometers", + "Micrometers", + "Millimeters", + "Centimeters", + "Meters", + "Kilometers", + "Inches", + "Feet", + "Miles", + "Pixels", + "OtherUnits", + ) + + def NIH_STACKTYPE_TYPE(): + return ("VolumeStack", "RGBStack", "MovieStack", "HSVStack") + + def TVIPS_HEADER_V1(): + # TVIPS TemData structure from EMMENU Help file + return [ + ("Version", "i4"), + ("CommentV1", "a80"), + ("HighTension", "i4"), + ("SphericalAberration", "i4"), + ("IlluminationAperture", "i4"), + ("Magnification", "i4"), + ("PostMagnification", "i4"), + ("FocalLength", "i4"), + 
("Defocus", "i4"), + ("Astigmatism", "i4"), + ("AstigmatismDirection", "i4"), + ("BiprismVoltage", "i4"), + ("SpecimenTiltAngle", "i4"), + ("SpecimenTiltDirection", "i4"), + ("IlluminationTiltDirection", "i4"), + ("IlluminationTiltAngle", "i4"), + ("ImageMode", "i4"), + ("EnergySpread", "i4"), + ("ChromaticAberration", "i4"), + ("ShutterType", "i4"), + ("DefocusSpread", "i4"), + ("CcdNumber", "i4"), + ("CcdSize", "i4"), + ("OffsetXV1", "i4"), + ("OffsetYV1", "i4"), + ("PhysicalPixelSize", "i4"), + ("Binning", "i4"), + ("ReadoutSpeed", "i4"), + ("GainV1", "i4"), + ("SensitivityV1", "i4"), + ("ExposureTimeV1", "i4"), + ("FlatCorrected", "i4"), + ("DeadPxCorrected", "i4"), + ("ImageMean", "i4"), + ("ImageStd", "i4"), + ("DisplacementX", "i4"), + ("DisplacementY", "i4"), + ("DateV1", "i4"), + ("TimeV1", "i4"), + ("ImageMin", "i4"), + ("ImageMax", "i4"), + ("ImageStatisticsQuality", "i4"), + ] + + def TVIPS_HEADER_V2(): + return [ + ("ImageName", "V160"), # utf16 + ("ImageFolder", "V160"), + ("ImageSizeX", "i4"), + ("ImageSizeY", "i4"), + ("ImageSizeZ", "i4"), + ("ImageSizeE", "i4"), + ("ImageDataType", "i4"), + ("Date", "i4"), + ("Time", "i4"), + ("Comment", "V1024"), + ("ImageHistory", "V1024"), + ("Scaling", "16f4"), + ("ImageStatistics", "16c16"), + ("ImageType", "i4"), + ("ImageDisplaType", "i4"), + ("PixelSizeX", "f4"), # distance between two px in x, [nm] + ("PixelSizeY", "f4"), # distance between two px in y, [nm] + ("ImageDistanceZ", "f4"), + ("ImageDistanceE", "f4"), + ("ImageMisc", "32f4"), + ("TemType", "V160"), + ("TemHighTension", "f4"), + ("TemAberrations", "32f4"), + ("TemEnergy", "32f4"), + ("TemMode", "i4"), + ("TemMagnification", "f4"), + ("TemMagnificationCorrection", "f4"), + ("PostMagnification", "f4"), + ("TemStageType", "i4"), + ("TemStagePosition", "5f4"), # x, y, z, a, b + ("TemImageShift", "2f4"), + ("TemBeamShift", "2f4"), + ("TemBeamTilt", "2f4"), + ("TilingParameters", "7f4"), # 0: tiling? 
1:x 2:y 3: max x + # 4: max y 5: overlap x 6: overlap y + ("TemIllumination", "3f4"), # 0: spotsize 1: intensity + ("TemShutter", "i4"), + ("TemMisc", "32f4"), + ("CameraType", "V160"), + ("PhysicalPixelSizeX", "f4"), + ("PhysicalPixelSizeY", "f4"), + ("OffsetX", "i4"), + ("OffsetY", "i4"), + ("BinningX", "i4"), + ("BinningY", "i4"), + ("ExposureTime", "f4"), + ("Gain", "f4"), + ("ReadoutRate", "f4"), + ("FlatfieldDescription", "V160"), + ("Sensitivity", "f4"), + ("Dose", "f4"), + ("CamMisc", "32f4"), + ("FeiMicroscopeInformation", "V1024"), + ("FeiSpecimenInformation", "V1024"), + ("Magic", "u4"), + ] + + def MM_HEADER(): + # Olympus FluoView MM_Header + MM_DIMENSION = [ + ("Name", "a16"), + ("Size", "i4"), + ("Origin", "f8"), + ("Resolution", "f8"), + ("Unit", "a64"), + ] + return [ + ("HeaderFlag", "i2"), + ("ImageType", "u1"), + ("ImageName", "a257"), + ("OffsetData", "u4"), + ("PaletteSize", "i4"), + ("OffsetPalette0", "u4"), + ("OffsetPalette1", "u4"), + ("CommentSize", "i4"), + ("OffsetComment", "u4"), + ("Dimensions", MM_DIMENSION, 10), + ("OffsetPosition", "u4"), + ("MapType", "i2"), + ("MapMin", "f8"), + ("MapMax", "f8"), + ("MinValue", "f8"), + ("MaxValue", "f8"), + ("OffsetMap", "u4"), + ("Gamma", "f8"), + ("Offset", "f8"), + ("GrayChannel", MM_DIMENSION), + ("OffsetThumbnail", "u4"), + ("VoiceField", "i4"), + ("OffsetVoiceField", "u4"), + ] + + def MM_DIMENSIONS(): + # Map FluoView MM_Header.Dimensions to axes characters + return { + "X": "X", + "Y": "Y", + "Z": "Z", + "T": "T", + "CH": "C", + "WAVELENGTH": "C", + "TIME": "T", + "XY": "R", + "EVENT": "V", + "EXPOSURE": "L", + } + + def UIC_TAGS(): + # Map Universal Imaging Corporation MetaMorph internal tag ids to + # name and type + from fractions import Fraction # delayed import + + return [ + ("AutoScale", int), + ("MinScale", int), + ("MaxScale", int), + ("SpatialCalibration", int), + ("XCalibration", Fraction), + ("YCalibration", Fraction), + ("CalibrationUnits", str), + ("Name", str), + 
("ThreshState", int), + ("ThreshStateRed", int), + ("tagid_10", None), # undefined + ("ThreshStateGreen", int), + ("ThreshStateBlue", int), + ("ThreshStateLo", int), + ("ThreshStateHi", int), + ("Zoom", int), + ("CreateTime", julian_datetime), + ("LastSavedTime", julian_datetime), + ("currentBuffer", int), + ("grayFit", None), + ("grayPointCount", None), + ("grayX", Fraction), + ("grayY", Fraction), + ("grayMin", Fraction), + ("grayMax", Fraction), + ("grayUnitName", str), + ("StandardLUT", int), + ("wavelength", int), + ("StagePosition", "(%i,2,2)u4"), # N xy positions as fract + ("CameraChipOffset", "(%i,2,2)u4"), # N xy offsets as fract + ("OverlayMask", None), + ("OverlayCompress", None), + ("Overlay", None), + ("SpecialOverlayMask", None), + ("SpecialOverlayCompress", None), + ("SpecialOverlay", None), + ("ImageProperty", read_uic_image_property), + ("StageLabel", "%ip"), # N str + ("AutoScaleLoInfo", Fraction), + ("AutoScaleHiInfo", Fraction), + ("AbsoluteZ", "(%i,2)u4"), # N fractions + ("AbsoluteZValid", "(%i,)u4"), # N long + ("Gamma", "I"), # 'I' uses offset + ("GammaRed", "I"), + ("GammaGreen", "I"), + ("GammaBlue", "I"), + ("CameraBin", "2I"), + ("NewLUT", int), + ("ImagePropertyEx", None), + ("PlaneProperty", int), + ("UserLutTable", "(256,3)u1"), + ("RedAutoScaleInfo", int), + ("RedAutoScaleLoInfo", Fraction), + ("RedAutoScaleHiInfo", Fraction), + ("RedMinScaleInfo", int), + ("RedMaxScaleInfo", int), + ("GreenAutoScaleInfo", int), + ("GreenAutoScaleLoInfo", Fraction), + ("GreenAutoScaleHiInfo", Fraction), + ("GreenMinScaleInfo", int), + ("GreenMaxScaleInfo", int), + ("BlueAutoScaleInfo", int), + ("BlueAutoScaleLoInfo", Fraction), + ("BlueAutoScaleHiInfo", Fraction), + ("BlueMinScaleInfo", int), + ("BlueMaxScaleInfo", int), + # ('OverlayPlaneColor', read_uic_overlay_plane_color), + ] + + def PILATUS_HEADER(): + # PILATUS CBF Header Specification, Version 1.4 + # Map key to [value_indices], type + return { + "Detector": ([slice(1, None)], str), + 
"Pixel_size": ([1, 4], float), + "Silicon": ([3], float), + "Exposure_time": ([1], float), + "Exposure_period": ([1], float), + "Tau": ([1], float), + "Count_cutoff": ([1], int), + "Threshold_setting": ([1], float), + "Gain_setting": ([1, 2], str), + "N_excluded_pixels": ([1], int), + "Excluded_pixels": ([1], str), + "Flat_field": ([1], str), + "Trim_file": ([1], str), + "Image_path": ([1], str), + # optional + "Wavelength": ([1], float), + "Energy_range": ([1, 2], float), + "Detector_distance": ([1], float), + "Detector_Voffset": ([1], float), + "Beam_xy": ([1, 2], float), + "Flux": ([1], str), + "Filter_transmission": ([1], float), + "Start_angle": ([1], float), + "Angle_increment": ([1], float), + "Detector_2theta": ([1], float), + "Polarization": ([1], float), + "Alpha": ([1], float), + "Kappa": ([1], float), + "Phi": ([1], float), + "Phi_increment": ([1], float), + "Chi": ([1], float), + "Chi_increment": ([1], float), + "Oscillation_axis": ([slice(1, None)], str), + "N_oscillations": ([1], int), + "Start_position": ([1], float), + "Position_increment": ([1], float), + "Shutter_time": ([1], float), + "Omega": ([1], float), + "Omega_increment": ([1], float), + } + + def REVERSE_BITORDER_BYTES(): + # Bytes with reversed bitorder + return ( + b"\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(" + b"\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14" + b"\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|" + b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*' + b"\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16" + b"\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~" + b"\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)" + b"\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15" + b"\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}" + b"\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK" + b"\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7'\xa7g\xe7" + 
b"\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_" + b"\xdf?\xbf\x7f\xff" + ) + + def REVERSE_BITORDER_ARRAY(): + # Numpy array of bytes with reversed bitorder + return numpy.frombuffer(TIFF.REVERSE_BITORDER_BYTES, dtype="uint8") + + def ALLOCATIONGRANULARITY(): + # alignment for writing contiguous data to TIFF + import mmap # delayed import + + return mmap.ALLOCATIONGRANULARITY + + +def read_tags(fh, byteorder, offsetsize, tagnames, customtags=None, maxifds=None): + """Read tags from chain of IFDs and return as list of dicts. + + The file handle position must be at a valid IFD header. + + """ + if offsetsize == 4: + offsetformat = byteorder + "I" + tagnosize = 2 + tagnoformat = byteorder + "H" + tagsize = 12 + tagformat1 = byteorder + "HH" + tagformat2 = byteorder + "I4s" + elif offsetsize == 8: + offsetformat = byteorder + "Q" + tagnosize = 8 + tagnoformat = byteorder + "Q" + tagsize = 20 + tagformat1 = byteorder + "HH" + tagformat2 = byteorder + "Q8s" + else: + raise ValueError("invalid offset size") + + if customtags is None: + customtags = {} + if maxifds is None: + maxifds = 2**32 + + result = [] + unpack = struct.unpack + offset = fh.tell() + while len(result) < maxifds: + # loop over IFDs + try: + tagno = unpack(tagnoformat, fh.read(tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + warnings.warn("corrupted tag list at offset %i" % offset) + break + + tags = {} + data = fh.read(tagsize * tagno) + pos = fh.tell() + index = 0 + for _ in range(tagno): + code, type_ = unpack(tagformat1, data[index : index + 4]) + count, value = unpack(tagformat2, data[index + 4 : index + tagsize]) + index += tagsize + name = tagnames.get(code, str(code)) + try: + dtype = TIFF.DATA_FORMATS[type_] + except KeyError: + raise TiffTag.Error("unknown tag data type %i" % type_) + + fmt = "%s%i%s" % (byteorder, count * int(dtype[0]), dtype[1]) + size = struct.calcsize(fmt) + if size > offsetsize or code in customtags: + 
offset = unpack(offsetformat, value)[0] + if offset < 8 or offset > fh.size - size: + raise TiffTag.Error("invalid tag value offset %i" % offset) + fh.seek(offset) + if code in customtags: + readfunc = customtags[code][1] + value = readfunc(fh, byteorder, dtype, count, offsetsize) + elif type_ == 7 or (count > 1 and dtype[-1] == "B"): + value = read_bytes(fh, byteorder, dtype, count, offsetsize) + elif code in tagnames or dtype[-1] == "s": + value = unpack(fmt, fh.read(size)) + else: + value = read_numpy(fh, byteorder, dtype, count, offsetsize) + elif dtype[-1] == "B" or type_ == 7: + value = value[:size] + else: + value = unpack(fmt, value[:size]) + + if code not in customtags and code not in TIFF.TAG_TUPLE: + if len(value) == 1: + value = value[0] + if type_ != 7 and dtype[-1] == "s" and isinstance(value, bytes): + # TIFF ASCII fields can contain multiple strings, + # each terminated with a NUL + try: + value = bytes2str(stripascii(value).strip()) + except UnicodeDecodeError: + warnings.warn("tag %i: coercing invalid ASCII to bytes" % code) + + tags[name] = value + + result.append(tags) + # read offset to next page + fh.seek(pos) + offset = unpack(offsetformat, fh.read(offsetsize))[0] + if offset == 0: + break + if offset >= fh.size: + warnings.warn("invalid page offset %i" % offset) + break + fh.seek(offset) + + if result and maxifds == 1: + result = result[0] + return result + + +def read_exif_ifd(fh, byteorder, dtype, count, offsetsize): + """Read EXIF tags from file and return as dict.""" + exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1) + for name in ("ExifVersion", "FlashpixVersion"): + try: + exif[name] = bytes2str(exif[name]) + except Exception: + pass + if "UserComment" in exif: + idcode = exif["UserComment"][:8] + try: + if idcode == b"ASCII\x00\x00\x00": + exif["UserComment"] = bytes2str(exif["UserComment"][8:]) + elif idcode == b"UNICODE\x00": + exif["UserComment"] = exif["UserComment"][8:].decode("utf-16") + except Exception: + 
pass + return exif + + +def read_gps_ifd(fh, byteorder, dtype, count, offsetsize): + """Read GPS tags from file and return as dict.""" + return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1) + + +def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize): + """Read Interoperability tags from file and return as dict.""" + tag_names = {1: "InteroperabilityIndex"} + return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1) + + +def read_bytes(fh, byteorder, dtype, count, offsetsize): + """Read tag data from file and return as byte string.""" + dtype = "B" if dtype[-1] == "s" else byteorder + dtype[-1] + count *= numpy.dtype(dtype).itemsize + data = fh.read(count) + if len(data) != count: + warnings.warn("failed to read all bytes: %i, %i" % (len(data), count)) + return data + + +def read_utf8(fh, byteorder, dtype, count, offsetsize): + """Read tag data from file and return as unicode string.""" + return fh.read(count).decode("utf-8") + + +def read_numpy(fh, byteorder, dtype, count, offsetsize): + """Read tag data from file and return as numpy array.""" + dtype = "b" if dtype[-1] == "s" else byteorder + dtype[-1] + return fh.read_array(dtype, count) + + +def read_colormap(fh, byteorder, dtype, count, offsetsize): + """Read ColorMap data from file and return as numpy array.""" + cmap = fh.read_array(byteorder + dtype[-1], count) + cmap.shape = (3, -1) + return cmap + + +def read_json(fh, byteorder, dtype, count, offsetsize): + """Read JSON tag data from file and return as object.""" + data = fh.read(count) + try: + return json.loads(unicode(stripnull(data), "utf-8")) + except ValueError: + warnings.warn("invalid JSON '%s'" % data) + + +def read_mm_header(fh, byteorder, dtype, count, offsetsize): + """Read FluoView mm_header tag from file and return as dict.""" + mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder) + mmh = recarray2dict(mmh) + mmh["Dimensions"] = [ + (bytes2str(d[0]).strip(), d[1], d[2], d[3], 
bytes2str(d[4]).strip()) + for d in mmh["Dimensions"] + ] + d = mmh["GrayChannel"] + mmh["GrayChannel"] = ( + bytes2str(d[0]).strip(), + d[1], + d[2], + d[3], + bytes2str(d[4]).strip(), + ) + return mmh + + +def read_mm_stamp(fh, byteorder, dtype, count, offsetsize): + """Read FluoView mm_stamp tag from file and return as numpy.ndarray.""" + return fh.read_array(byteorder + "f8", 8) + + +def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None): + """Read MetaMorph STK UIC1Tag from file and return as dict. + + Return empty dictionary if planecount is unknown. + + """ + assert dtype in ("2I", "1I") and byteorder == "<" + result = {} + if dtype == "2I": + # pre MetaMorph 2.5 (not tested) + values = fh.read_array(" structure_size: + break + lsminfo.append((name, dtype)) + else: + lsminfo = TIFF.CZ_LSMINFO + + lsminfo = fh.read_record(lsminfo, byteorder=byteorder) + lsminfo = recarray2dict(lsminfo) + + # read LSM info subrecords at offsets + for name, reader in TIFF.CZ_LSMINFO_READERS.items(): + if reader is None: + continue + offset = lsminfo.get("Offset" + name, 0) + if offset < 8: + continue + fh.seek(offset) + try: + lsminfo[name] = reader(fh) + except ValueError: + pass + return lsminfo + + +def read_lsm_floatpairs(fh): + """Read LSM sequence of float pairs from file and return as list.""" + size = struct.unpack(" 0: + esize, etime, etype = struct.unpack(" 4: + size = struct.unpack(" 1 else {} + return frame_data, roi_data + + +def read_micromanager_metadata(fh): + """Read MicroManager non-TIFF settings from open file and return as dict. + + The settings can be used to read image data without parsing the TIFF file. + + Raise ValueError if the file does not contain valid MicroManager metadata. 
+ + """ + fh.seek(0) + try: + byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)] + except IndexError: + raise ValueError("not a MicroManager TIFF file") + + result = {} + fh.seek(8) + ( + index_header, + index_offset, + display_header, + display_offset, + comments_header, + comments_offset, + summary_header, + summary_length, + ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) + + if summary_header != 2355492: + raise ValueError("invalid MicroManager summary header") + result["Summary"] = read_json(fh, byteorder, None, summary_length, None) + + if index_header != 54773648: + raise ValueError("invalid MicroManager index header") + fh.seek(index_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 3453623: + raise ValueError("invalid MicroManager index header") + data = struct.unpack(byteorder + "IIIII" * count, fh.read(20 * count)) + result["IndexMap"] = { + "Channel": data[::5], + "Slice": data[1::5], + "Frame": data[2::5], + "Position": data[3::5], + "Offset": data[4::5], + } + + if display_header != 483765892: + raise ValueError("invalid MicroManager display header") + fh.seek(display_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 347834724: + raise ValueError("invalid MicroManager display header") + result["DisplaySettings"] = read_json(fh, byteorder, None, count, None) + + if comments_header != 99384722: + raise ValueError("invalid MicroManager comments header") + fh.seek(comments_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 84720485: + raise ValueError("invalid MicroManager comments header") + result["Comments"] = read_json(fh, byteorder, None, count, None) + + return result + + +def read_metaseries_catalog(fh): + """Read MetaSeries non-TIFF hint catalog from file. + + Raise ValueError if the file does not contain a valid hint catalog. 
+ + """ + # TODO: implement read_metaseries_catalog + raise NotImplementedError() + + +def imagej_metadata_tags(metadata, byteorder): + """Return IJMetadata and IJMetadataByteCounts tags from metadata dict. + + The tags can be passed to the TiffWriter.save function as extratags. + + The metadata dict may contain the following keys and values: + + Info : str + Human-readable information as string. + Labels : sequence of str + Human-readable labels for each channel. + Ranges : sequence of doubles + Lower and upper values for each channel. + LUTs : sequence of (3, 256) uint8 ndarrays + Color palettes for each channel. + Plot : bytes + Undocumented ImageJ internal format. + ROI: bytes + Undocumented ImageJ internal region of interest format. + Overlays : bytes + Undocumented ImageJ internal format. + + """ + header = [{">": b"IJIJ", "<": b"JIJI"}[byteorder]] + bytecounts = [0] + body = [] + + def _string(data, byteorder): + return data.encode("utf-16" + {">": "be", "<": "le"}[byteorder]) + + def _doubles(data, byteorder): + return struct.pack(byteorder + ("d" * len(data)), *data) + + def _ndarray(data, byteorder): + return data.tobytes() + + def _bytes(data, byteorder): + return data + + metadata_types = ( + ("Info", b"info", 1, _string), + ("Labels", b"labl", None, _string), + ("Ranges", b"rang", 1, _doubles), + ("LUTs", b"luts", None, _ndarray), + ("Plot", b"plot", 1, _bytes), + ("ROI", b"roi ", 1, _bytes), + ("Overlays", b"over", None, _bytes), + ) + + for key, mtype, count, func in metadata_types: + if key.lower() in metadata: + key = key.lower() + elif key not in metadata: + continue + if byteorder == "<": + mtype = mtype[::-1] + values = metadata[key] + if count is None: + count = len(values) + else: + values = [values] + header.append(mtype + struct.pack(byteorder + "I", count)) + for value in values: + data = func(value, byteorder) + body.append(data) + bytecounts.append(len(data)) + + if not body: + return () + body = b"".join(body) + header = b"".join(header) 
+ data = header + body + bytecounts[0] = len(header) + bytecounts = struct.pack(byteorder + ("I" * len(bytecounts)), *bytecounts) + return ( + (50839, "B", len(data), data, True), + (50838, "I", len(bytecounts) // 4, bytecounts, True), + ) + + +def imagej_metadata(data, bytecounts, byteorder): + """Return IJMetadata tag value as dict. + + The 'Info' string can have multiple formats, e.g. OIF or ScanImage, + that might be parsed into dicts using the matlabstr2py or + oiffile.SettingsFile functions. + + """ + + def _string(data, byteorder): + return data.decode("utf-16" + {">": "be", "<": "le"}[byteorder]) + + def _doubles(data, byteorder): + return struct.unpack(byteorder + ("d" * (len(data) // 8)), data) + + def _lut(data, byteorder): + return numpy.frombuffer(data, "uint8").reshape(-1, 256) + + def _bytes(data, byteorder): + return data + + metadata_types = { # big-endian + b"info": ("Info", _string), + b"labl": ("Labels", _string), + b"rang": ("Ranges", _doubles), + b"luts": ("LUTs", _lut), + b"plot": ("Plots", _bytes), + b"roi ": ("ROI", _bytes), + b"over": ("Overlays", _bytes), + } + metadata_types.update( # little-endian + dict((k[::-1], v) for k, v in metadata_types.items()) + ) + + if not bytecounts: + raise ValueError("no ImageJ metadata") + + if data[:4] not in (b"IJIJ", b"JIJI"): + raise ValueError("invalid ImageJ metadata") + + header_size = bytecounts[0] + if header_size < 12 or header_size > 804: + raise ValueError("invalid ImageJ metadata header size") + + ntypes = (header_size - 4) // 8 + header = struct.unpack(byteorder + "4sI" * ntypes, data[4 : 4 + ntypes * 8]) + pos = 4 + ntypes * 8 + counter = 0 + result = {} + for mtype, count in zip(header[::2], header[1::2]): + values = [] + name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes)) + for _ in range(count): + counter += 1 + pos1 = pos + bytecounts[counter] + values.append(func(data[pos:pos1], byteorder)) + pos = pos1 + result[name.strip()] = values[0] if count == 1 else values + 
return result + + +def imagej_description_metadata(description): + """Return metatata from ImageJ image description as dict. + + Raise ValueError if not a valid ImageJ description. + + >>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n' + >>> imagej_description_metadata(description) # doctest: +SKIP + {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True} + + """ + + def _bool(val): + return {"true": True, "false": False}[val.lower()] + + result = {} + for line in description.splitlines(): + try: + key, val = line.split("=") + except Exception: + continue + key = key.strip() + val = val.strip() + for dtype in (int, float, _bool): + try: + val = dtype(val) + break + except Exception: + pass + result[key] = val + + if "ImageJ" not in result: + raise ValueError("not a ImageJ image description") + return result + + +def imagej_description( + shape, + rgb=None, + colormaped=False, + version="1.11a", + hyperstack=None, + mode=None, + loop=None, + **kwargs +): + """Return ImageJ image description from data shape. + + ImageJ can handle up to 6 dimensions in order TZCYXS. 
+ + >>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP + ImageJ=1.11a + images=510 + channels=2 + slices=5 + frames=51 + hyperstack=true + mode=grayscale + loop=false + + """ + if colormaped: + raise NotImplementedError("ImageJ colormapping not supported") + shape = imagej_shape(shape, rgb=rgb) + rgb = shape[-1] in (3, 4) + + result = ["ImageJ=%s" % version] + append = [] + result.append("images=%i" % product(shape[:-3])) + if hyperstack is None: + hyperstack = True + append.append("hyperstack=true") + else: + append.append("hyperstack=%s" % bool(hyperstack)) + if shape[2] > 1: + result.append("channels=%i" % shape[2]) + if mode is None and not rgb: + mode = "grayscale" + if hyperstack and mode: + append.append("mode=%s" % mode) + if shape[1] > 1: + result.append("slices=%i" % shape[1]) + if shape[0] > 1: + result.append("frames=%i" % shape[0]) + if loop is None: + append.append("loop=false") + if loop is not None: + append.append("loop=%s" % bool(loop)) + for key, value in kwargs.items(): + append.append("%s=%s" % (key.lower(), value)) + + return "\n".join(result + append + [""]) + + +def imagej_shape(shape, rgb=None): + """Return shape normalized to 6D ImageJ hyperstack TZCYXS. + + Raise ValueError if not a valid ImageJ hyperstack shape. + + >>> imagej_shape((2, 3, 4, 5, 3), False) + (2, 3, 4, 5, 3, 1) + + """ + shape = tuple(int(i) for i in shape) + ndim = len(shape) + if 1 > ndim > 6: + raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional") + if rgb is None: + rgb = shape[-1] in (3, 4) and ndim > 2 + if rgb and shape[-1] not in (3, 4): + raise ValueError("invalid ImageJ hyperstack: not a RGB image") + if not rgb and ndim == 6 and shape[-1] != 1: + raise ValueError("invalid ImageJ hyperstack: not a non-RGB image") + if rgb or shape[-1] == 1: + return (1,) * (6 - ndim) + shape + return (1,) * (5 - ndim) + shape + (1,) + + +def json_description(shape, **metadata): + """Return JSON image description from data shape and other meta data. 
+ + Return UTF-8 encoded JSON. + + >>> json_description((256, 256, 3), axes='YXS') # doctest: +SKIP + b'{"shape": [256, 256, 3], "axes": "YXS"}' + + """ + metadata.update(shape=shape) + return json.dumps(metadata) # .encode('utf-8') + + +def json_description_metadata(description): + """Return metatata from JSON formatted image description as dict. + + Raise ValuError if description is of unknown format. + + >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}' + >>> json_description_metadata(description) # doctest: +SKIP + {'shape': [256, 256, 3], 'axes': 'YXS'} + >>> json_description_metadata('shape=(256, 256, 3)') + {'shape': (256, 256, 3)} + + """ + if description[:6] == "shape=": + # old style 'shaped' description; not JSON + shape = tuple(int(i) for i in description[7:-1].split(",")) + return dict(shape=shape) + if description[:1] == "{" and description[-1:] == "}": + # JSON description + return json.loads(description) + raise ValueError("invalid JSON image description", description) + + +def fluoview_description_metadata(description, ignoresections=None): + """Return metatata from FluoView image description as dict. + + The FluoView image description format is unspecified. Expect failures. + + >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n' + ... 
'[Intensity Mapping End]') + >>> fluoview_description_metadata(descr) + {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}} + + """ + if not description.startswith("["): + raise ValueError("invalid FluoView image description") + if ignoresections is None: + ignoresections = {"Region Info (Fields)", "Protocol Description"} + + result = {} + sections = [result] + comment = False + for line in description.splitlines(): + if not comment: + line = line.strip() + if not line: + continue + if line[0] == "[": + if line[-5:] == " End]": + # close section + del sections[-1] + section = sections[-1] + name = line[1:-5] + if comment: + section[name] = "\n".join(section[name]) + if name[:4] == "LUT ": + a = numpy.array(section[name], dtype="uint8") + a.shape = -1, 3 + section[name] = a + continue + # new section + comment = False + name = line[1:-1] + if name[:4] == "LUT ": + section = [] + elif name in ignoresections: + section = [] + comment = True + else: + section = {} + sections.append(section) + result[name] = section + continue + # add entry + if comment: + section.append(line) + continue + line = line.split("=", 1) + if len(line) == 1: + section[line[0].strip()] = None + continue + key, value = line + if key[:4] == "RGB ": + section.extend(int(rgb) for rgb in value.split()) + else: + section[key.strip()] = astype(value.strip()) + return result + + +def pilatus_description_metadata(description): + """Return metatata from Pilatus image description as dict. + + Return metadata from Pilatus pixel array detectors by Dectris, created + by camserver or TVX software. 
+ + >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m') + {'Pixel_size': (0.000172, 0.000172)} + + """ + result = {} + if not description.startswith("# "): + return result + for c in "#:=,()": + description = description.replace(c, " ") + for line in description.split("\n"): + if line[:2] != " ": + continue + line = line.split() + name = line[0] + if line[0] not in TIFF.PILATUS_HEADER: + try: + result["DateTime"] = datetime.datetime.strptime( + " ".join(line), "%Y-%m-%dT%H %M %S.%f" + ) + except Exception: + result[name] = " ".join(line[1:]) + continue + indices, dtype = TIFF.PILATUS_HEADER[line[0]] + if isinstance(indices[0], slice): + # assumes one slice + values = line[indices[0]] + else: + values = [line[i] for i in indices] + if dtype is float and values[0] == "not": + values = ["NaN"] + values = tuple(dtype(v) for v in values) + if dtype == str: + values = " ".join(values) + elif len(values) == 1: + values = values[0] + result[name] = values + return result + + +def svs_description_metadata(description): + """Return metatata from Aperio image description as dict. + + The Aperio image description format is unspecified. Expect failures. + + >>> svs_description_metadata('Aperio Image Library v1.0') + {'Aperio Image Library': 'v1.0'} + + """ + if not description.startswith("Aperio Image Library "): + raise ValueError("invalid Aperio image description") + result = {} + lines = description.split("\n") + key, value = lines[0].strip().rsplit(None, 1) # 'Aperio Image Library' + result[key.strip()] = value.strip() + if len(lines) == 1: + return result + items = lines[1].split("|") + result[""] = items[0].strip() # TODO: parse this? + for item in items[1:]: + key, value = item.split(" = ") + result[key.strip()] = astype(value.strip()) + return result + + +def stk_description_metadata(description): + """Return metadata from MetaMorph image description as list of dict. + + The MetaMorph image description format is unspecified. Expect failures. 
+ + """ + description = description.strip() + if not description: + return [] + try: + description = bytes2str(description) + except UnicodeDecodeError: + warnings.warn("failed to parse MetaMorph image description") + return [] + result = [] + for plane in description.split("\x00"): + d = {} + for line in plane.split("\r\n"): + line = line.split(":", 1) + if len(line) > 1: + name, value = line + d[name.strip()] = astype(value.strip()) + else: + value = line[0].strip() + if value: + if "" in d: + d[""].append(value) + else: + d[""] = [value] + result.append(d) + return result + + +def metaseries_description_metadata(description): + """Return metatata from MetaSeries image description as dict.""" + if not description.startswith(""): + raise ValueError("invalid MetaSeries image description") + + from xml.etree import cElementTree as etree # delayed import + + root = etree.fromstring(description) + types = {"float": float, "int": int, "bool": lambda x: asbool(x, "on", "off")} + + def parse(root, result): + # recursive + for child in root: + attrib = child.attrib + if not attrib: + result[child.tag] = parse(child, {}) + continue + if "id" in attrib: + i = attrib["id"] + t = attrib["type"] + v = attrib["value"] + if t in types: + result[i] = types[t](v) + else: + result[i] = v + return result + + adict = parse(root, {}) + if "Description" in adict: + adict["Description"] = adict["Description"].replace(" ", "\n") + return adict + + +def scanimage_description_metadata(description): + """Return metatata from ScanImage image description as dict.""" + return matlabstr2py(description) + + +def scanimage_artist_metadata(artist): + """Return metatata from ScanImage artist tag as dict.""" + try: + return json.loads(artist) + except ValueError: + warnings.warn("invalid JSON '%s'" % artist) + + +def _replace_by(module_function, package=__package__, warn=None, prefix="_"): + """Try replace decorated function by module.function.""" + return lambda f: f # imageio: just use what's in 
here + + def _warn(e, warn): + if warn is None: + warn = "\n Functionality might be degraded or be slow.\n" + elif warn is True: + warn = "" + elif not warn: + return + warnings.warn("%s%s" % (e, warn)) + + try: + from importlib import import_module + except ImportError as e: + _warn(e, warn) + return identityfunc + + def decorate(func, module_function=module_function, warn=warn): + module, function = module_function.split(".") + try: + if package: + module = import_module("." + module, package=package) + else: + module = import_module(module) + except Exception as e: + _warn(e, warn) + return func + try: + func, oldfunc = getattr(module, function), func + except Exception as e: + _warn(e, warn) + return func + globals()[prefix + func.__name__] = oldfunc + return func + + return decorate + + +def decode_floats(data): + """Decode floating point horizontal differencing. + + The TIFF predictor type 3 reorders the bytes of the image values and + applies horizontal byte differencing to improve compression of floating + point images. The ordering of interleaved color channels is preserved. + + Parameters + ---------- + data : numpy.ndarray + The image to be decoded. The dtype must be a floating point. + The shape must include the number of contiguous samples per pixel + even if 1. 
+ + """ + shape = data.shape + dtype = data.dtype + if len(shape) < 3: + raise ValueError("invalid data shape") + if dtype.char not in "dfe": + raise ValueError("not a floating point image") + littleendian = data.dtype.byteorder == "<" or ( + sys.byteorder == "little" and data.dtype.byteorder == "=" + ) + # undo horizontal byte differencing + data = data.view("uint8") + data.shape = shape[:-2] + (-1,) + shape[-1:] + numpy.cumsum(data, axis=-2, dtype="uint8", out=data) + # reorder bytes + if littleendian: + data.shape = shape[:-2] + (-1,) + shape[-2:] + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + data = data[..., ::-1] + # back to float + data = numpy.ascontiguousarray(data) + data = data.view(dtype) + data.shape = shape + return data + + +@_replace_by("_tifffile.decode_packbits") +def decode_packbits(encoded): + """Decompress PackBits encoded byte string. + + PackBits is a simple byte-oriented run-length compression scheme. + + """ + func = ord if sys.version[0] == "2" else identityfunc + result = [] + result_extend = result.extend + i = 0 + try: + while True: + n = func(encoded[i]) + 1 + i += 1 + if n < 129: + result_extend(encoded[i : i + n]) + i += n + elif n > 129: + result_extend(encoded[i : i + 1] * (258 - n)) + i += 1 + except IndexError: + pass + return b"".join(result) if sys.version[0] == "2" else bytes(result) + + +@_replace_by("_tifffile.decode_lzw") +def decode_lzw(encoded): + """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). + + The strip must begin with a CLEAR code and end with an EOI code. + + This implementation of the LZW decoding algorithm is described in (1) and + is not compatible with old style LZW compressed files like quad-lzw.tif. 
+ + """ + len_encoded = len(encoded) + bitcount_max = len_encoded * 8 + unpack = struct.unpack + + if sys.version[0] == "2": + newtable = [chr(i) for i in range(256)] + else: + newtable = [bytes([i]) for i in range(256)] + newtable.extend((0, 0)) + + def next_code(): + """Return integer of 'bitw' bits at 'bitcount' position in encoded.""" + start = bitcount // 8 + s = encoded[start : start + 4] + try: + code = unpack(">I", s)[0] + except Exception: + code = unpack(">I", s + b"\x00" * (4 - len(s)))[0] + code <<= bitcount % 8 + code &= mask + return code >> shr + + switchbitch = { # code: bit-width, shr-bits, bit-mask + 255: (9, 23, int(9 * "1" + "0" * 23, 2)), + 511: (10, 22, int(10 * "1" + "0" * 22, 2)), + 1023: (11, 21, int(11 * "1" + "0" * 21, 2)), + 2047: (12, 20, int(12 * "1" + "0" * 20, 2)), + } + bitw, shr, mask = switchbitch[255] + bitcount = 0 + + if len_encoded < 4: + raise ValueError("strip must be at least 4 characters long") + + if next_code() != 256: + raise ValueError("strip must begin with CLEAR code") + + code = 0 + oldcode = 0 + result = [] + result_append = result.append + while True: + code = next_code() # ~5% faster when inlining this function + bitcount += bitw + if code == 257 or bitcount >= bitcount_max: # EOI + break + if code == 256: # CLEAR + table = newtable[:] + table_append = table.append + lentable = 258 + bitw, shr, mask = switchbitch[255] + code = next_code() + bitcount += bitw + if code == 257: # EOI + break + result_append(table[code]) + else: + if code < lentable: + decoded = table[code] + newcode = table[oldcode] + decoded[:1] + else: + newcode = table[oldcode] + newcode += newcode[:1] + decoded = newcode + result_append(decoded) + table_append(newcode) + lentable += 1 + oldcode = code + if lentable in switchbitch: + bitw, shr, mask = switchbitch[lentable] + + if code != 257: + warnings.warn("unexpected end of LZW stream (code %i)" % code) + + return b"".join(result) + + +@_replace_by("_tifffile.unpack_ints") +def 
unpack_ints(data, dtype, itemsize, runlen=0): + """Decompress byte string to array of integers of any bit size <= 32. + + This Python implementation is slow and only handles itemsizes 1, 2, 4, 8, + 16, 32, and 64. + + Parameters + ---------- + data : byte str + Data to decompress. + dtype : numpy.dtype or str + A numpy boolean or integer type. + itemsize : int + Number of bits per integer. + runlen : int + Number of consecutive integers, after which to start at next byte. + + Examples + -------- + >>> unpack_ints(b'a', 'B', 1) + array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8) + >>> unpack_ints(b'ab', 'B', 2) + array([1, 2, 0, 1, 1, 2, 0, 2], dtype=uint8) + + """ + if itemsize == 1: # bitarray + data = numpy.frombuffer(data, "|B") + data = numpy.unpackbits(data) + if runlen % 8: + data = data.reshape(-1, runlen + (8 - runlen % 8)) + data = data[:, :runlen].reshape(-1) + return data.astype(dtype) + + dtype = numpy.dtype(dtype) + if itemsize in (8, 16, 32, 64): + return numpy.frombuffer(data, dtype) + if itemsize not in (1, 2, 4, 8, 16, 32): + raise ValueError("itemsize not supported: %i" % itemsize) + if dtype.kind not in "biu": + raise ValueError("invalid dtype") + + itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize) + if itembytes != dtype.itemsize: + raise ValueError("dtype.itemsize too small") + if runlen == 0: + runlen = (8 * len(data)) // itemsize + skipbits = runlen * itemsize % 8 + if skipbits: + skipbits = 8 - skipbits + shrbits = itembytes * 8 - itemsize + bitmask = int(itemsize * "1" + "0" * shrbits, 2) + dtypestr = ">" + dtype.char # dtype always big-endian? 
+ + unpack = struct.unpack + size = runlen * (len(data) * 8 // (runlen * itemsize + skipbits)) + result = numpy.empty((size,), dtype) + bitcount = 0 + for i in range(size): + start = bitcount // 8 + s = data[start : start + itembytes] + try: + code = unpack(dtypestr, s)[0] + except Exception: + code = unpack(dtypestr, s + b"\x00" * (itembytes - len(s)))[0] + code <<= bitcount % 8 + code &= bitmask + result[i] = code >> shrbits + bitcount += itemsize + if (i + 1) % runlen == 0: + bitcount += skipbits + return result + + +def unpack_rgb(data, dtype=">> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) + >>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '= bits) + data = numpy.frombuffer(data, dtype.byteorder + dt) + result = numpy.empty((data.size, len(bitspersample)), dtype.char) + for i, bps in enumerate(bitspersample): + t = data >> int(numpy.sum(bitspersample[i + 1 :])) + t &= int("0b" + "1" * bps, 2) + if rescale: + o = ((dtype.itemsize * 8) // bps + 1) * bps + if o > data.dtype.itemsize * 8: + t = t.astype("I") + t *= (2**o - 1) // (2**bps - 1) + t //= 2 ** (o - (dtype.itemsize * 8)) + result[:, i] = t + return result.reshape(-1) + + +@_replace_by("_tifffile.reverse_bitorder") +def reverse_bitorder(data): + """Reverse bits in each byte of byte string or numpy array. + + Decode data where pixels with lower column values are stored in the + lower-order bits of the bytes (FillOrder is LSB2MSB). + + Parameters + ---------- + data : byte string or ndarray + The data to be bit reversed. If byte string, a new bit-reversed byte + string is returned. Numpy arrays are bit-reversed in-place. 
def apply_colormap(image, colormap, contig=True):
    """Return palette-colored image.

    The image values index the colormap along axis 1. The result has
    shape image.shape + (colormap.shape[0],) and the colormap's dtype.

    Parameters
    ----------
    image : numpy.ndarray
        Indexes into the colormap.
    colormap : numpy.ndarray
        RGB lookup table aka palette of shape (3, 2**bits_per_sample).
    contig : bool
        If True, return a contiguous array.

    Examples
    --------
    >>> image = numpy.arange(256, dtype='uint8')
    >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256
    >>> apply_colormap(image, colormap)[-1]
    array([65280, 65280, 65280], dtype=uint16)

    """
    colored = numpy.take(colormap, image, axis=1)
    # move the color channel axis to the end
    colored = numpy.rollaxis(colored, 0, colored.ndim)
    return numpy.ascontiguousarray(colored) if contig else colored
+ + """ + ORIENTATION = TIFF.ORIENTATION + orientation = enumarg(ORIENTATION, orientation) + + if orientation == ORIENTATION.TOPLEFT: + return image + elif orientation == ORIENTATION.TOPRIGHT: + return image[..., ::-1, :] + elif orientation == ORIENTATION.BOTLEFT: + return image[..., ::-1, :, :] + elif orientation == ORIENTATION.BOTRIGHT: + return image[..., ::-1, ::-1, :] + elif orientation == ORIENTATION.LEFTTOP: + return numpy.swapaxes(image, -3, -2) + elif orientation == ORIENTATION.RIGHTTOP: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :] + elif orientation == ORIENTATION.RIGHTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] + elif orientation == ORIENTATION.LEFTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] + + +def repeat_nd(a, repeats): + """Return read-only view into input array with elements repeated. + + Zoom nD image by integer factors using nearest neighbor interpolation + (box filter). + + Parameters + ---------- + a : array_like + Input array. + repeats : sequence of int + The number of repetitions to apply along each dimension of input array. + + Example + ------- + >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) + array([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 3, 4, 4], + [3, 3, 4, 4]]) + + """ + a = numpy.asarray(a) + reshape = [] + shape = [] + strides = [] + for i, j, k in zip(a.strides, a.shape, repeats): + shape.extend((j, k)) + strides.extend((i, 0)) + reshape.append(j * k) + return numpy.lib.stride_tricks.as_strided( + a, shape, strides, writeable=False + ).reshape(reshape) + + +def reshape_nd(data_or_shape, ndim): + """Return image array or shape with at least ndim dimensions. + + Prepend 1s to image shape as necessary. 
def squeeze_axes(shape, axes, skip="XY"):
    """Return shape and axes with single-dimensional entries removed.

    Remove unused dimensions unless their axes are listed in 'skip'.

    Raises ValueError if shape and axes have different lengths.

    >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
    ((5, 2, 1), 'TYX')

    """
    if len(shape) != len(axes):
        raise ValueError("dimensions of axes and shape do not match")
    # keep non-singleton dimensions and dimensions whose axis is in 'skip'
    kept = [i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip]
    if not kept:
        # every dimension was squeezable; previously this crashed with an
        # unpacking ValueError from zip(*<empty>)
        return (), ""
    shape, axes = zip(*kept)
    return tuple(shape), "".join(axes)
def reshape_axes(axes, shape, newshape, unknown="Q"):
    """Return axes matching new shape.

    Unknown dimensions are labelled 'Q'.

    >>> reshape_axes('YXS', (219, 301, 1), (219, 301))
    'YX'
    >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1))
    'QQYQXQ'

    """
    shape = tuple(shape)
    newshape = tuple(newshape)
    if len(axes) != len(shape):
        raise ValueError("axes do not match shape")

    def _numel(dims):
        # element count; inline replacement for the module-level product()
        n = 1
        for d in dims:
            n *= d
        return n

    if _numel(shape) != _numel(newshape):
        raise ValueError("cannot reshape %s to %s" % (shape, newshape))
    if not axes or not newshape:
        return ""

    # pad newshape with trailing singletons if it is shorter than shape
    lendiff = max(0, len(shape) - len(newshape))
    if lendiff:
        newshape += (1,) * lendiff

    # walk both shapes from the last axis, matching dimensions by
    # comparing running products of sizes
    i = len(shape) - 1
    prod_new = 1
    prod_old = 1
    labels = []
    for size in reversed(newshape):
        prod_new *= size
        while i > 0 and shape[i] == 1 and size != 1:
            i -= 1
        if size == shape[i] and prod_new == prod_old * shape[i]:
            prod_old *= shape[i]
            labels.append(axes[i])
            i -= 1
        else:
            labels.append(unknown)

    return "".join(reversed(labels[lendiff:]))
+ + """ + npages = len(pages) + if npages == 0: + raise ValueError("no pages") + + if npages == 1: + return pages[0].asarray(out=out, *args, **kwargs) + + page0 = next(p for p in pages if p is not None) + page0.asarray(validate=None) # ThreadPoolExecutor swallows exceptions + shape = (npages,) + page0.keyframe.shape + dtype = page0.keyframe.dtype + out = create_output(out, shape, dtype) + + if maxworkers is None: + maxworkers = multiprocessing.cpu_count() // 2 + page0.parent.filehandle.lock = maxworkers > 1 + + filecache = OpenFileCache( + size=max(4, maxworkers), lock=page0.parent.filehandle.lock + ) + + def func(page, index, out=out, filecache=filecache, args=args, kwargs=kwargs): + """Read, decode, and copy page data.""" + if page is not None: + filecache.open(page.parent.filehandle) + out[index] = page.asarray( + lock=filecache.lock, reopen=False, validate=False, *args, **kwargs + ) + filecache.close(page.parent.filehandle) + + if maxworkers < 2: + for i, page in enumerate(pages): + func(page, i) + else: + with concurrent.futures.ThreadPoolExecutor(maxworkers) as executor: + executor.map(func, pages, range(npages)) + + filecache.clear() + page0.parent.filehandle.lock = None + + return out + + +def clean_offsets_counts(offsets, counts): + """Return cleaned offsets and byte counts. + + Remove zero offsets and counts. Use to sanitize _offsets and _bytecounts + tag values for strips or tiles. 
+ + """ + offsets = list(offsets) + counts = list(counts) + assert len(offsets) == len(counts) + j = 0 + for i, (o, b) in enumerate(zip(offsets, counts)): + if o > 0 and b > 0: + if i > j: + offsets[j] = o + counts[j] = b + j += 1 + elif b > 0 and o <= 0: + raise ValueError("invalid offset") + else: + warnings.warn("empty byte count") + if j == 0: + j = 1 + return offsets[:j], counts[:j] + + +def buffered_read(fh, lock, offsets, bytecounts, buffersize=2**26): + """Return iterator over blocks read from file.""" + length = len(offsets) + i = 0 + while i < length: + data = [] + with lock: + size = 0 + while size < buffersize and i < length: + fh.seek(offsets[i]) + bytecount = bytecounts[i] + data.append(fh.read(bytecount)) + size += bytecount + i += 1 + for block in data: + yield block + + +def create_output(out, shape, dtype, mode="w+", suffix=".memmap"): + """Return numpy array where image data of shape and dtype can be copied. + + The 'out' parameter may have the following values or types: + + None + An empty array of shape and dtype is created and returned. + numpy.ndarray + An existing writable array of compatible dtype and shape. A view of + the same array is returned after verification. + 'memmap' or 'memmap:tempdir' + A memory-map to an array stored in a temporary binary file on disk + is created and returned. + str or open file + The file name or file object used to create a memory-map to an array + stored in a binary file on disk. The created memory-mapped array is + returned. 
+ + """ + if out is None: + return numpy.zeros(shape, dtype) + if isinstance(out, str) and out[:6] == "memmap": + tempdir = out[7:] if len(out) > 7 else None + with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh: + return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode) + if isinstance(out, numpy.ndarray): + if product(shape) != product(out.shape): + raise ValueError("incompatible output shape") + if not numpy.can_cast(dtype, out.dtype): + raise ValueError("incompatible output dtype") + return out.reshape(shape) + if isinstance(out, pathlib.Path): + out = str(out) + return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode) + + +def matlabstr2py(string): + """Return Python object from Matlab string representation. + + Return str, bool, int, float, list (Matlab arrays or cells), or + dict (Matlab structures) types. + + Use to access ScanImage metadata. + + >>> matlabstr2py('1') + 1 + >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]") + [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']] + >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n" + ... 
"SI.hChannels.channelsActive = 2") + >>> d['SI.hChannels.channelType'] + ['stripe', 'stripe'] + + """ + # TODO: handle invalid input + # TODO: review unboxing of multidimensional arrays + + def lex(s): + # return sequence of tokens from matlab string representation + tokens = ["["] + while True: + t, i = next_token(s) + if t is None: + break + if t == ";": + tokens.extend(("]", "[")) + elif t == "[": + tokens.extend(("[", "[")) + elif t == "]": + tokens.extend(("]", "]")) + else: + tokens.append(t) + s = s[i:] + tokens.append("]") + return tokens + + def next_token(s): + # return next token in matlab string + length = len(s) + if length == 0: + return None, 0 + i = 0 + while i < length and s[i] == " ": + i += 1 + if i == length: + return None, i + if s[i] in "{[;]}": + return s[i], i + 1 + if s[i] == "'": + j = i + 1 + while j < length and s[j] != "'": + j += 1 + return s[i : j + 1], j + 1 + if s[i] == "<": + j = i + 1 + while j < length and s[j] != ">": + j += 1 + return s[i : j + 1], j + 1 + j = i + while j < length and s[j] not in " {[;]}": + j += 1 + return s[i:j], j + + def value(s, fail=False): + # return Python value of token + s = s.strip() + if not s: + return s + if len(s) == 1: + try: + return int(s) + except Exception: + if fail: + raise ValueError() + return s + if s[0] == "'": + if fail and s[-1] != "'" or "'" in s[1:-1]: + raise ValueError() + return s[1:-1] + if s[0] == "<": + if fail and s[-1] != ">" or "<" in s[1:-1]: + raise ValueError() + return s + if fail and any(i in s for i in " ';[]{}"): + raise ValueError() + if s[0] == "@": + return s + if s in ("true", "True"): + return True + if s in ("false", "False"): + return False + if s[:6] == "zeros(": + return numpy.zeros([int(i) for i in s[6:-1].split(",")]).tolist() + if s[:5] == "ones(": + return numpy.ones([int(i) for i in s[5:-1].split(",")]).tolist() + if "." 
def stripnull(string, null=b"\x00"):
    """Return string truncated at first null character.

    Clean NULL terminated C strings. For unicode strings use null='\\0'.

    >>> stripnull(b'string\\x00')
    b'string'
    >>> stripnull('string\\x00', null='\\0')
    'string'

    """
    pos = string.find(null)
    if pos < 0:
        # no null character present: return input unchanged
        return string
    return string[:pos]
def format_size(size, threshold=1536):
    """Return file size as string from byte size.

    Parameters
    ----------
    size : int
        Size in bytes.
    threshold : int
        Use the next larger unit once the scaled value reaches this.

    >>> format_size(1234)
    '1234 B'
    >>> format_size(12345678901)
    '11.50 GiB'

    """
    if size < threshold:
        return "%i B" % size
    for unit in ("KiB", "MiB", "GiB", "TiB", "PiB"):
        size /= 1024.0
        if size < threshold:
            return "%.2f %s" % (size, unit)
    # sizes of threshold PiB or more fell off the loop and previously
    # returned None; report them in PiB instead
    return "%.2f PiB" % size
+ + >>> natural_sorted(['f1', 'f2', 'f10']) + ['f1', 'f2', 'f10'] + + """ + + def sortkey(x): + return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] + + numbers = re.compile(r"(\d+)") + return sorted(iterable, key=sortkey) + + +def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)): + """Return datetime object from timestamp in Excel serial format. + + Convert LSM time stamps. + + >>> excel_datetime(40237.029999999795) + datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) + + """ + return epoch + datetime.timedelta(timestamp) + + +def julian_datetime(julianday, milisecond=0): + """Return datetime from days since 1/1/4713 BC and ms since midnight. + + Convert Julian dates according to MetaMorph. + + >>> julian_datetime(2451576, 54362783) + datetime.datetime(2000, 2, 2, 15, 6, 2, 783) + + """ + if julianday <= 1721423: + # no datetime before year 1 + return None + + a = julianday + 1 + if a > 2299160: + alpha = math.trunc((a - 1867216.25) / 36524.25) + a += 1 + alpha - alpha // 4 + b = a + (1524 if a > 1721423 else 1158) + c = math.trunc((b - 122.1) / 365.25) + d = math.trunc(365.25 * c) + e = math.trunc((b - d) / 30.6001) + + day = b - d - math.trunc(30.6001 * e) + month = e - (1 if e < 13.5 else 13) + year = c - (4716 if month > 2.5 else 4715) + + hour, milisecond = divmod(milisecond, 1000 * 60 * 60) + minute, milisecond = divmod(milisecond, 1000 * 60) + second, milisecond = divmod(milisecond, 1000) + + return datetime.datetime(year, month, day, hour, minute, second, milisecond) + + +def byteorder_isnative(byteorder): + """Return if byteorder matches the system's byteorder. 
+ + >>> byteorder_isnative('=') + True + + """ + if byteorder == "=" or byteorder == sys.byteorder: + return True + keys = {"big": ">", "little": "<"} + return keys.get(byteorder, byteorder) == keys[sys.byteorder] + + +def recarray2dict(recarray): + """Return numpy.recarray as dict.""" + # TODO: subarrays + result = {} + for descr, value in zip(recarray.dtype.descr, recarray): + name, dtype = descr[:2] + if dtype[1] == "S": + value = bytes2str(stripnull(value)) + elif value.ndim < 2: + value = value.tolist() + result[name] = value + return result + + +def xml2dict(xml, sanitize=True, prefix=None): + """Return XML as dict. + + >>> xml2dict('1') + {'root': {'key': 1, 'attr': 'name'}} + + """ + from xml.etree import cElementTree as etree # delayed import + + at = tx = "" + if prefix: + at, tx = prefix + + def astype(value): + # return value as int, float, bool, or str + for t in (int, float, asbool): + try: + return t(value) + except Exception: + pass + return value + + def etree2dict(t): + # adapted from https://stackoverflow.com/a/10077069/453463 + key = t.tag + if sanitize: + key = key.rsplit("}", 1)[-1] + d = {key: {} if t.attrib else None} + children = list(t) + if children: + dd = collections.defaultdict(list) + for dc in map(etree2dict, children): + for k, v in dc.items(): + dd[k].append(astype(v)) + d = { + key: { + k: astype(v[0]) if len(v) == 1 else astype(v) for k, v in dd.items() + } + } + if t.attrib: + d[key].update((at + k, astype(v)) for k, v in t.attrib.items()) + if t.text: + text = t.text.strip() + if children or t.attrib: + if text: + d[key][tx + "value"] = astype(text) + else: + d[key] = astype(text) + return d + + return etree2dict(etree.fromstring(xml)) + + +def hexdump(bytestr, width=75, height=24, snipat=-2, modulo=2, ellipsis="..."): + """Return hexdump representation of byte string. + + >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) + '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............' 
+ + """ + size = len(bytestr) + if size < 1 or width < 2 or height < 1: + return "" + if height == 1: + addr = b"" + bytesperline = min(modulo * (((width - len(addr)) // 4) // modulo), size) + if bytesperline < 1: + return "" + nlines = 1 + else: + addr = b"%%0%ix: " % len(b"%x" % size) + bytesperline = min(modulo * (((width - len(addr % 1)) // 4) // modulo), size) + if bytesperline < 1: + return "" + width = 3 * bytesperline + len(addr % 1) + nlines = (size - 1) // bytesperline + 1 + + if snipat is None or snipat == 1: + snipat = height + elif 0 < abs(snipat) < 1: + snipat = int(math.floor(height * snipat)) + if snipat < 0: + snipat += height + + if height == 1 or nlines == 1: + blocks = [(0, bytestr[:bytesperline])] + addr = b"" + height = 1 + width = 3 * bytesperline + elif height is None or nlines <= height: + blocks = [(0, bytestr)] + elif snipat <= 0: + start = bytesperline * (nlines - height) + blocks = [(start, bytestr[start:])] # (start, None) + elif snipat >= height or height < 3: + end = bytesperline * height + blocks = [(0, bytestr[:end])] # (end, None) + else: + end1 = bytesperline * snipat + end2 = bytesperline * (height - snipat - 1) + blocks = [ + (0, bytestr[:end1]), + (size - end1 - end2, None), + (size - end2, bytestr[size - end2 :]), + ] + + ellipsis = str2bytes(ellipsis) + result = [] + for start, bytestr in blocks: + if bytestr is None: + result.append(ellipsis) # 'skip %i bytes' % start) + continue + hexstr = binascii.hexlify(bytestr) + strstr = re.sub(rb"[^\x20-\x7f]", b".", bytestr) + for i in range(0, len(bytestr), bytesperline): + h = hexstr[2 * i : 2 * i + bytesperline * 2] + r = (addr % (i + start)) if height > 1 else addr + r += b" ".join(h[i : i + 2] for i in range(0, 2 * bytesperline, 2)) + r += b" " * (width - len(r)) + r += strstr[i : i + bytesperline] + result.append(r) + result = b"\n".join(result) + if sys.version_info[0] == 3: + result = result.decode("ascii") + return result + + +def isprintable(string): + """Return if all 
def clean_whitespace(string, compact=False):
    """Return string with compressed whitespace.

    Normalize line endings to '\\n', collapse runs of blank lines, replace
    tabs with spaces, and collapse runs of spaces. If 'compact' is True,
    additionally join all lines into one and drop spaces after '['.

    """
    string = string.replace("\r\n", "\n").replace("\r", "\n")
    # loop until stable; a single replace pass only halves runs, so the
    # previous single-pass version left e.g. '\n\n\n\n' partly uncompressed
    while "\n\n" in string:
        string = string.replace("\n\n", "\n")
    string = string.replace("\t", " ")
    while "  " in string:
        string = string.replace("  ", " ")
    if compact:
        string = string.replace("\n", " ").replace("[ ", "[")
        while "  " in string:
            string = string.replace("  ", " ")
    return string.strip()
+ + """ + if height is None or height < 1: + height = 1024 + if width is None or width < 1: + width = 256 + + npopt = numpy.get_printoptions() + numpy.set_printoptions(threshold=100, linewidth=width) + + if isinstance(arg, basestring): + if arg[:5].lower() in (" height: + arg = "\n".join(argl[: height // 2] + ["..."] + argl[-height // 2 :]) + return arg + + +def snipstr(string, width=79, snipat=0.5, ellipsis="..."): + """Return string cut to specified length. + + >>> snipstr('abcdefghijklmnop', 8) + 'abc...op' + + """ + if ellipsis is None: + if isinstance(string, bytes): + ellipsis = b"..." + else: + ellipsis = "\u2026" # does not print on win-py3.5 + esize = len(ellipsis) + + splitlines = string.splitlines() + # TODO: finish and test multiline snip + + result = [] + for line in splitlines: + if line is None: + result.append(ellipsis) + continue + linelen = len(line) + if linelen <= width: + result.append(string) + continue + + split = snipat + if split is None or split == 1: + split = linelen + elif 0 < abs(split) < 1: + split = int(math.floor(linelen * split)) + if split < 0: + split += linelen + if split < 0: + split = 0 + + if esize == 0 or width < esize + 1: + if split <= 0: + result.append(string[-width:]) + else: + result.append(string[:width]) + elif split <= 0: + result.append(ellipsis + string[esize - width :]) + elif split >= linelen or width < esize + 4: + result.append(string[: width - esize] + ellipsis) + else: + splitlen = linelen - width + esize + end1 = split - splitlen // 2 + end2 = end1 + splitlen + result.append(string[:end1] + ellipsis + string[end2:]) + + if isinstance(string, bytes): + return b"\n".join(result) + else: + return "\n".join(result) + + +def enumarg(enum, arg): + """Return enum member from its name or value. 
def parse_kwargs(kwargs, *keys, **keyvalues):
    """Return dict with keys from keys|keyvals and values from kwargs|keyvals.

    Existing keys are deleted from kwargs.

    >>> kwargs = {'one': 1, 'two': 2, 'four': 4}
    >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5)
    >>> kwargs == {'one': 1}
    True
    >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5}
    True

    """
    extracted = {}
    # plain keys are only copied if present in kwargs
    for name in keys:
        if name in kwargs:
            extracted[name] = kwargs.pop(name)
    # keyword arguments provide defaults and are always present in the result
    for name, default in keyvalues.items():
        if name in kwargs:
            extracted[name] = kwargs.pop(name)
        else:
            extracted[name] = default
    return extracted
+ + See `JHOVE TIFF-hul Module `_ + + """ + import subprocess # noqa: delayed import + + out = subprocess.check_output([jhove, filename, "-m", "TIFF-hul"]) + if b"ErrorMessage: " in out: + for line in out.splitlines(): + line = line.strip() + if line.startswith(b"ErrorMessage: "): + error = line[14:].decode("utf8") + for i in ignore: + if i in error: + break + else: + raise ValueError(error) + break + + +def lsm2bin(lsmfile, binfile=None, tile=(256, 256), verbose=True): + """Convert [MP]TZCYX LSM file to series of BIN files. + + One BIN file containing 'ZCYX' data are created for each position, time, + and tile. The position, time, and tile indices are encoded at the end + of the filenames. + + """ + verbose = print_ if verbose else nullfunc + + if binfile is None: + binfile = lsmfile + elif binfile.lower() == "none": + binfile = None + if binfile: + binfile += "_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin" + + verbose("\nOpening LSM file... ", end="", flush=True) + start_time = time.time() + + with TiffFile(lsmfile) as lsm: + if not lsm.is_lsm: + verbose("\n", lsm, flush=True) + raise ValueError("not a LSM file") + series = lsm.series[0] # first series contains the image data + shape = series.shape + axes = series.axes + dtype = series.dtype + size = product(shape) * dtype.itemsize + + verbose("%.3f s" % (time.time() - start_time)) + # verbose(lsm, flush=True) + verbose( + "Image\n axes: %s\n shape: %s\n dtype: %s\n size: %s" + % (axes, shape, dtype, format_size(size)), + flush=True, + ) + if not series.axes.endswith("TZCYX"): + raise ValueError("not a *TZCYX LSM file") + + verbose("Copying image from LSM to BIN files", end="", flush=True) + start_time = time.time() + tiles = shape[-2] // tile[-2], shape[-1] // tile[-1] + if binfile: + binfile = binfile % (shape[-4], shape[-3], tile[0], tile[1]) + shape = (1,) * (7 - len(shape)) + shape + # cache for ZCYX stacks and output files + data = numpy.empty(shape[3:], dtype=dtype) + out = numpy.empty((shape[-4], shape[-3], 
tile[0], tile[1]), dtype=dtype) + # iterate over Tiff pages containing data + pages = iter(series.pages) + for m in range(shape[0]): # mosaic axis + for p in range(shape[1]): # position axis + for t in range(shape[2]): # time axis + for z in range(shape[3]): # z slices + data[z] = next(pages).asarray() + for y in range(tiles[0]): # tile y + for x in range(tiles[1]): # tile x + out[:] = data[ + ..., + y * tile[0] : (y + 1) * tile[0], + x * tile[1] : (x + 1) * tile[1], + ] + if binfile: + out.tofile(binfile % (m, p, t, y, x)) + verbose(".", end="", flush=True) + verbose(" %.3f s" % (time.time() - start_time)) + + +def imshow( + data, + title=None, + vmin=0, + vmax=None, + cmap=None, + bitspersample=None, + photometric="RGB", + interpolation=None, + dpi=96, + figure=None, + subplot=111, + maxdim=32768, + **kwargs +): + """Plot n-dimensional images using matplotlib.pyplot. + + Return figure, subplot and plot axis. + Requires pyplot already imported C{from matplotlib import pyplot}. + + Parameters + ---------- + bitspersample : int or None + Number of bits per channel in integer RGB images. + photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} + The color space of the image data. + title : str + Window and subplot title. + figure : matplotlib.figure.Figure (optional). + Matplotlib to use for plotting. + subplot : int + A matplotlib.pyplot.subplot axis. + maxdim : int + maximum image width and length. + kwargs : optional + Arguments for matplotlib.pyplot.imshow. 
+ + """ + isrgb = photometric in ("RGB",) # 'PALETTE', 'YCBCR' + if data.dtype.kind == "b": + isrgb = False + if isrgb and not ( + data.shape[-1] in (3, 4) or (data.ndim > 2 and data.shape[-3] in (3, 4)) + ): + isrgb = False + photometric = "MINISBLACK" + + data = data.squeeze() + if photometric in ("MINISWHITE", "MINISBLACK", None): + data = reshape_nd(data, 2) + else: + data = reshape_nd(data, 3) + + dims = data.ndim + if dims < 2: + raise ValueError("not an image") + elif dims == 2: + dims = 0 + isrgb = False + else: + if isrgb and data.shape[-3] in (3, 4): + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + elif not isrgb and ( + data.shape[-1] < data.shape[-2] // 8 + and data.shape[-1] < data.shape[-3] // 8 + and data.shape[-1] < 5 + ): + data = numpy.swapaxes(data, -3, -1) + data = numpy.swapaxes(data, -2, -1) + isrgb = isrgb and data.shape[-1] in (3, 4) + dims -= 3 if isrgb else 2 + + if isrgb: + data = data[..., :maxdim, :maxdim, :maxdim] + else: + data = data[..., :maxdim, :maxdim] + + if photometric == "PALETTE" and isrgb: + datamax = data.max() + if datamax > 255: + data = data >> 8 # possible precision loss + data = data.astype("B") + elif data.dtype.kind in "ui": + if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: + try: + bitspersample = int(math.ceil(math.log(data.max(), 2))) + except Exception: + bitspersample = data.dtype.itemsize * 8 + elif not isinstance(bitspersample, inttypes): + # bitspersample can be tuple, e.g. 
(5, 6, 5) + bitspersample = data.dtype.itemsize * 8 + datamax = 2**bitspersample + if isrgb: + if bitspersample < 8: + data = data << (8 - bitspersample) + elif bitspersample > 8: + data = data >> (bitspersample - 8) # precision loss + data = data.astype("B") + elif data.dtype.kind == "f": + datamax = data.max() + if isrgb and datamax > 1.0: + if data.dtype.char == "d": + data = data.astype("f") + data /= datamax + else: + data = data / datamax + elif data.dtype.kind == "b": + datamax = 1 + elif data.dtype.kind == "c": + data = numpy.absolute(data) + datamax = data.max() + + if not isrgb: + if vmax is None: + vmax = datamax + if vmin is None: + if data.dtype.kind == "i": + dtmin = numpy.iinfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + if data.dtype.kind == "f": + dtmin = numpy.finfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + else: + vmin = 0 + + pyplot = sys.modules["matplotlib.pyplot"] + + if figure is None: + pyplot.rc("font", family="sans-serif", weight="normal", size=8) + figure = pyplot.figure( + dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor="1.0", edgecolor="w" + ) + try: + figure.canvas.manager.window.title(title) + except Exception: + pass + size = len(title.splitlines()) if title else 1 + pyplot.subplots_adjust( + bottom=0.03 * (dims + 2), + top=0.98 - size * 0.03, + left=0.1, + right=0.95, + hspace=0.05, + wspace=0.0, + ) + subplot = pyplot.subplot(subplot) + + if title: + try: + title = unicode(title, "Windows-1252") + except TypeError: + pass + pyplot.title(title, size=11) + + if cmap is None: + if data.dtype.char == "?": + cmap = "gray" + elif data.dtype.kind in "buf" or vmin == 0: + cmap = "viridis" + else: + cmap = "coolwarm" + if photometric == "MINISWHITE": + cmap += "_r" + + image = pyplot.imshow( + numpy.atleast_2d(data[(0,) * dims].squeeze()), + vmin=vmin, + vmax=vmax, + cmap=cmap, + interpolation=interpolation, + **kwargs + ) + + if 
not isrgb: + pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 + + def format_coord(x, y): + # callback function to format coordinate display in toolbar + x = int(x + 0.5) + y = int(y + 0.5) + try: + if dims: + return "%s @ %s [%4i, %4i]" % (curaxdat[1][y, x], current, y, x) + return "%s @ [%4i, %4i]" % (data[y, x], y, x) + except IndexError: + return "" + + def none(event): + return "" + + subplot.format_coord = format_coord + image.get_cursor_data = none + image.format_cursor_data = none + + if dims: + current = list((0,) * dims) + curaxdat = [0, data[tuple(current)].squeeze()] + sliders = [ + pyplot.Slider( + pyplot.axes([0.125, 0.03 * (axis + 1), 0.725, 0.025]), + "Dimension %i" % axis, + 0, + data.shape[axis] - 1, + 0, + facecolor="0.5", + valfmt="%%.0f [%i]" % data.shape[axis], + ) + for axis in range(dims) + ] + for slider in sliders: + slider.drawon = False + + def set_image(current, sliders=sliders, data=data): + # change image and redraw canvas + curaxdat[1] = data[tuple(current)].squeeze() + image.set_data(curaxdat[1]) + for ctrl, index in zip(sliders, current): + ctrl.eventson = False + ctrl.set_val(index) + ctrl.eventson = True + figure.canvas.draw() + + def on_changed(index, axis, data=data, current=current): + # callback function for slider change event + index = int(round(index)) + curaxdat[0] = axis + if index == current[axis]: + return + if index >= data.shape[axis]: + index = 0 + elif index < 0: + index = data.shape[axis] - 1 + current[axis] = index + set_image(current) + + def on_keypressed(event, data=data, current=current): + # callback function for key press event + key = event.key + axis = curaxdat[0] + if str(key) in "0123456789": + on_changed(key, axis) + elif key == "right": + on_changed(current[axis] + 1, axis) + elif key == "left": + on_changed(current[axis] - 1, axis) + elif key == "up": + curaxdat[0] = 0 if axis == len(data.shape) - 1 else axis + 1 + elif key == "down": + curaxdat[0] = len(data.shape) - 1 if axis == 0 else axis - 
1 + elif key == "end": + on_changed(data.shape[axis] - 1, axis) + elif key == "home": + on_changed(0, axis) + + figure.canvas.mpl_connect("key_press_event", on_keypressed) + for axis, ctrl in enumerate(sliders): + ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) + + return figure, subplot, image + + +def _app_show(): + """Block the GUI. For use as skimage plugin.""" + pyplot = sys.modules["matplotlib.pyplot"] + pyplot.show() + + +def askopenfilename(**kwargs): + """Return file name(s) from Tkinter's file open dialog.""" + try: + from Tkinter import Tk + import tkFileDialog as filedialog + except ImportError: + from tkinter import Tk, filedialog + root = Tk() + root.withdraw() + root.update() + filenames = filedialog.askopenfilename(**kwargs) + root.destroy() + return filenames + + +def main(argv=None): + """Command line usage main function.""" + if float(sys.version[0:3]) < 2.7: + print("This script requires Python version 2.7 or better.") + print("This is Python version %s" % sys.version) + return 0 + if argv is None: + argv = sys.argv + + import optparse # TODO: use argparse + + parser = optparse.OptionParser( + usage="usage: %prog [options] path", + description="Display image data in TIFF files.", + version="%%prog %s" % __version__, + ) + opt = parser.add_option + opt("-p", "--page", dest="page", type="int", default=-1, help="display single page") + opt( + "-s", + "--series", + dest="series", + type="int", + default=-1, + help="display series of pages of same shape", + ) + opt( + "--nomultifile", + dest="nomultifile", + action="store_true", + default=False, + help="do not read OME series from multiple files", + ) + opt( + "--noplots", + dest="noplots", + type="int", + default=8, + help="maximum number of plots", + ) + opt( + "--interpol", + dest="interpol", + metavar="INTERPOL", + default="bilinear", + help="image interpolation method", + ) + opt("--dpi", dest="dpi", type="int", default=96, help="plot resolution") + opt( + "--vmin", + dest="vmin", + 
type="int", + default=None, + help="minimum value for colormapping", + ) + opt( + "--vmax", + dest="vmax", + type="int", + default=None, + help="maximum value for colormapping", + ) + opt( + "--debug", + dest="debug", + action="store_true", + default=False, + help="raise exception on failures", + ) + opt( + "--doctest", + dest="doctest", + action="store_true", + default=False, + help="runs the docstring examples", + ) + opt("-v", "--detail", dest="detail", type="int", default=2) + opt("-q", "--quiet", dest="quiet", action="store_true") + + settings, path = parser.parse_args() + path = " ".join(path) + + if settings.doctest: + import doctest + + doctest.testmod(optionflags=doctest.ELLIPSIS) + return 0 + if not path: + path = askopenfilename( + title="Select a TIFF file", filetypes=TIFF.FILEOPEN_FILTER + ) + if not path: + parser.error("No file specified") + + if any(i in path for i in "?*"): + path = glob.glob(path) + if not path: + print("no files match the pattern") + return 0 + # TODO: handle image sequences + path = path[0] + + if not settings.quiet: + print("\nReading file structure...", end=" ") + start = time.time() + try: + tif = TiffFile(path, multifile=not settings.nomultifile) + except Exception as e: + if settings.debug: + raise + else: + print("\n", e) + sys.exit(0) + if not settings.quiet: + print("%.3f ms" % ((time.time() - start) * 1e3)) + + if tif.is_ome: + settings.norgb = True + + images = [] + if settings.noplots > 0: + if not settings.quiet: + print("Reading image data... 
", end=" ") + + def notnone(x): + return next(i for i in x if i is not None) + + start = time.time() + try: + if settings.page >= 0: + images = [(tif.asarray(key=settings.page), tif[settings.page], None)] + elif settings.series >= 0: + images = [ + ( + tif.asarray(series=settings.series), + notnone(tif.series[settings.series]._pages), + tif.series[settings.series], + ) + ] + else: + images = [] + for i, s in enumerate(tif.series[: settings.noplots]): + try: + images.append( + (tif.asarray(series=i), notnone(s._pages), tif.series[i]) + ) + except ValueError as e: + images.append((None, notnone(s.pages), None)) + if settings.debug: + raise + else: + print("\nSeries %i failed: %s... " % (i, e), end="") + if not settings.quiet: + print("%.3f ms" % ((time.time() - start) * 1e3)) + except Exception as e: + if settings.debug: + raise + else: + print(e) + + if not settings.quiet: + print() + print(TiffFile.__str__(tif, detail=int(settings.detail))) + print() + tif.close() + + if images and settings.noplots > 0: + try: + import matplotlib + + matplotlib.use("TkAgg") + from matplotlib import pyplot + except ImportError as e: + warnings.warn("failed to import matplotlib.\n%s" % e) + else: + for img, page, series in images: + if img is None: + continue + vmin, vmax = settings.vmin, settings.vmax + if "GDAL_NODATA" in page.tags: + try: + vmin = numpy.min( + img[img > float(page.tags["GDAL_NODATA"].value)] + ) + except ValueError: + pass + if tif.is_stk: + try: + vmin = tif.stk_metadata["MinScale"] + vmax = tif.stk_metadata["MaxScale"] + except KeyError: + pass + else: + if vmax <= vmin: + vmin, vmax = settings.vmin, settings.vmax + if series: + title = "%s\n%s\n%s" % (str(tif), str(page), str(series)) + else: + title = "%s\n %s" % (str(tif), str(page)) + photometric = "MINISBLACK" + if page.photometric not in (3,): + photometric = TIFF.PHOTOMETRIC(page.photometric).name + imshow( + img, + title=title, + vmin=vmin, + vmax=vmax, + bitspersample=page.bitspersample, + 
photometric=photometric, + interpolation=settings.interpol, + dpi=settings.dpi, + ) + pyplot.show() + + +if sys.version_info[0] == 2: + inttypes = int, long # noqa + + def print_(*args, **kwargs): + """Print function with flush support.""" + flush = kwargs.pop("flush", False) + print(*args, **kwargs) + if flush: + sys.stdout.flush() + + def bytes2str(b, encoding=None, errors=None): + """Return string from bytes.""" + return b + + def str2bytes(s, encoding=None): + """Return bytes from string.""" + return s + + def byte2int(b): + """Return value of byte as int.""" + return ord(b) + + class FileNotFoundError(IOError): + pass + + TiffFrame = TiffPage # noqa +else: + inttypes = int + basestring = str, bytes + unicode = str + print_ = print + + def bytes2str(b, encoding=None, errors="strict"): + """Return unicode string from encoded bytes.""" + if encoding is not None: + return b.decode(encoding, errors) + try: + return b.decode("utf-8", errors) + except UnicodeDecodeError: + return b.decode("cp1252", errors) + + def str2bytes(s, encoding="cp1252"): + """Return bytes from unicode string.""" + return s.encode(encoding) + + def byte2int(b): + """Return value of byte as int.""" + return b + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/bsdf.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/bsdf.py new file mode 100644 index 0000000000000000000000000000000000000000..041d7e524366d312516b637481f51cd799d74d37 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/bsdf.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write BSDF files. + +Backend Library: internal + +The BSDF format enables reading and writing of image data in the +BSDF serialization format. This format allows storage of images, volumes, +and series thereof. Data can be of any numeric data type, and can +optionally be compressed. 
Each image/volume can have associated +meta data, which can consist of any data type supported by BSDF. + +By default, image data is lazily loaded; the actual image data is +not read until it is requested. This allows storing multiple images +in a single file and still have fast access to individual images. +Alternatively, a series of images can be read in streaming mode, reading +images as they are read (e.g. from http). + +BSDF is a simple generic binary format. It is easy to extend and there +are standard extension definitions for 2D and 3D image data. +Read more at http://bsdf.io. + + +Parameters +---------- +random_access : bool + Whether individual images in the file can be read in random order. + Defaults to True for normal files, and to False when reading from HTTP. + If False, the file is read in "streaming mode", allowing reading + files as they are read, but without support for "rewinding". + Note that setting this to True when reading from HTTP, the whole file + is read upon opening it (since lazy loading is not possible over HTTP). + +compression : int + Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib + compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2 + compression (more compact but slower). Default 1 (zlib). + Note that some BSDF implementations may not support compression + (e.g. JavaScript). + +""" + +import numpy as np + +from ..core import Format + + +def get_bsdf_serializer(options): + from . 
import _bsdf as bsdf + + class NDArrayExtension(bsdf.Extension): + """Copy of BSDF's NDArrayExtension but deal with lazy blobs.""" + + name = "ndarray" + cls = np.ndarray + + def encode(self, s, v): + return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + return v # return as dict, because of lazy blobs, decode in Image + + class ImageExtension(bsdf.Extension): + """We implement two extensions that trigger on the Image classes.""" + + def encode(self, s, v): + return dict(array=v.array, meta=v.meta) + + def decode(self, s, v): + return Image(v["array"], v["meta"]) + + class Image2DExtension(ImageExtension): + name = "image2d" + cls = Image2D + + class Image3DExtension(ImageExtension): + name = "image3d" + cls = Image3D + + exts = [NDArrayExtension, Image2DExtension, Image3DExtension] + serializer = bsdf.BsdfSerializer(exts, **options) + + return bsdf, serializer + + +class Image: + """Class in which we wrap the array and meta data. By using an extension + we can make BSDF trigger on these classes and thus encode the images. + as actual images. + """ + + def __init__(self, array, meta): + self.array = array + self.meta = meta + + def get_array(self): + if not isinstance(self.array, np.ndarray): + v = self.array + blob = v["data"] + if not isinstance(blob, bytes): # then it's a lazy bsdf.Blob + blob = blob.get_bytes() + self.array = np.frombuffer(blob, dtype=v["dtype"]) + self.array.shape = v["shape"] + return self.array + + def get_meta(self): + return self.meta + + +class Image2D(Image): + pass + + +class Image3D(Image): + pass + + +class BsdfFormat(Format): + """The BSDF format enables reading and writing of image data in the + BSDF serialization format. This format allows storage of images, volumes, + and series thereof. Data can be of any numeric data type, and can + optionally be compressed. Each image/volume can have associated + meta data, which can consist of any data type supported by BSDF. 
+ + By default, image data is lazily loaded; the actual image data is + not read until it is requested. This allows storing multiple images + in a single file and still have fast access to individual images. + Alternatively, a series of images can be read in streaming mode, reading + images as they are read (e.g. from http). + + BSDF is a simple generic binary format. It is easy to extend and there + are standard extension definitions for 2D and 3D image data. + Read more at http://bsdf.io. + + Parameters for reading + ---------------------- + random_access : bool + Whether individual images in the file can be read in random order. + Defaults to True for normal files, and to False when reading from HTTP. + If False, the file is read in "streaming mode", allowing reading + files as they are read, but without support for "rewinding". + Note that setting this to True when reading from HTTP, the whole file + is read upon opening it (since lazy loading is not possible over HTTP). + + Parameters for saving + --------------------- + compression : {0, 1, 2} + Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib + compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2 + compression (more compact but slower). Default 1 (zlib). + Note that some BSDF implementations may not support compression + (e.g. JavaScript). + + """ + + def _can_read(self, request): + if request.mode[1] in (self.modes + "?"): + # if request.extension in self.extensions: + # return True + if request.firstbytes.startswith(b"BSDF"): + return True + + def _can_write(self, request): + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, random_access=None): + # Validate - we need a BSDF file consisting of a list of images + # The list is typically a stream, but does not have to be. 
+ assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file" + # self.request.firstbytes[5:6] == major and minor version + if not ( + self.request.firstbytes[6:15] == b"M\x07image2D" + or self.request.firstbytes[6:15] == b"M\x07image3D" + or self.request.firstbytes[6:7] == b"l" + ): + pass # Actually, follow a more duck-type approach ... + # raise RuntimeError('BSDF file does not look like an ' + # 'image container.') + # Set options. If we think that seeking is allowed, we lazily load + # blobs, and set streaming to False (i.e. the whole file is read, + # but we skip over binary blobs), so that we subsequently allow + # random access to the images. + # If seeking is not allowed (e.g. with a http request), we cannot + # lazily load blobs, but we can still load streaming from the web. + options = {} + if self.request.filename.startswith(("http://", "https://")): + ra = False if random_access is None else bool(random_access) + options["lazy_blob"] = False # Because we cannot seek now + options["load_streaming"] = not ra # Load as a stream? + else: + ra = True if random_access is None else bool(random_access) + options["lazy_blob"] = ra # Don't read data until needed + options["load_streaming"] = not ra + + file = self.request.get_file() + bsdf, self._serializer = get_bsdf_serializer(options) + self._stream = self._serializer.load(file) + # Another validation + if ( + isinstance(self._stream, dict) + and "meta" in self._stream + and "array" in self._stream + ): + self._stream = Image(self._stream["array"], self._stream["meta"]) + if not isinstance(self._stream, (Image, list, bsdf.ListStream)): + raise RuntimeError( + "BSDF file does not look seem to have an " "image container." 
+ ) + + def _close(self): + pass + + def _get_length(self): + if isinstance(self._stream, Image): + return 1 + elif isinstance(self._stream, list): + return len(self._stream) + elif self._stream.count < 0: + return np.inf + return self._stream.count + + def _get_data(self, index): + # Validate + if index < 0 or index >= self.get_length(): + raise IndexError( + "Image index %i not in [0 %i]." % (index, self.get_length()) + ) + # Get Image object + if isinstance(self._stream, Image): + image_ob = self._stream # singleton + elif isinstance(self._stream, list): + # Easy when we have random access + image_ob = self._stream[index] + else: + # For streaming, we need to skip over frames + if index < self._stream.index: + raise IndexError( + "BSDF file is being read in streaming " + "mode, thus does not allow rewinding." + ) + while index > self._stream.index: + self._stream.next() + image_ob = self._stream.next() # Can raise StopIteration + # Is this an image? + if ( + isinstance(image_ob, dict) + and "meta" in image_ob + and "array" in image_ob + ): + image_ob = Image(image_ob["array"], image_ob["meta"]) + if isinstance(image_ob, Image): + # Return as array (if we have lazy blobs, they are read now) + return image_ob.get_array(), image_ob.get_meta() + else: + r = repr(image_ob) + r = r if len(r) < 200 else r[:197] + "..." 
+ raise RuntimeError("BSDF file contains non-image " + r) + + def _get_meta_data(self, index): # pragma: no cover + return {} # This format does not support global meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, compression=1): + options = {"compression": compression} + bsdf, self._serializer = get_bsdf_serializer(options) + if self.request.mode[1] in "iv": + self._stream = None # Singleton image + self._written = False + else: + # Series (stream) of images + file = self.request.get_file() + self._stream = bsdf.ListStream() + self._serializer.save(file, self._stream) + + def _close(self): + # We close the stream here, which will mark the number of written + # elements. If we would not close it, the file would be fine, it's + # just that upon reading it would not be known how many items are + # in there. + if self._stream is not None: + self._stream.close(False) # False says "keep this a stream" + + def _append_data(self, im, meta): + # Determine dimension + ndim = None + if self.request.mode[1] in "iI": + ndim = 2 + elif self.request.mode[1] in "vV": + ndim = 3 + else: + ndim = 3 # Make an educated guess + if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4): + ndim = 2 + # Validate shape + assert ndim in (2, 3) + if ndim == 2: + assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4) + else: + assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4) + # Wrap data and meta data in our special class that will trigger + # the BSDF image2D or image3D extension. 
+ if ndim == 2: + ob = Image2D(im, meta) + else: + ob = Image3D(im, meta) + # Write directly or to stream + if self._stream is None: + assert not self._written, "Cannot write singleton image twice" + self._written = True + file = self.request.get_file() + self._serializer.save(file, ob) + else: + self._stream.append(ob) + + def set_meta_data(self, meta): # pragma: no cover + raise RuntimeError("The BSDF format only supports " "per-image meta data.") diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/dicom.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/dicom.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f366449c9a707ad792cbe61c3e46f343e665ff --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/dicom.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read DICOM files. + +Backend Library: internal + +A format for reading DICOM images: a common format used to store +medical image data, such as X-ray, CT and MRI. + +This format borrows some code (and ideas) from the pydicom project. However, +only a predefined subset of tags are extracted from the file. This allows +for great simplifications allowing us to make a stand-alone reader, and +also results in a much faster read time. + +By default, only uncompressed and deflated transfer syntaxes are supported. +If gdcm or dcmtk is installed, these will be used to automatically convert +the data. See https://github.com/malaterre/GDCM/releases for installing GDCM. + +This format provides functionality to group images of the same +series together, thus extracting volumes (and multiple volumes). +Using volread will attempt to yield a volume. If multiple volumes +are present, the first one is given. Using mimread will simply yield +all images in the given directory (not taking series into account). 
+ +Parameters +---------- +progress : {True, False, BaseProgressIndicator} + Whether to show progress when reading from multiple files. + Default True. By passing an object that inherits from + BaseProgressIndicator, the way in which progress is reported + can be costumized. + +""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? + +import os +import sys +import logging +import subprocess + +from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator +from ..core import read_n_bytes + +_dicom = None # lazily loaded in load_lib() + +logger = logging.getLogger(__name__) + + +def load_lib(): + global _dicom + from . import _dicom + + return _dicom + + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + + +def get_dcmdjpeg_exe(): + fname = "dcmdjpeg" + ".exe" * sys.platform.startswith("win") + for dir in ( + "c:\\dcmtk", + "c:\\Program Files", + "c:\\Program Files\\dcmtk", + "c:\\Program Files (x86)\\dcmtk", + ): + filename = os.path.join(dir, fname) + if os.path.isfile(filename): + return [filename] + + try: + subprocess.check_call([fname, "--version"]) + return [fname] + except Exception: + return None + + +def get_gdcmconv_exe(): + fname = "gdcmconv" + ".exe" * sys.platform.startswith("win") + # Maybe it's on the path + try: + subprocess.check_call([fname, "--version"]) + return [fname, "--raw"] + except Exception: + pass + # Select directories where it could be + candidates = [] + base_dirs = [r"c:\Program Files"] + for base_dir in base_dirs: + if os.path.isdir(base_dir): + for dname in os.listdir(base_dir): + if dname.lower().startswith("gdcm"): + suffix = dname[4:].strip() + candidates.append((suffix, os.path.join(base_dir, dname))) + # Sort, so higher versions are tried earlier + 
candidates.sort(reverse=True) + # Select executable + filename = None + for _, dirname in candidates: + exe1 = os.path.join(dirname, "gdcmconv.exe") + exe2 = os.path.join(dirname, "bin", "gdcmconv.exe") + if os.path.isfile(exe1): + filename = exe1 + break + if os.path.isfile(exe2): + filename = exe2 + break + else: + return None + return [filename, "--raw"] + + +class DicomFormat(Format): + """See :mod:`imageio.plugins.dicom`""" + + def _can_read(self, request): + # If user URI was a directory, we check whether it has a DICOM file + if os.path.isdir(request.filename): + files = os.listdir(request.filename) + for fname in sorted(files): # Sorting make it consistent + filename = os.path.join(request.filename, fname) + if os.path.isfile(filename) and "DICOMDIR" not in fname: + with open(filename, "rb") as f: + first_bytes = read_n_bytes(f, 140) + return first_bytes[128:132] == b"DICM" + else: + return False + # Check + return request.firstbytes[128:132] == b"DICM" + + def _can_write(self, request): + # We cannot save yet. May be possible if we will used pydicom as + # a backend. + return False + + # -- + + class Reader(Format.Reader): + _compressed_warning_dirs = set() + + def _open(self, progress=True): + if not _dicom: + load_lib() + if os.path.isdir(self.request.filename): + # A dir can be given if the user used the format explicitly + self._info = {} + self._data = None + else: + # Read the given dataset now ... + try: + dcm = _dicom.SimpleDicomReader(self.request.get_file()) + except _dicom.CompressedDicom as err: + # We cannot do this on our own. Perhaps with some help ... 
+ cmd = get_gdcmconv_exe() + if not cmd and "JPEG" in str(err): + cmd = get_dcmdjpeg_exe() + if not cmd: + msg = err.args[0].replace("using", "installing") + msg = msg.replace("convert", "auto-convert") + err.args = (msg,) + raise + else: + fname1 = self.request.get_local_filename() + fname2 = fname1 + ".raw" + try: + subprocess.check_call(cmd + [fname1, fname2]) + except Exception: + raise err + d = os.path.dirname(fname1) + if d not in self._compressed_warning_dirs: + self._compressed_warning_dirs.add(d) + logger.warning( + "DICOM file contained compressed data. " + + "Autoconverting with " + + cmd[0] + + " (this warning is shown once for each directory)" + ) + dcm = _dicom.SimpleDicomReader(fname2) + + self._info = dcm._info + self._data = dcm.get_numpy_array() + + # Initialize series, list of DicomSeries objects + self._series = None # only created if needed + + # Set progress indicator + if isinstance(progress, BaseProgressIndicator): + self._progressIndicator = progress + elif progress is True: + p = StdoutProgressIndicator("Reading DICOM") + self._progressIndicator = p + elif progress in (None, False): + self._progressIndicator = BaseProgressIndicator("Dummy") + else: + raise ValueError("Invalid value for progress.") + + def _close(self): + # Clean up + self._info = None + self._data = None + self._series = None + + @property + def series(self): + if self._series is None: + pi = self._progressIndicator + self._series = _dicom.process_directory(self.request, pi) + return self._series + + def _get_length(self): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + # User expects one, but lets be honest about this file + return nslices + elif self.request.mode[1] == "I": + # User expects multiple, if this file has multiple slices, ok. + # Otherwise we have to check the series. 
+ if nslices > 1: + return nslices + else: + return sum([len(serie) for serie in self.series]) + elif self.request.mode[1] == "v": + # User expects a volume, if this file has one, ok. + # Otherwise we have to check the series + if nslices > 1: + return 1 + else: + return len(self.series) # We assume one volume per series + elif self.request.mode[1] == "V": + # User expects multiple volumes. We have to check the series + return len(self.series) # We assume one volume per series + else: + raise RuntimeError("DICOM plugin should know what to expect.") + + def _get_slice_data(self, index): + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + # Allow index >1 only if this file contains >1 + if nslices > 1: + return self._data[index], self._info + elif index == 0: + return self._data, self._info + else: + raise IndexError("Dicom file contains only one slice.") + + def _get_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + return self._get_slice_data(index) + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._data[index], self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].get_numpy_array(), L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._data, self._info + else: + return ( + self.series[index].get_numpy_array(), + self.series[index].info, + ) + # mode is `?` (typically because we are using V3). If there is a + # series (multiple files), index referrs to the element of the + # series and we read volumes. If there is no series, index + # referrs to the slice in the volume we read "flat" images. 
+ elif len(self.series) > 1: + # mode is `?` and there are multiple series. Each series is a ndimage. + return ( + self.series[index].get_numpy_array(), + self.series[index].info, + ) + else: + # mode is `?` and there is only one series. Each slice is an ndimage. + return self._get_slice_data(index) + + def _get_meta_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + # Default is the meta data of the given file, or the "first" file. + if index is None: + return self._info + + if self.request.mode[1] == "i": + return self._info + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._info + else: + return self.series[index].info + else: # pragma: no cover + raise ValueError("DICOM plugin should know what to expect.") diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/feisem.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/feisem.py new file mode 100644 index 0000000000000000000000000000000000000000..af50768a3c5a904bd5fde6ea98feb942fba4dc10 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/feisem.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read TIFF from FEI SEM microscopes. + +Backend Library: internal + +This format is based on :mod:`TIFF `, and supports the +same parameters. FEI microscopes append metadata as ASCII text at the end of the +file, which this reader correctly extracts. 

Parameters
----------
discard_watermark : bool
    If True (default), discard the bottom rows of the image, which
    contain no image data, only a watermark with metadata.
watermark_height : int
    The height in pixels of the FEI watermark. The default is 70.

See Also
--------
    :mod:`imageio.plugins.tifffile`

"""


from .tifffile import TiffFormat


class FEISEMFormat(TiffFormat):
    """See :mod:`imageio.plugins.feisem`"""

    def _can_write(self, request):
        # FEI-SEM only supports reading; writing plain TIFF is handled by
        # the tifffile plugin instead.
        return False  # FEI-SEM only supports reading

    class Reader(TiffFormat.Reader):
        def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
            """Get image and metadata from given index.

            FEI images usually (always?) contain a watermark at the
            bottom of the image, 70 pixels high. We discard this by
            default as it does not contain any information not present
            in the metadata.
            """
            # Delegate the actual TIFF decoding to the tifffile reader.
            im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
            if discard_watermark:
                # Drop the watermark rows at the bottom of the frame.
                # NOTE(review): assumes watermark_height < image height and
                # watermark_height > 0 (a value of 0 would slice to empty) —
                # TODO confirm against real FEI files.
                im = im[:-watermark_height]
            return im, meta

        def _get_meta_data(self, index=None):
            """Read the metadata from an FEI SEM TIFF.

            This metadata is included as ASCII text at the end of the file.

            The index, if provided, is ignored.

            Returns
            -------
            metadata : dict
                Dictionary of metadata.
+ """ + if hasattr(self, "_fei_meta"): + return self._fei_meta + + md = {"root": {}} + current_tag = "root" + reading_metadata = False + filename = self.request.get_local_filename() + with open(filename, encoding="utf8", errors="ignore") as fin: + for line in fin: + if not reading_metadata: + if not line.startswith("Date="): + continue + else: + reading_metadata = True + line = line.rstrip() + if line.startswith("["): + current_tag = line.lstrip("[").rstrip("]") + md[current_tag] = {} + else: + if "=" in line: # ignore empty and irrelevant lines + key, val = line.split("=", maxsplit=1) + for tag_type in (int, float): + try: + val = tag_type(val) + except ValueError: + continue + else: + break + md[current_tag][key] = val + if not md["root"] and len(md) == 1: + raise ValueError("Input file %s contains no FEI metadata." % filename) + + self._fei_meta = md + return md diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py new file mode 100644 index 0000000000000000000000000000000000000000..ce47323b517c8949762515afd3fee3dff4dca90e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py @@ -0,0 +1,729 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read/Write video using FFMPEG + +.. note:: + We are in the process of (slowly) replacing this plugin with a new one that + is based on `pyav `_. It is faster and more + flexible than the plugin documented here. Check the :mod:`pyav + plugin's documentation ` for more information about + this plugin. + +Backend Library: https://github.com/imageio/imageio-ffmpeg + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[ffmpeg] + + +The ffmpeg format provides reading and writing for a wide range of movie formats +such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from +webcams and USB cameras. 
It is based on ffmpeg and is inspired by/based `moviepy +`_ by Zulko. + +Parameters for reading +---------------------- +fps : scalar + The number of frames per second of the input stream. Default None (i.e. + read at the file's native fps). One can use this for files with a + variable fps, or in cases where imageio is unable to correctly detect + the fps. In case of trouble opening camera streams, it may help to set an + explicit fps value matching a framerate supported by the camera. +loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + Setting this to True will internally call ``count_frames()``, + and set the reader's length to that value instead of inf. +size : str | tuple + The frame size (i.e. resolution) to read the images, e.g. + (100, 100) or "640x480". For camera streams, this allows setting + the capture resolution. For normal video data, ffmpeg will + rescale the data. +dtype : str | type + The dtype for the output arrays. Determines the bit-depth that + is requested from ffmpeg. Supported dtypes: uint8, uint16. + Default: uint8. +pixelformat : str + The pixel format for the camera to use (e.g. "yuyv422" or + "gray"). The camera needs to support the format in order for + this to take effect. Note that the images produced by this + reader are always RGB. +input_params : list + List additional arguments to ffmpeg for input file options. + (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use aggressive error handling: + ['-err_detect', 'aggressive'] +output_params : list + List additional arguments to ffmpeg for output file options (i.e. the + stream being read by imageio). +print_info : bool + Print information about the video file as reported by ffmpeg. + +Parameters for writing +---------------------- +fps : scalar + The number of frames per second. Default 10. +codec : str + the video codec to use. 
Default 'libx264', which represents the + widely available mpeg4. Except when saving .wmv files, then the + defaults is 'msmpeg4' which is more commonly supported for windows +quality : float | None + Video output quality. Default is 5. Uses variable bit rate. Highest + quality is 10, lowest is 0. Set to None to prevent variable bitrate + flags to FFMPEG so you can manually specify them using output_params + instead. Specifying a fixed bitrate using 'bitrate' disables this + parameter. +bitrate : int | None + Set a constant bitrate for the video encoding. Default is None causing + 'quality' parameter to be used instead. Better quality videos with + smaller file sizes will result from using the 'quality' variable + bitrate parameter rather than specifying a fixed bitrate with this + parameter. +pixelformat: str + The output video pixel format. Default is 'yuv420p' which most widely + supported by video players. +input_params : list + List additional arguments to ffmpeg for input file options (i.e. the + stream that imageio provides). +output_params : list + List additional arguments to ffmpeg for output file options. + (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use only intra frames and set aspect ratio: + ['-intra', '-aspect', '16:9'] +ffmpeg_log_level: str + Sets ffmpeg output log level. Default is "warning". + Values can be "quiet", "panic", "fatal", "error", "warning", "info" + "verbose", or "debug". Also prints the FFMPEG command being used by + imageio if "info", "verbose", or "debug". +macro_block_size: int + Size constraint for video. Width and height, must be divisible by this + number. If not divisible by this number imageio will tell ffmpeg to + scale the image up to the next closest size + divisible by this number. Most codecs are compatible with a macroblock + size of 16 (default), some can go smaller (4, 8). 
To disable this + automatic feature set it to None or 1, however be warned many players + can't decode videos that are odd in size and some codecs will produce + poor results or fail. See https://en.wikipedia.org/wiki/Macroblock. +audio_path : str | None + Audio path of any audio that needs to be written. Defaults to nothing, + so no audio will be written. Please note, when writing shorter video + than the original, ffmpeg will not truncate the audio track; it + will maintain its original length and be longer than the video. +audio_codec : str | None + The audio codec to use. Defaults to nothing, but if an audio_path has + been provided ffmpeg will attempt to set a default codec. + +Notes +----- +If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to +encode/decode H.264 (likely due to licensing concerns). If you need this +format on anaconda install ``conda-forge/ffmpeg`` instead. + +You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a +specific ffmpeg executable. + +To get the number of frames before having read them all, you can use the +``reader.count_frames()`` method (the reader will then use +``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames, +note that this operation can take a few seconds on large files). Alternatively, +the number of frames can be estimated from the fps and duration in the meta data +(though these values themselves are not always present/reliable). 
+ +""" + +import re +import sys +import time +import logging +import platform +import threading +import subprocess as sp +import imageio_ffmpeg + +import numpy as np + +from ..core import Format, image_as_uint + +logger = logging.getLogger(__name__) + +# Get camera format +if sys.platform.startswith("win"): + CAM_FORMAT = "dshow" # dshow or vfwcap +elif sys.platform.startswith("linux"): + CAM_FORMAT = "video4linux2" +elif sys.platform.startswith("darwin"): + CAM_FORMAT = "avfoundation" +else: # pragma: no cover + CAM_FORMAT = "unknown-cam-format" + + +def download(directory=None, force_download=False): # pragma: no cover + raise RuntimeError( + "imageio.ffmpeg.download() has been deprecated. " + "Use 'pip install imageio-ffmpeg' instead.'" + ) + + +# For backwards compatibility - we dont use this ourselves +def get_exe(): # pragma: no cover + """Wrapper for imageio_ffmpeg.get_ffmpeg_exe()""" + + return imageio_ffmpeg.get_ffmpeg_exe() + + +class FfmpegFormat(Format): + """Read/Write ImageResources using FFMPEG. + + See :mod:`imageio.plugins.ffmpeg` + """ + + def _can_read(self, request): + # Read from video stream? + # Note that we could write the _video flag here, but a user might + # select this format explicitly (and this code is not run) + if re.match(r"", request.filename): + return True + + # Read from file that we know? 
+ if request.extension in self.extensions: + return True + + def _can_write(self, request): + if request.extension in self.extensions: + return True + + # -- + + class Reader(Format.Reader): + _frame_catcher = None + _read_gen = None + + def _get_cam_inputname(self, index): + if sys.platform.startswith("linux"): + return "/dev/" + self.request._video[1:-1] + + elif sys.platform.startswith("win"): + # Ask ffmpeg for list of dshow device names + ffmpeg_api = imageio_ffmpeg + cmd = [ + ffmpeg_api.get_ffmpeg_exe(), + "-list_devices", + "true", + "-f", + CAM_FORMAT, + "-i", + "dummy", + ] + # Set `shell=True` in sp.run to prevent popup of a command + # line window in frozen applications. Note: this would be a + # security vulnerability if user-input goes into the cmd. + # Note that the ffmpeg process returns with exit code 1 when + # using `-list_devices` (or `-list_options`), even if the + # command is successful, so we set `check=False` explicitly. + completed_process = sp.run( + cmd, + stdout=sp.PIPE, + stderr=sp.PIPE, + encoding="utf-8", + shell=True, + check=False, + ) + + # Return device name at index + try: + name = parse_device_names(completed_process.stderr)[index] + except IndexError: + raise IndexError("No ffdshow camera at index %i." % index) + return "video=%s" % name + + elif sys.platform.startswith("darwin"): + # Appears that newer ffmpeg builds don't support -list-devices + # on OS X. But you can directly open the camera by index. + name = str(index) + return name + + else: # pragma: no cover + return "??" 
+ + def _open( + self, + loop=False, + size=None, + dtype=None, + pixelformat=None, + print_info=False, + ffmpeg_params=None, + input_params=None, + output_params=None, + fps=None, + ): + # Get generator functions + self._ffmpeg_api = imageio_ffmpeg + # Process input args + self._arg_loop = bool(loop) + if size is None: + self._arg_size = None + elif isinstance(size, tuple): + self._arg_size = "%ix%i" % size + elif isinstance(size, str) and "x" in size: + self._arg_size = size + else: + raise ValueError('FFMPEG size must be tuple of "NxM"') + if pixelformat is None: + pass + elif not isinstance(pixelformat, str): + raise ValueError("FFMPEG pixelformat must be str") + if dtype is None: + self._dtype = np.dtype("uint8") + else: + self._dtype = np.dtype(dtype) + allowed_dtypes = ["uint8", "uint16"] + if self._dtype.name not in allowed_dtypes: + raise ValueError( + "dtype must be one of: {}".format(", ".join(allowed_dtypes)) + ) + self._arg_pixelformat = pixelformat + self._arg_input_params = input_params or [] + self._arg_output_params = output_params or [] + self._arg_input_params += ffmpeg_params or [] # backward compat + # Write "_video"_arg - indicating webcam support + self.request._video = None + regex_match = re.match(r"", self.request.filename) + if regex_match: + self.request._video = self.request.filename + # Get local filename + if self.request._video: + index = int(regex_match.group(1)) + self._filename = self._get_cam_inputname(index) + else: + self._filename = self.request.get_local_filename() + # When passed to ffmpeg on command line, carets need to be escaped. 
+ self._filename = self._filename.replace("^", "^^") + # Determine pixel format and depth + self._depth = 3 + if self._dtype.name == "uint8": + self._pix_fmt = "rgb24" + self._bytes_per_channel = 1 + else: + self._pix_fmt = "rgb48le" + self._bytes_per_channel = 2 + # Initialize parameters + self._pos = -1 + self._meta = {"plugin": "ffmpeg"} + self._lastread = None + + # Calculating this from fps and duration is not accurate, + # and calculating it exactly with ffmpeg_api.count_frames_and_secs + # takes too long to do for each video. But we need it for looping. + self._nframes = float("inf") + if self._arg_loop and not self.request._video: + self._nframes = self.count_frames() + self._meta["nframes"] = self._nframes + + # Specify input framerate? (only on macOS) + # Ideally we'd get the supported framerate from the metadata, but we get the + # metadata when we boot ffmpeg ... maybe we could refactor this so we can + # get the metadata beforehand, but for now we'll just give it 2 tries on MacOS, + # one with fps 30 and one with fps 15. + need_input_fps = need_output_fps = False + if self.request._video and platform.system().lower() == "darwin": + if "-framerate" not in str(self._arg_input_params): + need_input_fps = True + if not self.request.kwargs.get("fps", None): + need_output_fps = True + if need_input_fps: + self._arg_input_params.extend(["-framerate", str(float(30))]) + if need_output_fps: + self._arg_output_params.extend(["-r", str(float(30))]) + + # Start ffmpeg subprocess and get meta information + try: + self._initialize() + except IndexError: + # Specify input framerate again, this time different. 
+ if need_input_fps: + self._arg_input_params[-1] = str(float(15)) + self._initialize() + else: + raise + + # For cameras, create thread that keeps reading the images + if self.request._video: + self._frame_catcher = FrameCatcher(self._read_gen) + + # For reference - but disabled, because it is inaccurate + # if self._meta["nframes"] == float("inf"): + # if self._meta.get("fps", 0) > 0: + # if self._meta.get("duration", 0) > 0: + # n = round(self._meta["duration"] * self._meta["fps"]) + # self._meta["nframes"] = int(n) + + def _close(self): + # First close the frame catcher, because we cannot close the gen + # if the frame catcher thread is using it + if self._frame_catcher is not None: + self._frame_catcher.stop_me() + self._frame_catcher = None + if self._read_gen is not None: + self._read_gen.close() + self._read_gen = None + + def count_frames(self): + """Count the number of frames. Note that this can take a few + seconds for large files. Also note that it counts the number + of frames in the original video and does not take a given fps + into account. + """ + # This would have been nice, but this does not work :( + # oargs = [] + # if self.request.kwargs.get("fps", None): + # fps = float(self.request.kwargs["fps"]) + # oargs += ["-r", "%.02f" % fps] + cf = self._ffmpeg_api.count_frames_and_secs + return cf(self._filename)[0] + + def _get_length(self): + return self._nframes # only not inf if loop is True + + def _get_data(self, index): + """Reads a frame at index. Note for coders: getting an + arbitrary frame in the video with ffmpeg can be painfully + slow if some decoding has to be done. 
This function tries + to avoid fectching arbitrary frames whenever possible, by + moving between adjacent frames.""" + # Modulo index (for looping) + if self._arg_loop and self._nframes < float("inf"): + index %= self._nframes + + if index == self._pos: + return self._lastread, dict(new=False) + elif index < 0: + raise IndexError("Frame index must be >= 0") + elif index >= self._nframes: + raise IndexError("Reached end of video") + else: + if (index < self._pos) or (index > self._pos + 100): + self._initialize(index) + else: + self._skip_frames(index - self._pos - 1) + result, is_new = self._read_frame() + self._pos = index + return result, dict(new=is_new) + + def _get_meta_data(self, index): + return self._meta + + def _initialize(self, index=0): + # Close the current generator, and thereby terminate its subprocess + if self._read_gen is not None: + self._read_gen.close() + + iargs = [] + oargs = [] + + # Create input args + iargs += self._arg_input_params + if self.request._video: + iargs += ["-f", CAM_FORMAT] + if self._arg_pixelformat: + iargs += ["-pix_fmt", self._arg_pixelformat] + if self._arg_size: + iargs += ["-s", self._arg_size] + elif index > 0: # re-initialize / seek + # Note: only works if we initialized earlier, and now have meta + # Some info here: https://trac.ffmpeg.org/wiki/Seeking + # There are two ways to seek, one before -i (input_params) and + # after (output_params). The former is fast, because it uses + # keyframes, the latter is slow but accurate. According to + # the article above, the fast method should also be accurate + # from ffmpeg version 2.1, however in version 4.1 our tests + # start failing again. Not sure why, but we can solve this + # by combining slow and fast. Seek the long stretch using + # the fast method, and seek the last 10s the slow way. 
+ starttime = index / self._meta["fps"] + seek_slow = min(10, starttime) + seek_fast = starttime - seek_slow + # We used to have this epsilon earlier, when we did not use + # the slow seek. I don't think we need it anymore. + # epsilon = -1 / self._meta["fps"] * 0.1 + iargs += ["-ss", "%.06f" % (seek_fast)] + oargs += ["-ss", "%.06f" % (seek_slow)] + + # Output args, for writing to pipe + if self._arg_size: + oargs += ["-s", self._arg_size] + if self.request.kwargs.get("fps", None): + fps = float(self.request.kwargs["fps"]) + oargs += ["-r", "%.02f" % fps] + oargs += self._arg_output_params + + # Get pixelformat and bytes per pixel + pix_fmt = self._pix_fmt + bpp = self._depth * self._bytes_per_channel + + # Create generator + rf = self._ffmpeg_api.read_frames + self._read_gen = rf( + self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs + ) + + # Read meta data. This start the generator (and ffmpeg subprocess) + if self.request._video: + # With cameras, catch error and turn into IndexError + try: + meta = self._read_gen.__next__() + except IOError as err: + err_text = str(err) + if "darwin" in sys.platform: + if "Unknown input format: 'avfoundation'" in err_text: + err_text += ( + "Try installing FFMPEG using " + "home brew to get a version with " + "support for cameras." 
+ ) + raise IndexError( + "No (working) camera at {}.\n\n{}".format( + self.request._video, err_text + ) + ) + else: + self._meta.update(meta) + elif index == 0: + self._meta.update(self._read_gen.__next__()) + else: + self._read_gen.__next__() # we already have meta data + + def _skip_frames(self, n=1): + """Reads and throws away n frames""" + for i in range(n): + self._read_gen.__next__() + self._pos += n + + def _read_frame(self): + # Read and convert to numpy array + w, h = self._meta["size"] + framesize = w * h * self._depth * self._bytes_per_channel + # t0 = time.time() + + # Read frame + if self._frame_catcher: # pragma: no cover - camera thing + s, is_new = self._frame_catcher.get_frame() + else: + s = self._read_gen.__next__() + is_new = True + + # Check + if len(s) != framesize: + raise RuntimeError( + "Frame is %i bytes, but expected %i." % (len(s), framesize) + ) + + result = np.frombuffer(s, dtype=self._dtype).copy() + result = result.reshape((h, w, self._depth)) + # t1 = time.time() + # print('etime', t1-t0) + + # Store and return + self._lastread = result + return result, is_new + + # -- + + class Writer(Format.Writer): + _write_gen = None + + def _open( + self, + fps=10, + codec="libx264", + bitrate=None, + pixelformat="yuv420p", + ffmpeg_params=None, + input_params=None, + output_params=None, + ffmpeg_log_level="quiet", + quality=5, + macro_block_size=16, + audio_path=None, + audio_codec=None, + ): + self._ffmpeg_api = imageio_ffmpeg + self._filename = self.request.get_local_filename() + self._pix_fmt = None + self._depth = None + self._size = None + + def _close(self): + if self._write_gen is not None: + self._write_gen.close() + self._write_gen = None + + def _append_data(self, im, meta): + # Get props of image + h, w = im.shape[:2] + size = w, h + depth = 1 if im.ndim == 2 else im.shape[2] + + # Ensure that image is in uint8 + im = image_as_uint(im, bitdepth=8) + # To be written efficiently, ie. 
without creating an immutable + # buffer, by calling im.tobytes() the array must be contiguous. + if not im.flags.c_contiguous: + # checkign the flag is a micro optimization. + # the image will be a numpy subclass. See discussion + # https://github.com/numpy/numpy/issues/11804 + im = np.ascontiguousarray(im) + + # Set size and initialize if not initialized yet + if self._size is None: + map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"} + self._pix_fmt = map.get(depth, None) + if self._pix_fmt is None: + raise ValueError("Image must have 1, 2, 3 or 4 channels") + self._size = size + self._depth = depth + self._initialize() + + # Check size of image + if size != self._size: + raise ValueError("All images in a movie should have same size") + if depth != self._depth: + raise ValueError( + "All images in a movie should have same " "number of channels" + ) + + assert self._write_gen is not None # Check status + + # Write. Yes, we can send the data in as a numpy array + self._write_gen.send(im) + + def set_meta_data(self, meta): + raise RuntimeError( + "The ffmpeg format does not support setting " "meta data." 
+ ) + + def _initialize(self): + # Close existing generator + if self._write_gen is not None: + self._write_gen.close() + + # Get parameters + # Use None to let imageio-ffmpeg (or ffmpeg) select good results + fps = self.request.kwargs.get("fps", 10) + codec = self.request.kwargs.get("codec", None) + bitrate = self.request.kwargs.get("bitrate", None) + quality = self.request.kwargs.get("quality", None) + input_params = self.request.kwargs.get("input_params") or [] + output_params = self.request.kwargs.get("output_params") or [] + output_params += self.request.kwargs.get("ffmpeg_params") or [] + pixelformat = self.request.kwargs.get("pixelformat", None) + macro_block_size = self.request.kwargs.get("macro_block_size", 16) + ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None) + audio_path = self.request.kwargs.get("audio_path", None) + audio_codec = self.request.kwargs.get("audio_codec", None) + + macro_block_size = macro_block_size or 1 # None -> 1 + + # Create generator + self._write_gen = self._ffmpeg_api.write_frames( + self._filename, + self._size, + pix_fmt_in=self._pix_fmt, + pix_fmt_out=pixelformat, + fps=fps, + quality=quality, + bitrate=bitrate, + codec=codec, + macro_block_size=macro_block_size, + ffmpeg_log_level=ffmpeg_log_level, + input_params=input_params, + output_params=output_params, + audio_path=audio_path, + audio_codec=audio_codec, + ) + + # Seed the generator (this is where the ffmpeg subprocess starts) + self._write_gen.send(None) + + +class FrameCatcher(threading.Thread): + """Thread to keep reading the frame data from stdout. This is + useful when streaming from a webcam. Otherwise, if the user code + does not grab frames fast enough, the buffer will fill up, leading + to lag, and ffmpeg can also stall (experienced on Linux). The + get_frame() method always returns the last available image. 
+ """ + + def __init__(self, gen): + self._gen = gen + self._frame = None + self._frame_is_new = False + self._lock = threading.RLock() + threading.Thread.__init__(self) + self.daemon = True # do not let this thread hold up Python shutdown + self._should_stop = False + self.start() + + def stop_me(self): + self._should_stop = True + while self.is_alive(): + time.sleep(0.001) + + def get_frame(self): + while self._frame is None: # pragma: no cover - an init thing + time.sleep(0.001) + with self._lock: + is_new = self._frame_is_new + self._frame_is_new = False # reset + return self._frame, is_new + + def run(self): + # This runs in the worker thread + try: + while not self._should_stop: + time.sleep(0) # give control to other threads + frame = self._gen.__next__() + with self._lock: + self._frame = frame + self._frame_is_new = True + except (StopIteration, EOFError): + pass + + +def parse_device_names(ffmpeg_output): + """Parse the output of the ffmpeg -list-devices command""" + # Collect device names - get [friendly_name, alt_name] of each + device_names = [] + in_video_devices = False + for line in ffmpeg_output.splitlines(): + if line.startswith("[dshow"): + logger.debug(line) + line = line.split("]", 1)[1].strip() + if in_video_devices and line.startswith('"'): + friendly_name = line[1:-1] + device_names.append([friendly_name, ""]) + elif in_video_devices and line.lower().startswith("alternative name"): + alt_name = line.split(" name ", 1)[1].strip()[1:-1] + if sys.platform.startswith("win"): + alt_name = alt_name.replace("&", "^&") # Tested to work + else: + alt_name = alt_name.replace("&", "\\&") # Does this work? 
+ device_names[-1][-1] = alt_name + elif "video devices" in line: + in_video_devices = True + elif "devices" in line: + # set False for subsequent "devices" sections + in_video_devices = False + # Post-process, see #441 + # prefer friendly names, use alt name if two cams have same friendly name + device_names2 = [] + for friendly_name, alt_name in device_names: + if friendly_name not in device_names2: + device_names2.append(friendly_name) + elif alt_name: + device_names2.append(alt_name) + else: + device_names2.append(friendly_name) # duplicate, but not much we can do + return device_names2 diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/fits.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/fits.py new file mode 100644 index 0000000000000000000000000000000000000000..4617d1ea8c0cc2ce351151e700ff14651102685d --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/fits.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read FITS files. + +Backend Library: `Astropy `_ + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[fits] + +Flexible Image Transport System (FITS) is an open standard defining a +digital file format useful for storage, transmission and processing of +scientific and other images. FITS is the most commonly used digital +file format in astronomy. + + +Parameters +---------- +cache : bool + If the file name is a URL, `~astropy.utils.data.download_file` is used + to open the file. This specifies whether or not to save the file + locally in Astropy's download cache (default: `True`). +uint : bool + Interpret signed integer data where ``BZERO`` is the + central value and ``BSCALE == 1`` as unsigned integer + data. For example, ``int16`` data with ``BZERO = 32768`` + and ``BSCALE = 1`` would be treated as ``uint16`` data. + + Note, for backward compatibility, the kwarg **uint16** may + be used instead. 
_fits = None  # lazily loaded astropy.io.fits module (see load_lib)


def load_lib():
    """Import and cache ``astropy.io.fits``.

    Returns
    -------
    module
        The ``astropy.io.fits`` module; also stored in the module-level
        ``_fits`` global so subsequent calls are cheap.

    Raises
    ------
    ImportError
        If astropy is not installed.
    """
    global _fits
    try:
        from astropy.io import fits as _fits
    except ImportError:
        # BUGFIX: the concatenated literals previously rendered as
        # "package.Please refer" — spaces added between segments.
        raise ImportError(
            "The FITS format relies on the astropy package. "
            "Please refer to http://www.astropy.org/ "
            "for further instructions."
        )
    return _fits


class FitsFormat(Format):
    """See :mod:`imageio.plugins.fits`"""

    def _can_read(self, request):
        # We return True if ext matches, because this is the only plugin
        # that can. If astropy is not installed, a useful error follows.
        return request.extension in self.extensions

    def _can_write(self, request):
        # No write support
        return False

    # -- reader

    class Reader(Format.Reader):
        def _open(self, cache=False, **kwargs):
            # NOTE(review): ``cache=False`` overrides astropy's documented
            # default of True (the module docstring describes astropy's
            # default); imageio manages its own resources via Request.
            if not _fits:
                load_lib()
            hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs)

            # Build an index of the HDUs that actually hold image data.
            self._index = []
            allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU)
            for n, hdu in enumerate(hdulist):
                if isinstance(hdu, allowed_hdu_types):
                    # Ignore (primary) header units with no data (use '.size'
                    # rather than '.data' to avoid actually loading the image):
                    if hdu.size > 0:
                        self._index.append(n)
            self._hdulist = hdulist

        def _close(self):
            self._hdulist.close()

        def _get_length(self):
            # Number of image-bearing HDUs, not the raw HDU count.
            return len(self._index)

        def _get_data(self, index):
            if index < 0 or index >= len(self._index):
                raise IndexError("Index out of range while reading from fits")
            im = self._hdulist[self._index[index]].data
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # FITS headers are not exposed through imageio's meta-data API.
            raise RuntimeError("The fits format does not support meta data.")
class FreeimageFormat(Format):
    """See :mod:`imageio.plugins.freeimage`"""

    # Singleton images only; multi-image formats live in freeimagemulti.py.
    _modes = "i"

    def __init__(self, name, description, extensions=None, modes=None, *, fif=None):
        super().__init__(name, description, extensions=extensions, modes=modes)
        self._fif = fif  # FreeImage format id for this format

    @property
    def fif(self):
        return self._fif  # Set when format is created

    def _can_read(self, request):
        # Ask freeimage if it can read it, maybe ext missing
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "r", request.firstbytes)
                except Exception:  # pragma: no cover
                    request._fif = -1
            if request._fif == self.fif:
                return True
            elif request._fif == 7 and self.fif == 14:
                # PPM gets identified as PBM and PPM can read PBM
                # see: https://github.com/imageio/imageio/issues/677
                return True

    def _can_write(self, request):
        # Ask freeimage, because we are not aware of all formats
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "w")
                except ValueError:  # pragma: no cover
                    if request.raw_uri == RETURN_BYTES:
                        request._fif = self.fif
                    else:
                        request._fif = -1
            # BUGFIX: compare FIF ids by value, not identity. The previous
            # ``request._fif is self.fif`` only worked because CPython
            # interns small ints; ``_can_read`` already used ``==``.
            if request._fif == self.fif:
                return True

    # --

    class Reader(Format.Reader):
        def _get_length(self):
            return 1  # always a single image

        def _open(self, flags=0):
            self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags)
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_data(self, index):
            if index != 0:
                raise IndexError("This format only supports singleton images.")
            return self._bm.get_image_data(), self._bm.get_meta_data()

        def _get_meta_data(self, index):
            # index None means "global" meta data; 0 is the only image.
            if not (index is None or index == 0):
                raise IndexError()
            return self._bm.get_meta_data()

    # --

    class Writer(Format.Writer):
        def _open(self, flags=0):
            self._flags = flags  # Store flags for later use
            self._bm = None  # bitmap is created lazily in _append_data
            self._is_set = False  # To prevent appending more than one image
            self._meta = {}

        def _close(self):
            # NOTE(review): if no image was appended, self._bm is still
            # None here and these calls raise AttributeError — preserved
            # behavior; confirm against imageio core's calling contract.
            # Set global meta data
            self._bm.set_meta_data(self._meta)
            # Write and close
            self._bm.save_to_filename(self.request.get_local_filename())
            self._bm.close()

        def _append_data(self, im, meta):
            # Check if set
            if not self._is_set:
                self._is_set = True
            else:
                raise RuntimeError(
                    "Singleton image; " "can only append image data once."
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            # Lazy instantiation of the bitmap, we need image data
            if self._bm is None:
                self._bm = fi.create_bitmap(
                    self.request.filename, self.format.fif, self._flags
                )
                self._bm.allocate(im)
            # Set data
            self._bm.set_image_data(im)
            # There is no distinction between global and per-image meta data
            # for singleton images
            self._meta = meta

        def _set_meta_data(self, meta):
            self._meta = meta
class FreeimagePngFormat(FreeimageFormat):
    """A PNG format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    ignoregamma : bool
        Avoid gamma correction. Default True.

    Parameters for saving
    ---------------------
    compression : {0, 1, 6, 9}
        The compression factor. Higher factors result in more
        compression at the cost of speed. Note that PNG compression is
        always lossless. Default 9.
    quantize : int
        If specified, turn the given RGB or RGBA image in a paletted image
        for more efficient storage. The value should be between 2 and 256.
        If the value of 0 the image is not quantized.
    interlaced : bool
        Save using Adam7 interlacing. Default False.
    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, ignoregamma=True):
            # Translate the keyword into the corresponding freeimage flag.
            read_flags = int(flags)
            if ignoregamma:
                read_flags |= IO_FLAGS.PNG_IGNOREGAMMA
            return FreeimageFormat.Reader._open(self, read_flags)

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(self, flags=0, compression=9, quantize=0, interlaced=False):
            # Only the zlib levels freeimage exposes are accepted.
            compression_flags = {
                0: IO_FLAGS.PNG_Z_NO_COMPRESSION,
                1: IO_FLAGS.PNG_Z_BEST_SPEED,
                6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION,
                9: IO_FLAGS.PNG_Z_BEST_COMPRESSION,
            }
            if compression not in compression_flags:
                raise ValueError("Png compression must be 0, 1, 6, or 9.")
            write_flags = int(flags)
            if interlaced:
                write_flags |= IO_FLAGS.PNG_INTERLACED
            write_flags |= compression_flags[compression]
            return FreeimageFormat.Writer._open(self, write_flags)

        def _append_data(self, im, meta):
            # PNG supports 16-bit samples; everything else is saved as 8-bit.
            bitdepth = 16 if str(im.dtype) == "uint16" else 8
            im = image_as_uint(im, bitdepth=bitdepth)
            FreeimageFormat.Writer._append_data(self, im, meta)
            # Optionally convert to a paletted image after the data is set.
            q = int(self.request.kwargs.get("quantize", False))
            if q:
                if not (im.ndim == 3 and im.shape[-1] == 3):
                    raise ValueError("Can only quantize RGB images")
                if q < 2 or q > 256:
                    raise ValueError("PNG quantize param must be 2..256")
                quantized = self._bm.quantize(0, q)
                self._bm.close()
                self._bm = quantized
class FreeimageJpegFormat(FreeimageFormat):
    """A JPEG format based on the Freeimage library.

    This format supports grayscale and RGB images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    exifrotate : bool
        Automatically rotate the image according to the exif flag.
        Default True. If 2 is given, do the rotation in Python instead
        of freeimage.
    quickread : bool
        Read the image more quickly, at the expense of quality.
        Default False.

    Parameters for saving
    ---------------------
    quality : scalar
        The compression factor of the saved image (1..100), higher
        numbers result in higher quality but larger file size. Default 75.
    progressive : bool
        Save as a progressive JPEG file (e.g. for images on the web).
        Default False.
    optimize : bool
        On saving, compute optimal Huffman coding tables (can reduce a
        few percent of file size). Default False.
    baseline : bool
        Save basic JPEG, without metadata or any markers. Default False.
    """

    class Reader(FreeimageFormat.Reader):
        def _open(self, flags=0, exifrotate=True, quickread=False):
            # Let freeimage rotate unless rotation is disabled or deferred
            # to the Python fallback (exifrotate == 2).
            read_flags = int(flags)
            if exifrotate and exifrotate != 2:
                read_flags |= IO_FLAGS.JPEG_EXIFROTATE
            if not quickread:
                read_flags |= IO_FLAGS.JPEG_ACCURATE
            return FreeimageFormat.Reader._open(self, read_flags)

        def _get_data(self, index):
            im, meta = FreeimageFormat.Reader._get_data(self, index)
            return self._rotate(im, meta), meta

        def _rotate(self, im, meta):
            """Orient the image from the EXIF Orientation tag.

            Python-side fallback for freeimage's own EXIF rotation; only
            active when the user passed ``exifrotate=2``.
            """
            if self.request.kwargs.get("exifrotate", None) == 2:
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    return im  # Orientation not available
                # pragma: no cover - we cannot touch all cases
                # www.impulseadventure.com/photo/exif-orientation.html
                quarter_turns = {3: 2, 4: 2, 5: 3, 6: 3, 7: 1, 8: 1}
                if ori in quarter_turns:
                    im = np.rot90(im, quarter_turns[ori])
                if ori in (2, 4, 5, 7):  # Flipped cases (rare)
                    im = np.fliplr(im)
            return im

    # --

    class Writer(FreeimageFormat.Writer):
        def _open(
            self, flags=0, quality=75, progressive=False, optimize=False, baseline=False
        ):
            quality = int(quality)
            if not 1 <= quality <= 100:
                raise ValueError("JPEG quality should be between 1 and 100.")
            # Quality is encoded directly into the low bits of the flags.
            write_flags = int(flags) | quality
            if progressive:
                write_flags |= IO_FLAGS.JPEG_PROGRESSIVE
            if optimize:
                write_flags |= IO_FLAGS.JPEG_OPTIMIZE
            if baseline:
                write_flags |= IO_FLAGS.JPEG_BASELINE
            return FreeimageFormat.Writer._open(self, write_flags)

        def _append_data(self, im, meta):
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            return FreeimageFormat.Writer._append_data(self, im, meta)
+ """ + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, use_ascii=True): + # Build flags from kwargs + flags = int(flags) + if use_ascii: + flags |= IO_FLAGS.PNM_SAVE_ASCII + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py new file mode 100644 index 0000000000000000000000000000000000000000..bad53d40f9cdd979304982c4b2430d53b4872b27 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Plugin for multi-image freeimafe formats, like animated GIF and ico. +""" + +import logging +import numpy as np + +from ..core import Format, image_as_uint +from ._freeimage import fi, IO_FLAGS +from .freeimage import FreeimageFormat + +logger = logging.getLogger(__name__) + + +class FreeimageMulti(FreeimageFormat): + """Base class for freeimage formats that support multiple images.""" + + _modes = "iI" + _fif = -1 + + class Reader(Format.Reader): + def _open(self, flags=0): + flags = int(flags) + # Create bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.load_from_filename(self.request.get_local_filename()) + + def _close(self): + self._bm.close() + + def _get_length(self): + return len(self._bm) + + def _get_data(self, index): + sub = self._bm.get_page(index) + try: + return sub.get_image_data(), sub.get_meta_data() + finally: + sub.close() + + def _get_meta_data(self, index): + index = index or 0 + if index < 0 or index >= len(self._bm): + raise IndexError() + sub = self._bm.get_page(index) + try: + return sub.get_meta_data() + finally: + sub.close() + + # -- + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0): + # Set flags + 
self._flags = flags = int(flags) + # Instantiate multi-page bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.save_to_filename(self.request.get_local_filename()) + + def _close(self): + # Close bitmap + self._bm.close() + + def _append_data(self, im, meta): + # Prepare data + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Create sub bitmap + sub1 = fi.create_bitmap(self._bm._filename, self.format.fif) + # Let subclass add data to bitmap, optionally return new + sub2 = self._append_bitmap(im, meta, sub1) + # Add + self._bm.append_bitmap(sub2) + sub2.close() + if sub1 is not sub2: + sub1.close() + + def _append_bitmap(self, im, meta, bitmap): + # Set data + bitmap.allocate(im) + bitmap.set_image_data(im) + bitmap.set_meta_data(meta) + # Return that same bitmap + return bitmap + + def _set_meta_data(self, meta): + pass # ignore global meta data + + +class MngFormat(FreeimageMulti): + """An Mng format based on the Freeimage library. + + Read only. Seems broken. + """ + + _fif = 6 + + def _can_write(self, request): # pragma: no cover + return False + + +class IcoFormat(FreeimageMulti): + """An ICO format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. If this binary + is not available on the system, it can be downloaded by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + makealpha : bool + Convert to 32-bit and create an alpha channel from the AND- + mask when loading. Default False. Note that this returns wrong + results if the image was already RGBA. 
class IcoFormat(FreeimageMulti):
    """An ICO format based on the Freeimage library.

    This format supports grayscale, RGB and RGBA images.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    makealpha : bool
        Convert to 32-bit and create an alpha channel from the AND-
        mask when loading. Default False. Note that this returns wrong
        results if the image was already RGBA.
    """

    _fif = 1

    class Reader(FreeimageMulti.Reader):
        def _open(self, flags=0, makealpha=False):
            # Translate the keyword into the corresponding freeimage flag.
            read_flags = int(flags)
            if makealpha:
                read_flags |= IO_FLAGS.ICO_MAKEALPHA
            return FreeimageMulti.Reader._open(self, read_flags)
class GifFormat(FreeimageMulti):
    """A format for reading and writing static and animated GIF, based
    on the Freeimage library.

    Images read with this format are always RGBA. Currently,
    the alpha channel is ignored when saving RGB images with this
    format.

    Parameters for reading
    ----------------------
    playback : bool
        'Play' the GIF to generate each frame (as 32bpp) instead of
        returning raw frame data when loading. Default True.

    Parameters for saving
    ---------------------
    loop : int
        The number of iterations. Default 0 (meaning loop indefinitely)
    duration : {float, list}
        The duration (in seconds) of each frame. Either specify one value
        that is used for all frames, or one value for each frame. The GIF
        format stores durations in hundredths of a second, which limits
        precision.
    fps : float
        The number of frames per second. If duration is not given, the
        duration for each frame is set to 1/fps. Default 10.
    palettesize : int
        The number of colors to quantize the image to. Is rounded to
        the nearest power of two. Default 256.
    quantizer : {'wu', 'nq'}
        The quantization algorithm: 'wu' (Wu, Xiaolin, Efficient
        Statistical Computations for Optimal Color Quantization) or
        'nq' (neuquant, Dekker A. H., Kohonen neural networks for
        optimal color quantization).
    subrectangles : bool
        If True, will try and optimize the GIF by storing only the
        rectangular parts of each frame that change with respect to the
        previous. Unfortunately, this option seems currently broken
        because FreeImage does not handle DisposalMethod correctly.
        Default False.
    """

    _fif = 25

    class Reader(FreeimageMulti.Reader):
        def _open(self, flags=0, playback=True):
            flags = int(flags)
            if playback:
                flags |= IO_FLAGS.GIF_PLAYBACK
            FreeimageMulti.Reader._open(self, flags)

        def _get_data(self, index):
            im, meta = FreeimageMulti.Reader._get_data(self, index)
            # im = im[:, :, :3]  # Drop alpha channel
            return im, meta

    # -- writer

    class Writer(FreeimageMulti.Writer):
        # todo: subrectangles
        # todo: global palette

        def _open(
            self,
            flags=0,
            loop=0,
            duration=None,
            fps=10,
            palettesize=256,
            quantizer="Wu",
            subrectangles=False,
        ):
            # Check palettesize
            if palettesize < 2 or palettesize > 256:
                raise ValueError("GIF quantize param must be 2..256")
            if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
                # BUGFIX: round the *given* palettesize up to the next power
                # of two. The previous code computed
                # ``2 ** int(np.log2(128) + 0.999)`` — a constant — so every
                # non-power-of-two request silently became 128.
                palettesize = 2 ** int(np.log2(palettesize) + 0.999)
                logger.warning(
                    "Warning: palettesize (%r) modified to a factor of "
                    "two between 2-256." % palettesize
                )
            self._palettesize = palettesize
            # Check quantizer
            self._quantizer = {"wu": 0, "nq": 1}.get(quantizer.lower(), None)
            if self._quantizer is None:
                raise ValueError('Invalid quantizer, must be "wu" or "nq".')
            # Check frametime: per-frame delays in milliseconds
            if duration is None:
                self._frametime = [int(1000 / float(fps) + 0.5)]
            elif isinstance(duration, list):
                self._frametime = [int(1000 * d) for d in duration]
            elif isinstance(duration, (float, int)):
                self._frametime = [int(1000 * duration)]
            else:
                raise ValueError("Invalid value for duration: %r" % duration)
            # Check subrectangles
            self._subrectangles = bool(subrectangles)
            self._prev_im = None
            # Init
            FreeimageMulti.Writer._open(self, flags)
            # Set global meta data
            self._meta = {}
            self._meta["ANIMATION"] = {
                # 'GlobalPalette': np.array([0]).astype(np.uint8),
                "Loop": np.array([loop]).astype(np.uint32),
                # 'LogicalWidth': np.array([x]).astype(np.uint16),
                # 'LogicalHeight': np.array([x]).astype(np.uint16),
            }

        def _append_bitmap(self, im, meta, bitmap):
            # Prepare meta data
            meta = meta.copy()
            meta_a = meta["ANIMATION"] = {}
            # If this is the first frame, assign it our "global" meta data
            if len(self._bm) == 0:
                meta.update(self._meta)
                meta_a = meta["ANIMATION"]
            # Set frame time (the last given delay repeats for later frames)
            index = len(self._bm)
            if index < len(self._frametime):
                ft = self._frametime[index]
            else:
                ft = self._frametime[-1]
            meta_a["FrameTime"] = np.array([ft]).astype(np.uint32)
            # Drop alpha channel; it is ignored when saving GIF
            if im.ndim == 3 and im.shape[-1] == 4:
                im = im[:, :, :3]
            # Process subrectangles
            im_uncropped = im
            if self._subrectangles and self._prev_im is not None:
                im, xy = self._get_sub_rectangles(self._prev_im, im)
                meta_a["DisposalMethod"] = np.array([1]).astype(np.uint8)
                meta_a["FrameLeft"] = np.array([xy[0]]).astype(np.uint16)
                meta_a["FrameTop"] = np.array([xy[1]]).astype(np.uint16)
            self._prev_im = im_uncropped
            # Set image data
            sub2 = sub1 = bitmap
            sub1.allocate(im)
            sub1.set_image_data(im)
            # Quantize it if its RGB
            if im.ndim == 3 and im.shape[-1] == 3:
                sub2 = sub1.quantize(self._quantizer, self._palettesize)
            # Set meta data and return
            sub2.set_meta_data(meta)
            return sub2

        def _get_sub_rectangles(self, prev, im):
            """
            Calculate the minimal rectangles that need updating each frame.
            Returns a two-element tuple containing the cropped images and a
            list of x-y positions.
            """
            # Get difference, sum over colors
            diff = np.abs(im - prev)
            if diff.ndim == 3:
                diff = diff.sum(2)
            # Get begin and end for both dimensions
            X = np.argwhere(diff.sum(0))
            Y = np.argwhere(diff.sum(1))
            # Get rect coordinates
            if X.size and Y.size:
                x0, x1 = int(X[0]), int(X[-1]) + 1
                y0, y1 = int(Y[0]), int(Y[-1]) + 1
            else:  # No change ... make it minimal
                x0, x1 = 0, 2
                y0, y1 = 0, 2
            # Cut out and return
            return im[y0:y1, x0:x1], (x0, y0)
_gdal = None  # lazily loaded in load_lib()


def load_lib():
    """Import and cache ``osgeo.gdal``.

    Raises
    ------
    ImportError
        If the GDAL Python bindings are not installed.
    """
    global _gdal
    try:
        import osgeo.gdal as _gdal
    except ImportError:
        # BUGFIX: added the missing spaces between concatenated literals;
        # the message previously rendered as "package.Please refer to
        # http://www.gdal.org/for further instructions."
        raise ImportError(
            "The GDAL format relies on the GDAL package. "
            "Please refer to http://www.gdal.org/ "
            "for further instructions."
        )
    return _gdal


# BUGFIX: " .tif" contained a stray leading space, so a plain ".tif"
# extension could never match this tuple.
GDAL_FORMATS = (".tiff", ".tif", ".img", ".ecw", ".jpg", ".jpeg")


class GdalFormat(Format):
    """See :mod:`imageio.plugins.gdal`"""

    def _can_read(self, request):
        # .ecw can realistically only be read via GDAL, so claim it
        # unconditionally; otherwise require the backend to be importable.
        if request.extension in (".ecw",):
            return True
        if has_module("osgeo.gdal"):
            return request.extension in self.extensions

    def _can_write(self, request):
        # No write support
        return False

    # --

    class Reader(Format.Reader):
        def _open(self):
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())

        def _close(self):
            # Dropping the reference closes the GDAL dataset handle.
            del self._ds

        def _get_length(self):
            return 1

        def _get_data(self, index):
            if index != 0:
                raise IndexError("Gdal file contains only one dataset")
            return self._ds.ReadAsArray(), self._get_meta_data(index)

        def _get_meta_data(self, index):
            return self._ds.GetMetadata()
+""" + +import threading + +import numpy as np + +from ..core import Format + + +class BaseGrabFormat(Format): + """Base format for grab formats.""" + + _pillow_imported = False + _ImageGrab = None + + def __init__(self, *args, **kwargs): + super(BaseGrabFormat, self).__init__(*args, **kwargs) + self._lock = threading.RLock() + + def _can_write(self, request): + return False + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError("Imageio Pillow requires " "Pillow, not PIL!") + try: + from PIL import ImageGrab + except ImportError: + return None + self._ImageGrab = ImageGrab + return self._ImageGrab + + class Reader(Format.Reader): + def _open(self): + pass + + def _close(self): + pass + + def _get_data(self, index): + return self.format._get_data(index) + + +class ScreenGrabFormat(BaseGrabFormat): + """The ScreenGrabFormat provided a means to grab screenshots using + the uri of "". + + This functionality is provided via Pillow. Note that "" is + only supported on Windows and OS X. + + Parameters for reading + ---------------------- + No parameters. + """ + + def _can_read(self, request): + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grab() + assert pil_im is not None + im = np.asarray(pil_im) + return im, {} + + +class ClipboardGrabFormat(BaseGrabFormat): + """The ClipboardGrabFormat provided a means to grab image data from + the clipboard, using the uri "" + + This functionality is provided via Pillow. Note that "" is + only supported on Windows. + + Parameters for reading + ---------------------- + No parameters. 
+ """ + + def _can_read(self, request): + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grabclipboard() + if pil_im is None: + raise RuntimeError( + "There seems to be no image data on the " "clipboard now." + ) + im = np.asarray(pil_im) + return im, {} diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/npz.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/npz.py new file mode 100644 index 0000000000000000000000000000000000000000..87b37e44a0cc85671f42d1e25c775b687c709f71 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/npz.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read/Write NPZ files. + +Backend: `Numpy `_ + +NPZ is a file format by numpy that provides storage of array data using gzip +compression. This imageio plugin supports data of any shape, and also supports +multiple images per file. However, the npz format does not provide streaming; +all data is read/written at once. Further, there is no support for meta data. + +See the BSDF format for a similar (but more fully featured) format. + +Parameters +---------- +None + +Notes +----- +This format is not available on Pypy. 
class NpzFormat(Format):
    """See :mod:`imageio.plugins.npz`"""

    def _can_read(self, request):
        # We support any kind of image data, so only the extension matters.
        return request.extension in self.extensions

    def _can_write(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    # -- reader

    class Reader(Format.Reader):
        def _open(self):
            # Load npz file, which provides another file like object
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)
            # NOTE(review): sorts by the trailing "_<n>" component as a
            # *string*, so with >10 arrays "arr_10" sorts before "arr_2".
            # Kept as-is to preserve historical ordering.
            self._names = sorted(self._npz.files, key=lambda x: x.split("_")[-1])

        def _close(self):
            self._npz.close()

        def _get_length(self):
            return len(self._names)

        def _get_data(self, index):
            if index < 0 or index >= len(self._names):
                # BUGFIX: message previously said "nzp" instead of "npz".
                raise IndexError("Index out of range while reading from npz")
            im = self._npz[self._names[index]]
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            raise RuntimeError("The npz format does not support meta data.")

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            # Npz cannot stream to the file, so we buffer all appended
            # images and write them out at once in _close.
            self._images = []

        def _close(self):
            # Write everything
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data

        def set_meta_data(self, meta):
            # NOTE(review): named without the usual leading underscore and
            # thus overrides the public API method; kept for compatibility.
            raise RuntimeError("The npz format does not support meta data.")
+ +""" + +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import cv2 +import numpy as np + +from ..core import Request +from ..core.request import URI_BYTES, InitializationError, IOMode +from ..core.v3_plugin_api import ImageProperties, PluginV3 +from ..typing import ArrayLike + + +class OpenCVPlugin(PluginV3): + def __init__(self, request: Request) -> None: + super().__init__(request) + + self.file_handle = request.get_local_filename() + if request._uri_type is URI_BYTES: + self.filename = "" + else: + self.filename = request.raw_uri + + mode = request.mode.io_mode + if mode == IOMode.read and not cv2.haveImageReader(self.file_handle): + raise InitializationError(f"OpenCV can't read `{self.filename}`.") + elif mode == IOMode.write and not cv2.haveImageWriter(self.file_handle): + raise InitializationError(f"OpenCV can't write to `{self.filename}`.") + + def read( + self, + *, + index: int = None, + colorspace: Union[int, str] = None, + flags: int = cv2.IMREAD_COLOR, + ) -> np.ndarray: + """Read an image from the ImageResource. + + Parameters + ---------- + index : int, Ellipsis + If int, read the index-th image from the ImageResource. If ``...``, + read all images from the ImageResource and stack them along a new, + prepended, batch dimension. If None (default), use ``index=0`` if + the image contains exactly one image and ``index=...`` otherwise. + colorspace : str, int + The colorspace to convert into after loading and before returning + the image. If None (default) keep grayscale images as is, convert + images with an alpha channel to ``RGBA`` and all other images to + ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs + `conversion flags + `_ + and use it for conversion. If str, convert the image into the given + colorspace. Possible string values are: ``"RGB"``, ``"BGR"``, + ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``. + flags : int + The OpenCV flag(s) to pass to the reader. 
Refer to the `OpenCV docs + `_ + for details. + + Returns + ------- + ndimage : np.ndarray + The decoded image as a numpy array. + + """ + + if index is None: + n_images = cv2.imcount(self.file_handle, flags) + index = 0 if n_images == 1 else ... + + if index is ...: + retval, img = cv2.imreadmulti(self.file_handle, flags=flags) + is_batch = True + else: + retval, img = cv2.imreadmulti(self.file_handle, index, 1, flags=flags) + is_batch = False + + if retval is False: + raise ValueError(f"Could not read index `{index}` from `{self.filename}`.") + + if img[0].ndim == 2: + in_colorspace = "GRAY" + out_colorspace = colorspace or "GRAY" + elif img[0].shape[-1] == 4: + in_colorspace = "BGRA" + out_colorspace = colorspace or "RGBA" + else: + in_colorspace = "BGR" + out_colorspace = colorspace or "RGB" + + if isinstance(colorspace, int): + cvt_space = colorspace + elif in_colorspace == out_colorspace.upper(): + cvt_space = None + else: + out_colorspace = out_colorspace.upper() + cvt_space = getattr(cv2, f"COLOR_{in_colorspace}2{out_colorspace}") + + if cvt_space is not None: + img = np.stack([cv2.cvtColor(x, cvt_space) for x in img]) + else: + img = np.stack(img) + + return img if is_batch else img[0] + + def iter( + self, + colorspace: Union[int, str] = None, + flags: int = cv2.IMREAD_COLOR, + ) -> np.ndarray: + """Yield images from the ImageResource. + + Parameters + ---------- + colorspace : str, int + The colorspace to convert into after loading and before returning + the image. If None (default) keep grayscale images as is, convert + images with an alpha channel to ``RGBA`` and all other images to + ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs + `conversion flags + `_ + and use it for conversion. If str, convert the image into the given + colorspace. Possible string values are: ``"RGB"``, ``"BGR"``, + ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``. + flags : int + The OpenCV flag(s) to pass to the reader. 
Refer to the `OpenCV docs + `_ + for details. + + Yields + ------ + ndimage : np.ndarray + The decoded image as a numpy array. + + """ + for idx in range(cv2.imcount(self.file_handle)): + yield self.read(index=idx, flags=flags, colorspace=colorspace) + + def write( + self, + ndimage: Union[ArrayLike, List[ArrayLike]], + is_batch: bool = False, + params: List[int] = None, + ) -> Optional[bytes]: + """Save an ndimage in the ImageResource. + + Parameters + ---------- + ndimage : ArrayLike, List[ArrayLike] + The image data that will be written to the file. It is either a + single image, a batch of images, or a list of images. + is_batch : bool + If True, the provided ndimage is a batch of images. If False (default), the + provided ndimage is a single image. If the provided ndimage is a list of images, + this parameter has no effect. + params : List[int] + A list of parameters that will be passed to OpenCVs imwrite or + imwritemulti functions. Possible values are documented in the + `OpenCV documentation + `_. + + Returns + ------- + encoded_image : bytes, None + If the ImageResource is ``""`` the call to write returns the + encoded image as a bytes string. Otherwise it returns None. + + """ + + if isinstance(ndimage, list): + ndimage = np.stack(ndimage, axis=0) + elif not is_batch: + ndimage = ndimage[None, ...] + + if ndimage[0].ndim == 2: + n_channels = 1 + else: + n_channels = ndimage[0].shape[-1] + + if n_channels == 1: + ndimage_cv2 = [x for x in ndimage] + elif n_channels == 4: + ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGBA2BGRA) for x in ndimage] + else: + ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in ndimage] + + retval = cv2.imwritemulti(self.file_handle, ndimage_cv2, params) + + if retval is False: + # not sure what scenario would trigger this, but + # it can occur theoretically. 
+ raise IOError("OpenCV failed to write.") # pragma: no cover + + if self.request._uri_type == URI_BYTES: + return Path(self.file_handle).read_bytes() + + def properties( + self, + index: int = None, + colorspace: Union[int, str] = None, + flags: int = cv2.IMREAD_COLOR, + ) -> ImageProperties: + """Standardized image metadata. + + Parameters + ---------- + index : int, Ellipsis + If int, get the properties of the index-th image in the + ImageResource. If ``...``, get the properties of the image stack + that contains all images. If None (default), use ``index=0`` if the + image contains exactly one image and ``index=...`` otherwise. + colorspace : str, int + The colorspace to convert into after loading and before returning + the image. If None (default) keep grayscale images as is, convert + images with an alpha channel to ``RGBA`` and all other images to + ``RGB``. If int, interpret ``colorspace`` as one of OpenCVs + `conversion flags + `_ + and use it for conversion. If str, convert the image into the given + colorspace. Possible string values are: ``"RGB"``, ``"BGR"``, + ``"RGBA"``, ``"BGRA"``, ``"GRAY"``, ``"HSV"``, or ``"LAB"``. + flags : int + The OpenCV flag(s) to pass to the reader. Refer to the `OpenCV docs + `_ + for details. + + Returns + ------- + props : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + Reading properties with OpenCV involves decoding pixel data, because + OpenCV doesn't provide a direct way to access metadata. 
+ + """ + + if index is None: + n_images = cv2.imcount(self.file_handle, flags) + is_batch = n_images > 1 + elif index is Ellipsis: + n_images = cv2.imcount(self.file_handle, flags) + is_batch = True + else: + is_batch = False + + # unfortunately, OpenCV doesn't allow reading shape without reading pixel data + if is_batch: + img = self.read(index=0, flags=flags, colorspace=colorspace) + return ImageProperties( + shape=(n_images, *img.shape), + dtype=img.dtype, + n_images=n_images, + is_batch=True, + ) + + img = self.read(index=index, flags=flags, colorspace=colorspace) + return ImageProperties(shape=img.shape, dtype=img.dtype, is_batch=False) + + def metadata( + self, index: int = None, exclude_applied: bool = True + ) -> Dict[str, Any]: + """Format-specific metadata. + + .. warning:: + OpenCV does not support reading metadata. When called, this function + will emit a ``UserWarning`` and return an empty dict. + + Parameters + ---------- + index : int + This parameter has no effect. + exclude_applied : bool + This parameter has no effect. + + """ + + warnings.warn("OpenCV does not support reading metadata.", UserWarning) + return dict() diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow.py new file mode 100644 index 0000000000000000000000000000000000000000..8826f35cc2c52fc3827fa826894db423b3661719 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow.py @@ -0,0 +1,613 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write images using Pillow/PIL. + +Backend Library: `Pillow `_ + +Plugin that wraps the Pillow library. Pillow is a friendly fork of PIL +(Python Image Library) and supports reading and writing of common formats (jpg, +png, gif, tiff, ...). For the complete list of features and supported formats +please refer to Pillow's official docs (see the Backend Library link). 
+ +Parameters +---------- +request : Request + A request object representing the resource to be operated on. + +Methods +------- + +.. autosummary:: + :toctree: _plugins/pillow + + PillowPlugin.read + PillowPlugin.write + PillowPlugin.iter + PillowPlugin.get_meta + +""" + +import sys +import warnings +from io import BytesIO +from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union, cast + +import numpy as np +from PIL import ExifTags, GifImagePlugin, Image, ImageSequence, UnidentifiedImageError +from PIL import __version__ as pil_version  # type: ignore + +from ..core.request import URI_BYTES, InitializationError, IOMode, Request +from ..core.v3_plugin_api import ImageProperties, PluginV3 +from ..typing import ArrayLike + + +def pillow_version() -> Tuple[int, ...]: + return tuple(int(x) for x in pil_version.split(".")) + + +def _exif_orientation_transform(orientation: int, mode: str) -> Callable: + # get transformation that transforms an image from a + # given EXIF orientation into the standard orientation + + # flip axis is -2 if the mode has a color channel, -1 otherwise + axis = -2 if Image.getmodebands(mode) > 1 else -1 + + EXIF_ORIENTATION = { + 1: lambda x: x, + 2: lambda x: np.flip(x, axis=axis), + 3: lambda x: np.rot90(x, k=2), + 4: lambda x: np.flip(x, axis=axis - 1), + 5: lambda x: np.flip(np.rot90(x, k=3), axis=axis), + 6: lambda x: np.rot90(x, k=3), + 7: lambda x: np.flip(np.rot90(x, k=1), axis=axis), + 8: lambda x: np.rot90(x, k=1), + } + + return EXIF_ORIENTATION[orientation] + + +class PillowPlugin(PluginV3): + def __init__(self, request: Request) -> None: + """Instantiate a new Pillow Plugin Object + + Parameters + ---------- + request : {Request} + A request object representing the resource to be operated on. 
+ + """ + + super().__init__(request) + + # Register HEIF opener for Pillow + try: + from pillow_heif import register_heif_opener + except ImportError: + pass + else: + register_heif_opener() + + # Register AVIF opener for Pillow + try: + from pillow_heif import register_avif_opener + except ImportError: + pass + else: + register_avif_opener() + + self._image: Image = None + self.images_to_write = [] + + if request.mode.io_mode == IOMode.read: + try: + with Image.open(request.get_file()): + # Check if it is generally possible to read the image. + # This will not read any data and merely try to find a + # compatible pillow plugin (ref: the pillow docs). + pass + except UnidentifiedImageError: + if request._uri_type == URI_BYTES: + raise InitializationError( + "Pillow can not read the provided bytes." + ) from None + else: + raise InitializationError( + f"Pillow can not read {request.raw_uri}." + ) from None + + self._image = Image.open(self._request.get_file()) + else: + self.save_args = {} + + extension = self.request.extension or self.request.format_hint + if extension is None: + warnings.warn( + "Can't determine file format to write as. You _must_" + " set `format` during write or the call will fail. Use " + "`extension` to supress this warning. ", + UserWarning, + ) + return + + tirage = [Image.preinit, Image.init] + for format_loader in tirage: + format_loader() + if extension in Image.registered_extensions().keys(): + return + + raise InitializationError( + f"Pillow can not write `{extension}` files." + ) from None + + def close(self) -> None: + self._flush_writer() + + if self._image: + self._image.close() + + self._request.finish() + + def read( + self, + *, + index: int = None, + mode: str = None, + rotate: bool = False, + apply_gamma: bool = False, + writeable_output: bool = True, + pilmode: str = None, + exifrotate: bool = None, + as_gray: bool = None, + ) -> np.ndarray: + """ + Parses the given URI and creates a ndarray from it. 
+ + Parameters + ---------- + index : int + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return it. + If index is an ellipsis (...), read all ndimages in the file and + stack them along a new batch dimension and return them. If index is + None, this plugin reads the first image of the file (index=0) unless + the image is a GIF or APNG, in which case all images are read + (index=...). + mode : str + Convert the image to the given mode before returning it. If None, + the mode will be left unchanged. Possible modes can be found at: + https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes + rotate : bool + If True and the image contains an EXIF orientation tag, + apply the orientation before returning the ndimage. + apply_gamma : bool + If True and the image contains metadata about gamma, apply gamma + correction to the image. + writable_output : bool + If True, ensure that the image is writable before returning it to + the user. This incurs a full copy of the pixel data if the data + served by pillow is read-only. Consequentially, setting this flag to + False improves performance for some images. + pilmode : str + Deprecated, use `mode` instead. + exifrotate : bool + Deprecated, use `rotate` instead. + as_gray : bool + Deprecated. Exists to raise a constructive error message. + + Returns + ------- + ndimage : ndarray + A numpy array containing the loaded image data + + Notes + ----- + If you read a paletted image (e.g. GIF) then the plugin will apply the + palette by default. Should you wish to read the palette indices of each + pixel use ``mode="P"``. The coresponding color pallete can be found in + the image's metadata using the ``palette`` key when metadata is + extracted using the ``exclude_applied=False`` kwarg. The latter is + needed, as palettes are applied by default and hence excluded by default + to keep metadata and pixel data consistent. 
+ + """ + + if pilmode is not None: + warnings.warn( + "`pilmode` is deprecated. Use `mode` instead.", DeprecationWarning + ) + mode = pilmode + + if exifrotate is not None: + warnings.warn( + "`exifrotate` is deprecated. Use `rotate` instead.", DeprecationWarning + ) + rotate = exifrotate + + if as_gray is not None: + raise TypeError( + "The keyword `as_gray` is no longer supported." + "Use `mode='F'` for a backward-compatible result, or " + " `mode='L'` for an integer-valued result." + ) + + if self._image.format == "GIF": + # Converting GIF P frames to RGB + # https://github.com/python-pillow/Pillow/pull/6150 + GifImagePlugin.LOADING_STRATEGY = ( + GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY + ) + + if index is None: + if self._image.format == "GIF": + index = Ellipsis + elif self._image.custom_mimetype == "image/apng": + index = Ellipsis + else: + index = 0 + + if isinstance(index, int): + # will raise IO error if index >= number of frames in image + self._image.seek(index) + image = self._apply_transforms( + self._image, mode, rotate, apply_gamma, writeable_output + ) + else: + iterator = self.iter( + mode=mode, + rotate=rotate, + apply_gamma=apply_gamma, + writeable_output=writeable_output, + ) + image = np.stack([im for im in iterator], axis=0) + + return image + + def iter( + self, + *, + mode: str = None, + rotate: bool = False, + apply_gamma: bool = False, + writeable_output: bool = True, + ) -> Iterator[np.ndarray]: + """ + Iterate over all ndimages/frames in the URI + + Parameters + ---------- + mode : {str, None} + Convert the image to the given mode before returning it. If None, + the mode will be left unchanged. Possible modes can be found at: + https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes + rotate : {bool} + If set to ``True`` and the image contains an EXIF orientation tag, + apply the orientation before returning the ndimage. 
+ apply_gamma : {bool} + If ``True`` and the image contains metadata about gamma, apply gamma + correction to the image. + writable_output : bool + If True, ensure that the image is writable before returning it to + the user. This incurs a full copy of the pixel data if the data + served by pillow is read-only. Consequentially, setting this flag to + False improves performance for some images. + """ + + for im in ImageSequence.Iterator(self._image): + yield self._apply_transforms( + im, mode, rotate, apply_gamma, writeable_output + ) + + def _apply_transforms( + self, image, mode, rotate, apply_gamma, writeable_output + ) -> np.ndarray: + if mode is not None: + image = image.convert(mode) + elif image.mode == "P": + # adjust for pillow9 changes + # see: https://github.com/python-pillow/Pillow/issues/5929 + image = image.convert(image.palette.mode) + elif image.format == "PNG" and image.mode == "I": + major, minor, patch = pillow_version() + + if sys.byteorder == "little": + desired_mode = "I;16" + else: # pragma: no cover + # can't test big-endian in GH-Actions + desired_mode = "I;16B" + + if major < 10: # pragma: no cover + warnings.warn( + "Loading 16-bit (uint16) PNG as int32 due to limitations " + "in pillow's PNG decoder. 
This will be fixed in a future " + "version of pillow which will make this warning dissapear.", + UserWarning, + ) + elif minor < 1: # pragma: no cover + # pillow<10.1.0 can directly decode into 16-bit grayscale + image.mode = desired_mode + else: + # pillow >= 10.1.0 + image = image.convert(desired_mode) + + image = np.asarray(image) + + meta = self.metadata(index=self._image.tell(), exclude_applied=False) + if rotate and "Orientation" in meta: + transformation = _exif_orientation_transform( + meta["Orientation"], self._image.mode + ) + image = transformation(image) + + if apply_gamma and "gamma" in meta: + gamma = float(meta["gamma"]) + scale = float(65536 if image.dtype == np.uint16 else 255) + gain = 1.0 + image = ((image / scale) ** gamma) * scale * gain + 0.4999 + image = np.round(image).astype(np.uint8) + + if writeable_output and not image.flags["WRITEABLE"]: + image = np.array(image) + + return image + + def write( + self, + ndimage: Union[ArrayLike, List[ArrayLike]], + *, + mode: str = None, + format: str = None, + is_batch: bool = None, + **kwargs, + ) -> Optional[bytes]: + """ + Write an ndimage to the URI specified in path. + + If the URI points to a file on the current host and the file does not + yet exist it will be created. If the file exists already, it will be + appended if possible; otherwise, it will be replaced. + + If necessary, the image is broken down along the leading dimension to + fit into individual frames of the chosen format. If the format doesn't + support multiple frames, and IOError is raised. + + Parameters + ---------- + image : ndarray or list + The ndimage to write. If a list is given each element is expected to + be an ndimage. + mode : str + Specify the image's color format. If None (default), the mode is + inferred from the array's shape and dtype. Possible modes can be + found at: + https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes + format : str + Optional format override. 
If omitted, the format to use is + determined from the filename extension. If a file object was used + instead of a filename, this parameter must always be used. + is_batch : bool + Explicitly tell the writer that ``image`` is a batch of images + (True) or not (False). If None, the writer will guess this from the + provided ``mode`` or ``image.shape``. While the latter often works, + it may cause problems for small images due to aliasing of spatial + and color-channel axes. + kwargs : ... + Extra arguments to pass to pillow. If a writer doesn't recognise an + option, it is silently ignored. The available options are described + in pillow's `image format documentation + `_ + for each writer. + + Notes + ----- + When writing batches of very narrow (2-4 pixels wide) gray images set + the ``mode`` explicitly to avoid the batch being identified as a colored + image. + + """ + if "fps" in kwargs: + warnings.warn( + "The keyword `fps` is no longer supported. Use `duration`" + "(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50).", + DeprecationWarning, + ) + kwargs["duration"] = 1000 * 1 / kwargs.get("fps") + + if isinstance(ndimage, list): + ndimage = np.stack(ndimage, axis=0) + is_batch = True + else: + ndimage = np.asarray(ndimage) + + # check if ndimage is a batch of frames/pages (e.g. for writing GIF) + # if mode is given, use it; otherwise fall back to image.ndim only + if is_batch is not None: + pass + elif mode is not None: + is_batch = ( + ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2 + ) + elif ndimage.ndim == 2: + is_batch = False + elif ndimage.ndim == 3 and ndimage.shape[-1] == 1: + raise ValueError("Can't write images with one color channel.") + elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]: + # Note: this makes a channel-last assumption + is_batch = False + else: + is_batch = True + + if not is_batch: + ndimage = ndimage[None, ...] 
+ + for frame in ndimage: + pil_frame = Image.fromarray(frame, mode=mode) + if "bits" in kwargs: + pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"]) + self.images_to_write.append(pil_frame) + + if ( + format is not None + and "format" in self.save_args + and self.save_args["format"] != format + ): + old_format = self.save_args["format"] + warnings.warn( + "Changing the output format during incremental" + " writes is strongly discouraged." + f" Was `{old_format}`, is now `{format}`.", + UserWarning, + ) + + extension = self.request.extension or self.request.format_hint + self.save_args["format"] = format or Image.registered_extensions()[extension] + self.save_args.update(kwargs) + + # when writing to `bytes` we flush instantly + result = None + if self._request._uri_type == URI_BYTES: + self._flush_writer() + file = cast(BytesIO, self._request.get_file()) + result = file.getvalue() + + return result + + def _flush_writer(self): + if len(self.images_to_write) == 0: + return + + primary_image = self.images_to_write.pop(0) + + if len(self.images_to_write) > 0: + self.save_args["save_all"] = True + self.save_args["append_images"] = self.images_to_write + + primary_image.save(self._request.get_file(), **self.save_args) + self.images_to_write.clear() + self.save_args.clear() + + def get_meta(self, *, index=0) -> Dict[str, Any]: + return self.metadata(index=index, exclude_applied=False) + + def metadata( + self, index: int = None, exclude_applied: bool = True + ) -> Dict[str, Any]: + """Read ndimage metadata. + + Parameters + ---------- + index : {integer, None} + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + metadata. If index is an ellipsis (...), read and return global + metadata. If index is None, this plugin reads metadata from the + first image of the file (index=0) unless the image is a GIF or APNG, + in which case global metadata is read (index=...). 
+ exclude_applied : bool + If True, exclude metadata fields that are applied to the image while + reading. For example, if the binary data contains a rotation flag, + the image is rotated by default and the rotation flag is excluded + from the metadata to avoid confusion. + + Returns + ------- + metadata : dict + A dictionary of format-specific metadata. + + """ + + if index is None: + if self._image.format == "GIF": + index = Ellipsis + elif self._image.custom_mimetype == "image/apng": + index = Ellipsis + else: + index = 0 + + if isinstance(index, int) and self._image.tell() != index: + self._image.seek(index) + + metadata = self._image.info.copy() + metadata["mode"] = self._image.mode + metadata["shape"] = self._image.size + + if self._image.mode == "P" and not exclude_applied: + metadata["palette"] = np.asarray(tuple(self._image.palette.colors.keys())) + + if self._image.getexif(): + exif_data = { + ExifTags.TAGS.get(key, "unknown"): value + for key, value in dict(self._image.getexif()).items() + } + exif_data.pop("unknown", None) + metadata.update(exif_data) + + if exclude_applied: + metadata.pop("Orientation", None) + + return metadata + + def properties(self, index: int = None) -> ImageProperties: + """Standardized ndimage metadata + Parameters + ---------- + index : int + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + properties. If index is an ellipsis (...), read and return the + properties of all ndimages in the file stacked along a new batch + dimension. If index is None, this plugin reads and returns the + properties of the first image (index=0) unless the image is a GIF or + APNG, in which case it reads and returns the properties all images + (index=...). + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + This does not decode pixel data and is fast for large images. 
+ + """ + + if index is None: + if self._image.format == "GIF": + index = Ellipsis + elif self._image.custom_mimetype == "image/apng": + index = Ellipsis + else: + index = 0 + + if index is Ellipsis: + self._image.seek(0) + else: + self._image.seek(index) + + if self._image.mode == "P": + # mode of palette images is determined by their palette + mode = self._image.palette.mode + else: + mode = self._image.mode + + width: int = self._image.width + height: int = self._image.height + shape: Tuple[int, ...] = (height, width) + + n_frames: Optional[int] = None + if index is ...: + n_frames = getattr(self._image, "n_frames", 1) + shape = (n_frames, *shape) + + dummy = np.asarray(Image.new(mode, (1, 1))) + pil_shape: Tuple[int, ...] = dummy.shape + if len(pil_shape) > 2: + shape = (*shape, *pil_shape[2:]) + + return ImageProperties( + shape=shape, + dtype=dummy.dtype, + n_images=n_frames, + is_batch=index is Ellipsis, + ) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_info.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_info.py new file mode 100644 index 0000000000000000000000000000000000000000..59b971ce792cca172764da7f2faf8f0654547643 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_info.py @@ -0,0 +1,1053 @@ +# -*- coding: utf-8 -*- + +# styletest: ignore E122 E123 E501 + +""" +Module that contain info about the Pillow formats. The first part of +this module generates this info and writes it to its own bottom half +if run as a script. +""" + +import warnings + +warnings.warn( + "The `PillowFormat` plugin is deprecated and will be removed in ImageIO v3." 
+ " Use the new `PillowPlugin` instead.", + DeprecationWarning, +) + + +def generate_info(): # pragma: no cover + from urllib.request import urlopen + import PIL + from PIL import Image + + Image.init() + + ids = [] + formats = [] + docs = {} + + # Collect formats and their summary from plugin modules + for mod_name in dir(PIL): + if "ImagePlugin" in mod_name: + mod = getattr(PIL, mod_name) + for ob_name in dir(mod): + ob = getattr(mod, ob_name) + if isinstance(ob, type) and issubclass(ob, Image.Image): + if ob.format in ids: + print("Found duplicate for", ob.format) + else: + ids.append(ob.format) + formats.append((ob.format, ob.format_description)) + + # Add extension info + for i in range(len(formats)): + id, summary = formats[i] + ext = " ".join([e for e in Image.EXTENSION if Image.EXTENSION[e] == id]) + formats[i] = id, summary, ext + + # Get documentation of formats + url = "https://raw.githubusercontent.com/python-pillow/Pillow/master/docs/handbook/image-file-formats.rst" # noqa + lines = urlopen(url).read().decode().splitlines() + lines.append("End") + lines.append("---") # for the end + + # Parse documentation + cur_name = "" + cur_part = [] + for i in range(len(lines)): + line = lines[i] + if line.startswith(("^^^", "---", "===")): + if cur_name and cur_name in ids: + text = "\n".join(cur_part[:-1]) + text = text.replace("versionadded::", "versionadded:: Pillow ") + text = text.replace("Image.open`", "Image.write`") + docs[cur_name] = text + cur_part = [] + cur_name = lines[i - 1].strip().replace(" ", "").upper() + else: + cur_part.append(" " + line) + + # Fill in the blancs + for id in ids: + if id in docs: + docs[id] = "*From the Pillow docs:*\n\n" + docs[id] + else: + docs[id] = "No docs for %s." % id + print("no docs for", id) + + # Sort before writing + formats.sort(key=lambda x: x[0]) + ids.sort() + + # Read file ... 
+ code = open(__file__, "rb").read().decode() + code, divider, _ = code.partition("## BELOW IS " + "AUTOGENERATED") + code += divider + "\n\n" + + # Write formats + code += "pillow_formats = [\n" + for i in range(len(formats)): + print(formats[i]) + code += " (%r, %r, %r),\n" % formats[i] + code += " ]\n\n\n" + + # Write docs + code += "pillow_docs = {\n" + for id in ids: + code += '%r:\nu"""%s""",\n' % (id, docs[id]) + code += "}\n" + + # Write back + with open(__file__, "wb") as f: + f.write(code.encode()) + + +if __name__ == "__main__": + generate_info() + + +# BELOW IS AUTOGENERATED + +pillow_formats = [ + ("BMP", "Windows Bitmap", ".bmp"), + ("BUFR", "BUFR", ".bufr"), + ("CUR", "Windows Cursor", ".cur"), + ("DCX", "Intel DCX", ".dcx"), + ("DDS", "DirectDraw Surface", ".dds"), + ("DIB", "Windows Bitmap", ""), + ("EPS", "Encapsulated Postscript", ".ps .eps"), + ("FITS", "FITS", ".fit .fits"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc"), + ("FPX", "FlashPix", ".fpx"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu"), + ("GBR", "GIMP brush file", ".gbr"), + ("GIF", "Compuserve GIF", ".gif"), + ("GRIB", "GRIB", ".grib"), + ("HDF5", "HDF5", ".h5 .hdf"), + ("ICNS", "Mac OS icns resource", ".icns"), + ("ICO", "Windows Icon", ".ico"), + ("IM", "IFUNC Image Memory", ".im"), + ("IMT", "IM Tools", ""), + ("IPTC", "IPTC/NAA", ".iim"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg"), + ("JPEG2000", "JPEG 2000 (ISO 15444)", ".jp2 .j2k .jpc .jpf .jpx .j2c"), + ("MCIDAS", "McIdas area file", ""), + ("MIC", "Microsoft Image Composer", ".mic"), + ("MPEG", "MPEG", ".mpg .mpeg"), + ("MPO", "MPO (CIPA DC-007)", ".mpo"), + ("MSP", "Windows Paint", ".msp"), + ("PCD", "Kodak PhotoCD", ".pcd"), + ("PCX", "Paintbrush", ".pcx"), + ("PIXAR", "PIXAR raster image", ".pxr"), + ("PNG", "Portable network graphics", ".png"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm"), + ("PSD", "Adobe Photoshop", ".psd"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi"), + 
("SPIDER", "Spider 2D image", ""), + ("SUN", "Sun Raster File", ".ras"), + ("TGA", "Targa", ".tga"), + ("TIFF", "Adobe TIFF", ".tif .tiff"), + ("WMF", "Windows Metafile", ".wmf .emf"), + ("XBM", "X11 Bitmap", ".xbm"), + ("XPM", "X11 Pixel Map", ".xpm"), + ("XVThumb", "XV thumbnail image", ""), +] + + +pillow_docs = { + "BMP": """*From the Pillow docs:* + + + PIL reads and writes Windows and OS/2 BMP files containing ``1``, ``L``, ``P``, + or ``RGB`` data. 16-colour images are read as ``P`` images. Run-length encoding + is not supported. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Set to ``bmp_rle`` if the file is run-length encoded. + """, + "BUFR": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.3 + + PIL provides a stub driver for BUFR files. + + To add read or write support to your application, use + :py:func:`PIL.BufrStubImagePlugin.register_handler`. + """, + "CUR": """*From the Pillow docs:* + + + CUR is used to store cursors on Windows. The CUR decoder reads the largest + available cursor. Animated cursors are not supported. + """, + "DCX": """*From the Pillow docs:* + + + DCX is a container file format for PCX files, defined by Intel. The DCX format + is commonly used in fax applications. The DCX decoder can read files containing + ``1``, ``L``, ``P``, or ``RGB`` data. + + When the file is opened, only the first image is read. You can use + :py:meth:`~file.seek` or :py:mod:`~PIL.ImageSequence` to read other images. + + """, + "DDS": """*From the Pillow docs:* + + + DDS is a popular container texture format used in video games and natively + supported by DirectX. + Currently, DXT1, DXT3, and DXT5 pixel formats are supported and only in ``RGBA`` + mode. + + .. 
versionadded:: Pillow 3.4.0 DXT3 + """, + "DIB": """No docs for DIB.""", + "EPS": """*From the Pillow docs:* + + + PIL identifies EPS files containing image data, and can read files that contain + embedded raster images (ImageData descriptors). If Ghostscript is available, + other EPS files can be read as well. The EPS driver can also write EPS + images. The EPS driver can read EPS images in ``L``, ``LAB``, ``RGB`` and + ``CMYK`` mode, but Ghostscript may convert the images to ``RGB`` mode rather + than leaving them in the original color space. The EPS driver can write images + in ``L``, ``RGB`` and ``CMYK`` modes. + + If Ghostscript is available, you can call the :py:meth:`~PIL.Image.Image.load` + method with the following parameter to affect how Ghostscript renders the EPS + + **scale** + Affects the scale of the resultant rasterized image. If the EPS suggests + that the image be rendered at 100px x 100px, setting this parameter to + 2 will make the Ghostscript render a 200px x 200px image instead. The + relative position of the bounding box is maintained:: + + im = Image.open(...) + im.size #(100,100) + im.load(scale=2) + im.size #(200,200) + """, + "FITS": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for FITS files. + + To add read or write support to your application, use + :py:func:`PIL.FitsStubImagePlugin.register_handler`. + """, + "FLI": """No docs for FLI.""", + "FPX": """*From the Pillow docs:* + + + PIL reads Kodak FlashPix files. In the current version, only the highest + resolution image is read from the file, and the viewing transform is not taken + into account. + + .. note:: + + To enable full FlashPix support, you need to build and install the IJG JPEG + library before building the Python Imaging Library. See the distribution + README for details. + """, + "FTEX": """*From the Pillow docs:* + + + .. 
versionadded:: Pillow 3.2.0 + + The FTEX decoder reads textures used for 3D objects in + Independence War 2: Edge Of Chaos. The plugin reads a single texture + per file, in the compressed and uncompressed formats. + """, + "GBR": """*From the Pillow docs:* + + + The GBR decoder reads GIMP brush files, version 1 and 2. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **comment** + The brush name. + + **spacing** + The spacing between the brushes, in pixels. Version 2 only. + + GD + ^^ + + PIL reads uncompressed GD files. Note that this file format cannot be + automatically identified, so you must use :py:func:`PIL.GdImageFile.open` to + read such a file. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "GIF": """*From the Pillow docs:* + + + PIL reads GIF87a and GIF89a versions of the GIF file format. The library writes + run-length encoded files in GIF87a by default, unless GIF89a features + are used or GIF89a is already in use. + + Note that GIF files are always read as grayscale (``L``) + or palette mode (``P``) images. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **background** + Default background color (a palette color index). + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + + **version** + Version (either ``GIF87a`` or ``GIF89a``). + + **duration** + May not be present. The time to display the current frame + of the GIF, in milliseconds. + + **loop** + May not be present. The number of times the GIF should loop. + + Reading sequences + ~~~~~~~~~~~~~~~~~ + + The GIF loader supports the :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods. 
You can seek to the next frame (``im.seek(im.tell() + 1)``), or rewind + the file by seeking to the first frame. Random access is not supported. + + ``im.seek()`` raises an ``EOFError`` if you try to seek after the last frame. + + Saving + ~~~~~~ + + When calling :py:meth:`~PIL.Image.Image.save`, the following options + are available:: + + im.save(out, save_all=True, append_images=[im1, im2, ...]) + + **save_all** + If present and true, all frames of the image will be saved. If + not, then only the first frame of a multiframe image will be saved. + + **append_images** + A list of images to append as additional frames. Each of the + images in the list can be single or multiframe images. + This is currently only supported for GIF, PDF, TIFF, and WebP. + + **duration** + The display duration of each frame of the multiframe gif, in + milliseconds. Pass a single integer for a constant duration, or a + list or tuple to set the duration for each frame separately. + + **loop** + Integer number of times the GIF should loop. + + **optimize** + If present and true, attempt to compress the palette by + eliminating unused colors. This is only useful if the palette can + be compressed to the next smaller power of 2 elements. + + **palette** + Use the specified palette for the saved image. The palette should + be a bytes or bytearray object containing the palette entries in + RGBRGB... form. It should be no more than 768 bytes. Alternately, + the palette can be passed in as an + :py:class:`PIL.ImagePalette.ImagePalette` object. + + **disposal** + Indicates the way in which the graphic is to be treated after being displayed. + + * 0 - No disposal specified. + * 1 - Do not dispose. + * 2 - Restore to background color. + * 3 - Restore to previous content. + + Pass a single integer for a constant disposal, or a list or tuple + to set the disposal for each frame separately. 
+ + Reading local images + ~~~~~~~~~~~~~~~~~~~~ + + The GIF loader creates an image memory the same size as the GIF file’s *logical + screen size*, and pastes the actual pixel data (the *local image*) into this + image. If you only want the actual pixel rectangle, you can manipulate the + :py:attr:`~PIL.Image.Image.size` and :py:attr:`~PIL.Image.Image.tile` + attributes before loading the file:: + + im = Image.open(...) + + if im.tile[0][0] == "gif": + # only read the first "local image" from this GIF file + tag, (x0, y0, x1, y1), offset, extra = im.tile[0] + im.size = (x1 - x0, y1 - y0) + im.tile = [(tag, (0, 0) + im.size, offset, extra)] + """, + "GRIB": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for GRIB files. + + The driver requires the file to start with a GRIB header. If you have files + with embedded GRIB data, or files with multiple GRIB fields, your application + has to seek to the header before passing the file handle to PIL. + + To add read or write support to your application, use + :py:func:`PIL.GribStubImagePlugin.register_handler`. + """, + "HDF5": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for HDF5 files. + + To add read or write support to your application, use + :py:func:`PIL.Hdf5StubImagePlugin.register_handler`. + """, + "ICNS": """*From the Pillow docs:* + + + PIL reads and (macOS only) writes macOS ``.icns`` files. By default, the + largest available icon is read, though you can override this by setting the + :py:attr:`~PIL.Image.Image.size` property before calling + :py:meth:`~PIL.Image.Image.load`. The :py:meth:`~PIL.Image.Image.write` method + sets the following :py:attr:`~PIL.Image.Image.info` property: + + **sizes** + A list of supported sizes found in this icon file; these are a + 3-tuple, ``(width, height, scale)``, where ``scale`` is 2 for a retina + icon and 1 for a standard icon. 
You *are* permitted to use this 3-tuple + format for the :py:attr:`~PIL.Image.Image.size` property if you set it + before calling :py:meth:`~PIL.Image.Image.load`; after loading, the size + will be reset to a 2-tuple containing pixel dimensions (so, e.g. if you + ask for ``(512, 512, 2)``, the final value of + :py:attr:`~PIL.Image.Image.size` will be ``(1024, 1024)``). + """, + "ICO": """*From the Pillow docs:* + + + ICO is used to store icons on Windows. The largest available icon is read. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **sizes** + A list of sizes including in this ico file; these are a 2-tuple, + ``(width, height)``; Default to ``[(16, 16), (24, 24), (32, 32), (48, 48), + (64, 64), (128, 128), (256, 256)]``. Any sizes bigger than the original + size or 256 will be ignored. + + IM + ^^ + + IM is a format used by LabEye and other applications based on the IFUNC image + processing library. The library reads and writes most uncompressed interchange + versions of this format. + + IM is the only format that can store all internal PIL formats. + """, + "IM": """No docs for IM.""", + "IMT": """*From the Pillow docs:* + + + PIL reads Image Tools images containing ``L`` data. + """, + "IPTC": """No docs for IPTC.""", + "JPEG": """*From the Pillow docs:* + + + PIL reads JPEG, JFIF, and Adobe JPEG files containing ``L``, ``RGB``, or + ``CMYK`` data. It writes standard and progressive JFIF files. + + Using the :py:meth:`~PIL.Image.Image.draft` method, you can speed things up by + converting ``RGB`` images to ``L``, and resize images to 1/2, 1/4 or 1/8 of + their original size while loading them. + + The :py:meth:`~PIL.Image.Image.write` method may set the following + :py:attr:`~PIL.Image.Image.info` properties if available: + + **jfif** + JFIF application marker found. If the file is not a JFIF file, this key is + not present. + + **jfif_version** + A tuple representing the jfif version, (major version, minor version). 
+ + **jfif_density** + A tuple representing the pixel density of the image, in units specified + by jfif_unit. + + **jfif_unit** + Units for the jfif_density: + + * 0 - No Units + * 1 - Pixels per Inch + * 2 - Pixels per Centimeter + + **dpi** + A tuple representing the reported pixel density in pixels per inch, if + the file is a jfif file and the units are in inches. + + **adobe** + Adobe application marker found. If the file is not an Adobe JPEG file, this + key is not present. + + **adobe_transform** + Vendor Specific Tag. + + **progression** + Indicates that this is a progressive JPEG file. + + **icc_profile** + The ICC color profile for the image. + + **exif** + Raw EXIF data from the image. + + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **quality** + The image quality, on a scale from 1 (worst) to 95 (best). The default is + 75. Values above 95 should be avoided; 100 disables portions of the JPEG + compression algorithm, and results in large files with hardly any gain in + image quality. + + **optimize** + If present and true, indicates that the encoder should make an extra pass + over the image in order to select optimal encoder settings. + + **progressive** + If present and true, indicates that this image should be stored as a + progressive JPEG file. + + **dpi** + A tuple of integers representing the pixel density, ``(x,y)``. + + **icc_profile** + If present and true, the image is stored with the provided ICC profile. + If this parameter is not provided, the image will be saved with no profile + attached. To preserve the existing profile:: + + im.save(filename, 'jpeg', icc_profile=im.info.get('icc_profile')) + + **exif** + If present, the image will be stored with the provided raw EXIF data. + + **subsampling** + If present, sets the subsampling for the encoder. + + * ``keep``: Only valid for JPEG files, will retain the original image setting. 
+ * ``4:4:4``, ``4:2:2``, ``4:2:0``: Specific sampling values + * ``-1``: equivalent to ``keep`` + * ``0``: equivalent to ``4:4:4`` + * ``1``: equivalent to ``4:2:2`` + * ``2``: equivalent to ``4:2:0`` + + **qtables** + If present, sets the qtables for the encoder. This is listed as an + advanced option for wizards in the JPEG documentation. Use with + caution. ``qtables`` can be one of several types of values: + + * a string, naming a preset, e.g. ``keep``, ``web_low``, or ``web_high`` + * a list, tuple, or dictionary (with integer keys = + range(len(keys))) of lists of 64 integers. There must be + between 2 and 4 tables. + + .. versionadded:: Pillow 2.5.0 + + + .. note:: + + To enable JPEG support, you need to build and install the IJG JPEG library + before building the Python Imaging Library. See the distribution README for + details. + """, + "JPEG2000": """*From the Pillow docs:* + + + .. versionadded:: Pillow 2.4.0 + + PIL reads and writes JPEG 2000 files containing ``L``, ``LA``, ``RGB`` or + ``RGBA`` data. It can also read files containing ``YCbCr`` data, which it + converts on read into ``RGB`` or ``RGBA`` depending on whether or not there is + an alpha channel. PIL supports JPEG 2000 raw codestreams (``.j2k`` files), as + well as boxed JPEG 2000 files (``.j2p`` or ``.jpx`` files). PIL does *not* + support files whose components have different sampling frequencies. + + When loading, if you set the ``mode`` on the image prior to the + :py:meth:`~PIL.Image.Image.load` method being invoked, you can ask PIL to + convert the image to either ``RGB`` or ``RGBA`` rather than choosing for + itself. It is also possible to set ``reduce`` to the number of resolutions to + discard (each one reduces the size of the resulting image by a factor of 2), + and ``layers`` to specify the number of quality layers to load. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **offset** + The image offset, as a tuple of integers, e.g. 
(16, 16) + + **tile_offset** + The tile offset, again as a 2-tuple of integers. + + **tile_size** + The tile size as a 2-tuple. If not specified, or if set to None, the + image will be saved without tiling. + + **quality_mode** + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. + + **quality_layers** + A sequence of numbers, each of which represents either an approximate size + reduction (if quality mode is `"rates"`) or a signal to noise ratio value + in decibels. If not specified, defaults to a single layer of full quality. + + **num_resolutions** + The number of different image resolutions to be stored (which corresponds + to the number of Discrete Wavelet Transform decompositions plus one). + + **codeblock_size** + The code-block size as a 2-tuple. Minimum size is 4 x 4, maximum is 1024 x + 1024, with the additional restriction that no code-block may have more + than 4096 coefficients (i.e. the product of the two numbers must be no + greater than 4096). + + **precinct_size** + The precinct size as a 2-tuple. Must be a power of two along both axes, + and must be greater than the code-block size. + + **irreversible** + If ``True``, use the lossy Irreversible Color Transformation + followed by DWT 9-7. Defaults to ``False``, which means to use the + Reversible Color Transformation with DWT 5-3. + + **progression** + Controls the progression order; must be one of ``"LRCP"``, ``"RLCP"``, + ``"RPCL"``, ``"PCRL"``, ``"CPRL"``. The letters stand for Component, + Position, Resolution and Layer respectively and control the order of + encoding, the idea being that e.g. an image encoded using LRCP mode can + have its quality layers decoded as they arrive at the decoder, while one + encoded using RLCP mode will have increasing resolutions decoded as they + arrive, and so on. + + **cinema_mode** + Set the encoder to produce output compliant with the digital cinema + specifications. 
The options here are ``"no"`` (the default), + ``"cinema2k-24"`` for 24fps 2K, ``"cinema2k-48"`` for 48fps 2K, and + ``"cinema4k-24"`` for 24fps 4K. Note that for compliant 2K files, + *at least one* of your image dimensions must match 2048 x 1080, while + for compliant 4K files, *at least one* of the dimensions must match + 4096 x 2160. + + .. note:: + + To enable JPEG 2000 support, you need to build and install the OpenJPEG + library, version 2.0.0 or higher, before building the Python Imaging + Library. + + Windows users can install the OpenJPEG binaries available on the + OpenJPEG website, but must add them to their PATH in order to use PIL (if + you fail to do this, you will get errors about not being able to load the + ``_imaging`` DLL). + """, + "MCIDAS": """*From the Pillow docs:* + + + PIL identifies and reads 8-bit McIdas area files. + """, + "MIC": """*From the Pillow docs:* + + + PIL identifies and reads Microsoft Image Composer (MIC) files. When opened, the + first sprite in the file is loaded. You can use :py:meth:`~file.seek` and + :py:meth:`~file.tell` to read other sprites from the file. + + Note that there may be an embedded gamma of 2.2 in MIC files. + """, + "MPEG": """*From the Pillow docs:* + + + PIL identifies MPEG files. + """, + "MPO": """*From the Pillow docs:* + + + Pillow identifies and reads Multi Picture Object (MPO) files, loading the primary + image when first opened. The :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods may be used to read other pictures from the file. The pictures are + zero-indexed and random access is supported. + """, + "MSP": """*From the Pillow docs:* + + + PIL identifies and reads MSP files from Windows 1 and 2. The library writes + uncompressed (Windows 1) versions of this format. + """, + "PCD": """*From the Pillow docs:* + + + PIL reads PhotoCD files containing ``RGB`` data. This only reads the 768x512 + resolution image from the file. Higher resolutions are encoded in a proprietary + encoding. 
+ """, + "PCX": """*From the Pillow docs:* + + + PIL reads and writes PCX files containing ``1``, ``L``, ``P``, or ``RGB`` data. + """, + "PIXAR": """*From the Pillow docs:* + + + PIL provides limited support for PIXAR raster files. The library can identify + and read “dumped” RGB files. + + The format code is ``PIXAR``. + """, + "PNG": """*From the Pillow docs:* + + + PIL identifies, reads, and writes PNG files containing ``1``, ``L``, ``P``, + ``RGB``, or ``RGBA`` data. Interlaced files are supported as of v1.1.7. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties, when appropriate: + + **chromaticity** + The chromaticity points, as an 8 tuple of floats. (``White Point + X``, ``White Point Y``, ``Red X``, ``Red Y``, ``Green X``, ``Green + Y``, ``Blue X``, ``Blue Y``) + + **gamma** + Gamma, given as a floating point number. + + **srgb** + The sRGB rendering intent as an integer. + + * 0 Perceptual + * 1 Relative Colorimetric + * 2 Saturation + * 3 Absolute Colorimetric + + **transparency** + For ``P`` images: Either the palette index for full transparent pixels, + or a byte string with alpha values for each palette entry. + + For ``L`` and ``RGB`` images, the color that represents full transparent + pixels in this image. + + This key is omitted if the image is not a transparent palette image. + + ``Open`` also sets ``Image.text`` to a list of the values of the + ``tEXt``, ``zTXt``, and ``iTXt`` chunks of the PNG image. Individual + compressed chunks are limited to a decompressed size of + ``PngImagePlugin.MAX_TEXT_CHUNK``, by default 1MB, to prevent + decompression bombs. Additionally, the total size of all of the text + chunks is limited to ``PngImagePlugin.MAX_TEXT_MEMORY``, defaulting to + 64MB. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **optimize** + If present and true, instructs the PNG writer to make the output file as + small as possible. 
This includes extra processing in order to find optimal + encoder settings. + + **transparency** + For ``P``, ``L``, and ``RGB`` images, this option controls what + color image to mark as transparent. + + For ``P`` images, this can be a either the palette index, + or a byte string with alpha values for each palette entry. + + **dpi** + A tuple of two numbers corresponding to the desired dpi in each direction. + + **pnginfo** + A :py:class:`PIL.PngImagePlugin.PngInfo` instance containing text tags. + + **compress_level** + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 6. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). + + **icc_profile** + The ICC Profile to include in the saved file. + + **bits (experimental)** + For ``P`` images, this option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). + + **dictionary (experimental)** + Set the ZLIB encoder dictionary. + + .. note:: + + To enable PNG support, you need to build and install the ZLIB compression + library before building the Python Imaging Library. See the installation + documentation for details. + """, + "PPM": """*From the Pillow docs:* + + + PIL reads and writes PBM, PGM and PPM files containing ``1``, ``L`` or ``RGB`` + data. + """, + "PSD": """*From the Pillow docs:* + + + PIL identifies and reads PSD files written by Adobe Photoshop 2.5 and 3.0. + + """, + "SGI": """*From the Pillow docs:* + + + Pillow reads and writes uncompressed ``L``, ``RGB``, and ``RGBA`` files. + + """, + "SPIDER": """*From the Pillow docs:* + + + PIL reads and writes SPIDER image files of 32-bit floating point data + ("F;32F"). + + PIL also reads SPIDER stack files containing sequences of SPIDER images. The + :py:meth:`~file.seek` and :py:meth:`~file.tell` methods are supported, and + random access is allowed. 
+ + The :py:meth:`~PIL.Image.Image.write` method sets the following attributes: + + **format** + Set to ``SPIDER`` + + **istack** + Set to 1 if the file is an image stack, else 0. + + **nimages** + Set to the number of images in the stack. + + A convenience method, :py:meth:`~PIL.Image.Image.convert2byte`, is provided for + converting floating point data to byte data (mode ``L``):: + + im = Image.open('image001.spi').convert2byte() + + Writing files in SPIDER format + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The extension of SPIDER files may be any 3 alphanumeric characters. Therefore + the output format must be specified explicitly:: + + im.save('newimage.spi', format='SPIDER') + + For more information about the SPIDER image processing package, see the + `SPIDER homepage`_ at `Wadsworth Center`_. + + .. _SPIDER homepage: https://spider.wadsworth.org/spider_doc/spider/docs/spider.html + .. _Wadsworth Center: https://www.wadsworth.org/ + """, + "SUN": """No docs for SUN.""", + "TGA": """*From the Pillow docs:* + + + PIL reads 24- and 32-bit uncompressed and run-length encoded TGA files. + """, + "TIFF": """*From the Pillow docs:* + + + Pillow reads and writes TIFF files. It can read both striped and tiled + images, pixel and plane interleaved multi-band images. If you have + libtiff and its headers installed, PIL can read and write many kinds + of compressed TIFF files. If not, PIL will only read and write + uncompressed files. + + .. note:: + + Beginning in version 5.0.0, Pillow requires libtiff to read or + write compressed files. Prior to that release, Pillow had buggy + support for reading Packbits, LZW and JPEG compressed TIFFs + without using libtiff. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Compression mode. + + .. versionadded:: Pillow 2.0.0 + + **dpi** + Image resolution as an ``(xdpi, ydpi)`` tuple, where applicable. 
You can use + the :py:attr:`~PIL.Image.Image.tag` attribute to get more detailed + information about the image resolution. + + .. versionadded:: Pillow 1.1.5 + + **resolution** + Image resolution as an ``(xres, yres)`` tuple, where applicable. This is a + measurement in whichever unit is specified by the file. + + .. versionadded:: Pillow 1.1.5 + + + The :py:attr:`~PIL.Image.Image.tag_v2` attribute contains a dictionary + of TIFF metadata. The keys are numerical indexes from + :py:attr:`~PIL.TiffTags.TAGS_V2`. Values are strings or numbers for single + items, multiple values are returned in a tuple of values. Rational + numbers are returned as a :py:class:`~PIL.TiffImagePlugin.IFDRational` + object. + + .. versionadded:: Pillow 3.0.0 + + For compatibility with legacy code, the + :py:attr:`~PIL.Image.Image.tag` attribute contains a dictionary of + decoded TIFF fields as returned prior to version 3.0.0. Values are + returned as either strings or tuples of numeric values. Rational + numbers are returned as a tuple of ``(numerator, denominator)``. + + .. deprecated:: 3.0.0 + + + Saving Tiff Images + ~~~~~~~~~~~~~~~~~~ + + The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments: + + **save_all** + If true, Pillow will save all frames of the image to a multiframe tiff document. + + .. versionadded:: Pillow 3.4.0 + + **tiffinfo** + A :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` object or dict + object containing tiff tags and values. The TIFF field type is + autodetected for Numeric and string values, any other types + require using an :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + object and setting the type in + :py:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype` with + the appropriate numerical value from + ``TiffTags.TYPES``. + + .. versionadded:: Pillow 2.3.0 + + Metadata values that are of the rational type should be passed in + using a :py:class:`~PIL.TiffImagePlugin.IFDRational` object. + + .. 
versionadded:: Pillow 3.1.0 + + For compatibility with legacy code, a + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` object may + be passed in this field. However, this is deprecated. + + .. versionadded:: Pillow 3.0.0 + + .. note:: + + Only some tags are currently supported when writing using + libtiff. The supported list is found in + :py:attr:`~PIL:TiffTags.LIBTIFF_CORE`. + + **compression** + A string containing the desired compression method for the + file. (valid only with libtiff installed) Valid compression + methods are: ``None``, ``"tiff_ccitt"``, ``"group3"``, + ``"group4"``, ``"tiff_jpeg"``, ``"tiff_adobe_deflate"``, + ``"tiff_thunderscan"``, ``"tiff_deflate"``, ``"tiff_sgilog"``, + ``"tiff_sgilog24"``, ``"tiff_raw_16"`` + + These arguments to set the tiff header fields are an alternative to + using the general tags available through tiffinfo. + + **description** + + **software** + + **date_time** + + **artist** + + **copyright** + Strings + + **resolution_unit** + A string of "inch", "centimeter" or "cm" + + **resolution** + + **x_resolution** + + **y_resolution** + + **dpi** + Either a Float, 2 tuple of (numerator, denominator) or a + :py:class:`~PIL.TiffImagePlugin.IFDRational`. Resolution implies + an equal x and y resolution, dpi also implies a unit of inches. + + """, + "WMF": """*From the Pillow docs:* + + + PIL can identify playable WMF files. + + In PIL 1.1.4 and earlier, the WMF driver provides some limited rendering + support, but not enough to be useful for any real application. + + In PIL 1.1.5 and later, the WMF driver is a stub driver. To add WMF read or + write support to your application, use + :py:func:`PIL.WmfImagePlugin.register_handler` to register a WMF handler. + + :: + + from PIL import Image + from PIL import WmfImagePlugin + + class WmfHandler: + def open(self, im): + ... + def load(self, im): + ... + return image + def save(self, im, fp, filename): + ... 
+ + wmf_handler = WmfHandler() + + WmfImagePlugin.register_handler(wmf_handler) + + im = Image.open("sample.wmf")""", + "XBM": """*From the Pillow docs:* + + + PIL reads and writes X bitmap files (mode ``1``). + """, + "XPM": """*From the Pillow docs:* + + + PIL reads X pixmap files (mode ``P``) with 256 colors or less. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "XVThumb": """No docs for XVThumb.""", +} diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..75637ba5122e917aa178a70c82c4f94fd2042ba8 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py @@ -0,0 +1,823 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write images using pillow/PIL (legacy). + +Backend Library: `Pillow `_ + +Pillow is a friendly fork of PIL (Python Image Library) and supports +reading and writing of common formats (jpg, png, gif, tiff, ...). While +these docs provide an overview of some of its features, pillow is +constantly improving. Hence, the complete list of features can be found +in pillows official docs (see the Backend Library link). 
+ +Parameters for Reading +---------------------- +pilmode : str + (Available for all formats except GIF-PIL) + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 +as_gray : bool + (Available for all formats except GIF-PIL) + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. +ignoregamma : bool + (Only available in PNG-PIL) + Avoid gamma correction. Default True. +exifrotate : bool + (Only available in JPEG-PIL) + Automatically rotate the image according to exif flag. Default True. + + +Parameters for saving +--------------------- +optimize : bool + (Only available in PNG-PIL) + If present and true, instructs the PNG writer to make the output file + as small as possible. This includes extra processing in order to find + optimal encoder settings. +transparency: + (Only available in PNG-PIL) + This option controls what color image to mark as transparent. +dpi: tuple of two scalars + (Only available in PNG-PIL) + The desired dpi in each direction. +pnginfo: PIL.PngImagePlugin.PngInfo + (Only available in PNG-PIL) + Object containing text tags. 
+compress_level: int + (Only available in PNG-PIL) + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 9. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). +compression: int + (Only available in PNG-PIL) + Compatibility with the freeimage PNG format. If given, it overrides + compress_level. +icc_profile: + (Only available in PNG-PIL) + The ICC Profile to include in the saved file. +bits (experimental): int + (Only available in PNG-PIL) + This option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). +quantize: + (Only available in PNG-PIL) + Compatibility with the freeimage PNG format. If given, it overrides + bits. In this case, given as a number between 1-256. +dictionary (experimental): dict + (Only available in PNG-PIL) + Set the ZLIB encoder dictionary. +prefer_uint8: bool + (Only available in PNG-PIL) + Let the PNG writer truncate uint16 image arrays to uint8 if their values fall + within the range [0, 255]. Defaults to true for legacy compatibility, however + it is recommended to set this to false to avoid unexpected behavior when + saving e.g. weakly saturated images. + +quality : scalar + (Only available in JPEG-PIL) + The compression factor of the saved image (1..100), higher + numbers result in higher quality but larger file size. Default 75. +progressive : bool + (Only available in JPEG-PIL) + Save as a progressive JPEG file (e.g. for images on the web). + Default False. +optimize : bool + (Only available in JPEG-PIL) + On saving, compute optimal Huffman coding tables (can reduce a few + percent of file size). Default False. +dpi : tuple of int + (Only available in JPEG-PIL) + The pixel density, ``(x,y)``. +icc_profile : object + (Only available in JPEG-PIL) + If present and true, the image is stored with the provided ICC profile. 
+ If this parameter is not provided, the image will be saved with no + profile attached. +exif : dict + (Only available in JPEG-PIL) + If present, the image will be stored with the provided raw EXIF data. +subsampling : str + (Only available in JPEG-PIL) + Sets the subsampling for the encoder. See Pillow docs for details. +qtables : object + (Only available in JPEG-PIL) + Set the qtables for the encoder. See Pillow docs for details. +quality_mode : str + (Only available in JPEG2000-PIL) + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. +quality : float + (Only available in JPEG2000-PIL) + Approximate size reduction (if quality mode is `rates`) or a signal to noise ratio + in decibels (if quality mode is `dB`). +loop : int + (Only available in GIF-PIL) + The number of iterations. Default 0 (meaning loop indefinitely). +duration : {float, list} + (Only available in GIF-PIL) + The duration (in milliseconds) of each frame. Either specify one value + that is used for all frames, or one value for each frame. +fps : float + (Only available in GIF-PIL) + The number of frames per second. If duration is not given, the + duration for each frame is set to 1/fps. Default 10. +palettesize : int + (Only available in GIF-PIL) + The number of colors to quantize the image to. Is rounded to + the nearest power of two. Default 256. +subrectangles : bool + (Only available in GIF-PIL) + If True, will try and optimize the GIF by storing only the + rectangular parts of each frame that change with respect to the + previous. Default False. + +Notes +----- +To enable JPEG 2000 support, you need to build and install the OpenJPEG library, +version 2.0.0 or higher, before building the Python Imaging Library. Windows +users can install the OpenJPEG binaries available on the OpenJPEG website, but +must add them to their PATH in order to use PIL (if you fail to do this, you +will get errors about not being able to load the ``_imaging`` DLL). 
+ +GIF images read with this plugin are always RGBA. The alpha channel is ignored +when saving RGB images. +""" + +import logging +import threading + +import numpy as np + +from ..core import Format, image_as_uint +from ..core.request import URI_FILE, URI_BYTES + + +logger = logging.getLogger(__name__) + + +# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX. + + +GENERIC_DOCS = """ + Parameters for reading + ---------------------- + + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. +""" + + +class PillowFormat(Format): + """ + Base format class for Pillow formats. 
+ """ + + _pillow_imported = False + _Image = None + _modes = "i" + _description = "" + + def __init__(self, *args, plugin_id: str = None, **kwargs): + super(PillowFormat, self).__init__(*args, **kwargs) + # Used to synchronize _init_pillow(), see #244 + self._lock = threading.RLock() + + self._plugin_id = plugin_id + + @property + def plugin_id(self): + """The PIL plugin id.""" + return self._plugin_id # Set when format is created + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError( + "Imageio Pillow plugin requires " "Pillow, not PIL!" + ) + from PIL import Image + + self._Image = Image + elif self._Image is None: # pragma: no cover + raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.") + Image = self._Image + + if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"): + Image.preinit() + else: + Image.init() + return Image + + def _can_read(self, request): + Image = self._init_pillow() + if self.plugin_id in Image.OPEN: + factory, accept = Image.OPEN[self.plugin_id] + if accept: + if request.firstbytes and accept(request.firstbytes): + return True + + def _can_write(self, request): + Image = self._init_pillow() + if request.extension in self.extensions or request._uri_type in [ + URI_FILE, + URI_BYTES, + ]: + if self.plugin_id in Image.SAVE: + return True + + class Reader(Format.Reader): + def _open(self, pilmode=None, as_gray=False): + Image = self.format._init_pillow() + try: + factory, accept = Image.OPEN[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot read images." 
% self.format.name) + self._fp = self._get_file() + self._im = factory(self._fp, "") + if hasattr(Image, "_decompression_bomb_check"): + Image._decompression_bomb_check(self._im.size) + # Save the raw mode used by the palette for a BMP because it may not be the number of channels + # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument + # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame. + # This issue is resolved by using the raw palette data but the rawmode information is now lost. So we + # store the raw mode for later use + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + pil_try_read(self._im) + # Store args + self._kwargs = dict( + as_gray=as_gray, is_gray=_palette_is_grayscale(self._im) + ) + # setting mode=None is not the same as just not providing it + if pilmode is not None: + self._kwargs["mode"] = pilmode + # Set length + self._length = 1 + if hasattr(self._im, "n_frames"): + self._length = self._im.n_frames + + def _get_file(self): + self._we_own_fp = False + return self.request.get_file() + + def _close(self): + save_pillow_close(self._im) + if self._we_own_fp: + self._fp.close() + # else: request object handles closing the _fp + + def _get_length(self): + return self._length + + def _seek(self, index): + try: + self._im.seek(index) + except EOFError: + raise IndexError("Could not seek to index %i" % index) + + def _get_data(self, index): + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + i = self._im.tell() + if i > index: + self._seek(index) # just try + else: + while i < index: # some formats need to be read in sequence + i += 1 + self._seek(i) + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + self._im.getdata()[0] + im = pil_get_frame(self._im, **self._kwargs) + return im, 
self._im.info + + def _get_meta_data(self, index): + if not (index is None or index == 0): + raise IndexError() + return self._im.info + + class Writer(Format.Writer): + def _open(self): + Image = self.format._init_pillow() + try: + self._save_func = Image.SAVE[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot write images." % self.format.name) + self._fp = self.request.get_file() + self._meta = {} + self._written = False + + def _close(self): + pass # request object handled closing _fp + + def _append_data(self, im, meta): + if self._written: + raise RuntimeError( + "Format %s only supports single images." % self.format.name + ) + # Pop unit dimension for grayscale images + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + self._written = True + self._meta.update(meta) + img = ndarray_to_pil( + im, self.format.plugin_id, self._meta.pop("prefer_uint8", True) + ) + if "bits" in self._meta: + img = img.quantize() # Make it a P image, so bits arg is used + img.save(self._fp, format=self.format.plugin_id, **self._meta) + save_pillow_close(img) + + def set_meta_data(self, meta): + self._meta.update(meta) + + +class PNGFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, ignoregamma=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + if not self.request.kwargs.get("ignoregamma", True): + # The gamma value in the file represents the gamma factor for the + # hardware on the system where the file was created, and is meant + # to be able to match the colors with the system on which the + # image is shown. 
See also issue #366 + try: + gamma = float(info["gamma"]) + except (KeyError, ValueError): + pass + else: + scale = float(65536 if im.dtype == np.uint16 else 255) + gain = 1.0 + im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999 + return im, info + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, compression=None, quantize=None, interlaced=False, **kwargs): + # Better default for compression + kwargs["compress_level"] = kwargs.get("compress_level", 9) + + if compression is not None: + if compression < 0 or compression > 9: + raise ValueError("Invalid PNG compression level: %r" % compression) + kwargs["compress_level"] = compression + if quantize is not None: + for bits in range(1, 9): + if 2**bits == quantize: + break + else: + raise ValueError( + "PNG quantize must be power of two, " "not %r" % quantize + ) + kwargs["bits"] = bits + if interlaced: + logger.warning("PIL PNG writer cannot produce interlaced images.") + + ok_keys = ( + "optimize", + "transparency", + "dpi", + "pnginfo", + "bits", + "compress_level", + "icc_profile", + "dictionary", + "prefer_uint8", + ) + for key in kwargs: + if key not in ok_keys: + raise TypeError("Invalid arg for PNG writer: %r" % key) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1): + im = image_as_uint(im, bitdepth=16) + else: + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + + +class JPEGFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, exifrotate=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): 
+ self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. + """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality=75, progressive=False, optimize=False, **kwargs): + # The JPEG quality can be between 0 (worst) and 100 (best) + quality = int(quality) + if quality < 0 or quality > 100: + raise ValueError("JPEG quality should be between 0 and 100.") + + kwargs["quality"] = quality + kwargs["progressive"] = bool(progressive) + kwargs["optimize"] = bool(progressive) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError("JPEG does not support alpha channel.") + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +class JPEG2000Format(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + 
class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): + self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. 
+ """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality_mode="rates", quality=5, **kwargs): + # Check quality - in Pillow it should be no higher than 95 + if quality_mode not in {"rates", "dB"}: + raise ValueError("Quality mode should be either 'rates' or 'dB'") + + quality = float(quality) + + if quality_mode == "rates" and (quality < 1 or quality > 1000): + raise ValueError( + "The quality value {} seems to be an invalid rate!".format(quality) + ) + elif quality_mode == "dB" and (quality < 15 or quality > 100): + raise ValueError( + "The quality value {} seems to be an invalid PSNR!".format(quality) + ) + + kwargs["quality_mode"] = quality_mode + kwargs["quality_layers"] = [quality] + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError( + "The current implementation of JPEG 2000 does not support alpha channel." + ) + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +def save_pillow_close(im): + # see issue #216 and #300 + if hasattr(im, "close"): + if hasattr(getattr(im, "fp", None), "close"): + im.close() + + +# Func from skimage + +# This cells contains code from scikit-image, in particular from +# http://github.com/scikit-image/scikit-image/blob/master/ +# skimage/io/_plugins/pil_plugin.py +# The scikit-image license applies. 
def pil_try_read(im):
    """Probe *im* for readability.

    Forces Pillow to decode the first pixel; if the file cannot be read,
    re-raise as a ValueError that names the file and points the user at
    Pillow's external-library installation docs.
    """
    try:
        im.getdata()[0]  # raises IOError if the file is not readable
    except IOError as err:
        site = (
            "http://pillow.readthedocs.io/en/latest/installation.html"
            "#external-libraries"
        )
        raise ValueError(
            'Could not load "%s" \n'
            'Reason: "%s"\n'
            "Please see documentation at: %s" % (im.filename, str(err), site)
        )


def _palette_is_grayscale(pil_image):
    """Return True when a mode-"P" image's used palette entries are all gray."""
    if pil_image.mode != "P" or pil_image.info.get("transparency", None):
        # Non-paletted images, and paletted images carrying transparency
        # (see issue #475), are never treated as grayscale here.
        return False
    # Palette as an (N, 3) array of R, G, B columns.
    # Note: starting in pillow 9.1 palettes may have fewer than 256 entries.
    colors = np.asarray(pil_image.getpalette()).reshape((-1, 3))
    # Unused palette slots contain junk values; restrict to the used range.
    lo, hi = pil_image.getextrema()
    used = colors[lo : hi + 1]
    # Gray iff channel differences (R - G and G - B) are all zero.
    return np.allclose(np.diff(used), 0)
+ if im.info.get("transparency", None) is not None: + # Let Pillow apply the transparency, see issue #210 and #246 + frame = im.convert("RGBA") + elif im.palette.mode in ("RGB", "RGBA"): + # We can do this ourselves. Pillow seems to sometimes screw + # this up if a multi-gif has a palette for each frame ... + # Create palette array + p = np.frombuffer(im.palette.getdata()[1], np.uint8) + # Restore the raw mode that was saved to be used to parse the palette + if hasattr(im.palette, "rawmode_saved"): + im.palette.rawmode = im.palette.rawmode_saved + mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode + nchannels = len(mode) + # Shape it. + p.shape = -1, nchannels + if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"): + p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype))) + # Swap the axes if the mode is in BGR and not RGB + if mode.startswith("BGR"): + p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]] + # Apply palette + frame_paletted = np.array(im, np.uint8) + try: + frame = p[frame_paletted] + except Exception: + # Ok, let PIL do it. The introduction of the branch that + # tests `im.info['transparency']` should make this happen + # much less often, but let's keep it, to be safe. + frame = im.convert("RGBA") + else: + # Let Pillow do it. Unlinke skimage, we always convert + # to RGBA; palettes can be RGBA. + if True: # im.format == 'PNG' and 'transparency' in im.info: + frame = im.convert("RGBA") + else: + frame = im.convert("RGB") + elif "A" in im.mode: + frame = im.convert("RGBA") + elif im.mode == "CMYK": + frame = im.convert("RGB") + elif im.format == "GIF" and im.mode == "RGB": + # pillow9 returns RGBA images for subsequent frames so that it can deal + # with multi-frame GIF that use frame-level palettes and don't dispose + # all areas. + + # For backwards compatibility, we promote everything to RGBA. 
+ frame = im.convert("RGBA") + + # Apply a post-convert if necessary + if as_gray: + frame = frame.convert("F") # Scipy compat + elif not isinstance(frame, np.ndarray) and frame.mode == "1": + # Workaround for crash in PIL. When im is 1-bit, the call array(im) + # can cause a segfault, or generate garbage. See + # https://github.com/scipy/scipy/issues/2138 and + # https://github.com/python-pillow/Pillow/issues/350. + # + # This converts im from a 1-bit image to an 8-bit image. + frame = frame.convert("L") + + # Convert to numpy array + if im.mode.startswith("I;16"): + # e.g. in16 PNG's + shape = im.size + dtype = ">u2" if im.mode.endswith("B") else "= 0: + arr = arr.astype(np.uint8) + mode = mode_base = "L" + + else: + arr = image_as_uint(arr, bitdepth=16) + + else: + arr = image_as_uint(arr, bitdepth=8) + mode = "L" + mode_base = "L" + + if mode == "I;16" and int(getattr(Image, "__version__", "0").split(".")[0]) < 6: + # Pillow < v6.0.0 has limited support for the "I;16" mode, + # requiring us to fall back to this expensive workaround. + # tobytes actually creates a copy of the image, which is costly. + array_buffer = arr.tobytes() + if arr.ndim == 2: + im = Image.new(mode_base, arr.T.shape) + im.frombytes(array_buffer, "raw", mode) + else: + image_shape = (arr.shape[1], arr.shape[0]) + im = Image.frombytes(mode, image_shape, array_buffer) + return im + else: + return Image.fromarray(arr, mode) + + +# imported for backwards compatibility +from .pillowmulti import GIFFormat, TIFFFormat # noqa: E402, F401 diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py new file mode 100644 index 0000000000000000000000000000000000000000..e41c71073fd5fe2b301eac4031da22745e9cec99 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py @@ -0,0 +1,338 @@ +""" +PIL formats for multiple images. 
+""" + +import logging + +import numpy as np + +from .pillow_legacy import PillowFormat, image_as_uint, ndarray_to_pil + +logger = logging.getLogger(__name__) + +NeuQuant = None # we can implement this when we need it + + +class TIFFFormat(PillowFormat): + _modes = "i" # arg, why bother; people should use the tiffile version + _description = "TIFF format (Pillow)" + + +class GIFFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + _modes = "iI" + _description = "Static and animated gif (Pillow)" + + # GIF reader needs no modifications compared to base pillow reader + + class Writer(PillowFormat.Writer): # pragma: no cover + def _open( + self, + loop=0, + duration=None, + fps=10, + palettesize=256, + quantizer=0, + subrectangles=False, + ): + from PIL import __version__ as pillow_version + + major, minor, patch = tuple(int(x) for x in pillow_version.split(".")) + if major == 10 and minor >= 1: + raise ImportError( + f"Pillow v{pillow_version} is not supported by ImageIO's legacy " + "pillow plugin when writing GIFs. Consider switching to the new " + "plugin or downgrading to `pillow<10.1.0`." + ) + + # Check palettesize + palettesize = int(palettesize) + if palettesize < 2 or palettesize > 256: + raise ValueError("GIF quantize param must be 2..256") + if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]: + palettesize = 2 ** int(np.log2(128) + 0.999) + logger.warning( + "Warning: palettesize (%r) modified to a factor of " + "two between 2-256." 
def intToBin(i):
    """Serialize *i* as two little-endian bytes (GIF's 16-bit unsigned field)."""
    return i.to_bytes(2, "little")
+ """ + + def __init__( + self, + file, + opt_subrectangle=True, + opt_loop=0, + opt_quantizer=0, + opt_palette_size=256, + ): + self.fp = file + + self.opt_subrectangle = opt_subrectangle + self.opt_loop = opt_loop + self.opt_quantizer = opt_quantizer + self.opt_palette_size = opt_palette_size + + self._previous_image = None # as np array + self._global_palette = None # as bytes + self._count = 0 + + from PIL.GifImagePlugin import getdata + + self.getdata = getdata + + def add_image(self, im, duration, dispose): + # Prepare image + im_rect, rect = im, (0, 0) + if self.opt_subrectangle: + im_rect, rect = self.getSubRectangle(im) + im_pil = self.converToPIL(im_rect, self.opt_quantizer, self.opt_palette_size) + + # Get pallette - apparently, this is the 3d element of the header + # (but it has not always been). Best we've got. Its not the same + # as im_pil.palette.tobytes(). + from PIL.GifImagePlugin import getheader + + palette = getheader(im_pil)[0][3] + + # Write image + if self._count == 0: + self.write_header(im_pil, palette, self.opt_loop) + self._global_palette = palette + self.write_image(im_pil, palette, rect, duration, dispose) + # assert len(palette) == len(self._global_palette) + + # Bookkeeping + self._previous_image = im + self._count += 1 + + def write_header(self, im, globalPalette, loop): + # Gather info + header = self.getheaderAnim(im) + appext = self.getAppExt(loop) + # Write + self.fp.write(header) + self.fp.write(globalPalette) + self.fp.write(appext) + + def close(self): + self.fp.write(";".encode("utf-8")) # end gif + + def write_image(self, im, palette, rect, duration, dispose): + fp = self.fp + + # Gather local image header and data, using PIL's getdata. That + # function returns a list of bytes objects, but which parts are + # what has changed multiple times, so we put together the first + # parts until we have enough to form the image header. 
+ data = self.getdata(im) + imdes = b"" + while data and len(imdes) < 11: + imdes += data.pop(0) + assert len(imdes) == 11 + + # Make image descriptor suitable for using 256 local color palette + lid = self.getImageDescriptor(im, rect) + graphext = self.getGraphicsControlExt(duration, dispose) + + # Write local header + if (palette != self._global_palette) or (dispose != 2): + # Use local color palette + fp.write(graphext) + fp.write(lid) # write suitable image descriptor + fp.write(palette) # write local color table + fp.write(b"\x08") # LZW minimum size code + else: + # Use global color palette + fp.write(graphext) + fp.write(imdes) # write suitable image descriptor + + # Write image data + for d in data: + fp.write(d) + + def getheaderAnim(self, im): + """Get animation header. To replace PILs getheader()[0]""" + bb = b"GIF89a" + bb += intToBin(im.size[0]) + bb += intToBin(im.size[1]) + bb += b"\x87\x00\x00" + return bb + + def getImageDescriptor(self, im, xy=None): + """Used for the local color table properties per image. + Otherwise global color table applies to all frames irrespective of + whether additional colors comes in play that require a redefined + palette. Still a maximum of 256 color per frame, obviously. + + Written by Ant1 on 2010-08-22 + Modified by Alex Robinson in Janurari 2011 to implement subrectangles. + """ + + # Defaule use full image and place at upper left + if xy is None: + xy = (0, 0) + + # Image separator, + bb = b"\x2C" + + # Image position and size + bb += intToBin(xy[0]) # Left position + bb += intToBin(xy[1]) # Top position + bb += intToBin(im.size[0]) # image width + bb += intToBin(im.size[1]) # image height + + # packed field: local color table flag1, interlace0, sorted table0, + # reserved00, lct size111=7=2^(7 + 1)=256. + bb += b"\x87" + + # LZW minimum size code now comes later, begining of [imagedata] blocks + return bb + + def getAppExt(self, loop): + """Application extension. This part specifies the amount of loops. 
+ If loop is 0 or inf, it goes on infinitely. + """ + if loop == 1: + return b"" + if loop == 0: + loop = 2**16 - 1 + bb = b"" + if loop != 0: # omit the extension if we would like a nonlooping gif + bb = b"\x21\xFF\x0B" # application extension + bb += b"NETSCAPE2.0" + bb += b"\x03\x01" + bb += intToBin(loop) + bb += b"\x00" # end + return bb + + def getGraphicsControlExt(self, duration=0.1, dispose=2): + """Graphics Control Extension. A sort of header at the start of + each image. Specifies duration and transparancy. + + Dispose + ------- + * 0 - No disposal specified. + * 1 - Do not dispose. The graphic is to be left in place. + * 2 - Restore to background color. The area used by the graphic + must be restored to the background color. + * 3 - Restore to previous. The decoder is required to restore the + area overwritten by the graphic with what was there prior to + rendering the graphic. + * 4-7 -To be defined. + """ + + bb = b"\x21\xF9\x04" + bb += chr((dispose & 3) << 2).encode("utf-8") + # low bit 1 == transparency, + # 2nd bit 1 == user input , next 3 bits, the low two of which are used, + # are dispose. + bb += intToBin(int(duration * 100 + 0.5)) # in 100th of seconds + bb += b"\x00" # no transparant color + bb += b"\x00" # end + return bb + + def getSubRectangle(self, im): + """Calculate the minimal rectangle that need updating. Returns + a two-element tuple containing the cropped image and an x-y tuple. + + Calculating the subrectangles takes extra time, obviously. However, + if the image sizes were reduced, the actual writing of the GIF + goes faster. In some cases applying this method produces a GIF faster. 
+ """ + + # Cannot do subrectangle for first image + if self._count == 0: + return im, (0, 0) + + prev = self._previous_image + + # Get difference, sum over colors + diff = np.abs(im - prev) + if diff.ndim == 3: + diff = diff.sum(2) + # Get begin and end for both dimensions + X = np.argwhere(diff.sum(0)) + Y = np.argwhere(diff.sum(1)) + # Get rect coordinates + if X.size and Y.size: + x0, x1 = int(X[0]), int(X[-1] + 1) + y0, y1 = int(Y[0]), int(Y[-1] + 1) + else: # No change ... make it minimal + x0, x1 = 0, 2 + y0, y1 = 0, 2 + + return im[y0:y1, x0:x1], (x0, y0) + + def converToPIL(self, im, quantizer, palette_size=256): + """Convert image to Paletted PIL image. + + PIL used to not do a very good job at quantization, but I guess + this has improved a lot (at least in Pillow). I don't think we need + neuqant (and we can add it later if we really want). + """ + + im_pil = ndarray_to_pil(im, "gif") + + if quantizer in ("nq", "neuquant"): + # NeuQuant algorithm + nq_samplefac = 10 # 10 seems good in general + im_pil = im_pil.convert("RGBA") # NQ assumes RGBA + nqInstance = NeuQuant(im_pil, nq_samplefac) # Learn colors + im_pil = nqInstance.quantize(im_pil, colors=palette_size) + elif quantizer in (0, 1, 2): + # Adaptive PIL algorithm + if quantizer == 2: + im_pil = im_pil.convert("RGBA") + else: + im_pil = im_pil.convert("RGB") + im_pil = im_pil.quantize(colors=palette_size, method=quantizer) + else: + raise ValueError("Invalid value for quantizer: %r" % quantizer) + return im_pil diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/pyav.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pyav.py new file mode 100644 index 0000000000000000000000000000000000000000..32007761a7c8984cb2a292b1f4672ffa68a09911 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/pyav.py @@ -0,0 +1,1199 @@ +"""Read/Write Videos (and images) using PyAV. + +.. 
note:: + To use this plugin you need to have `PyAV `_ + installed:: + + pip install av + +This plugin wraps pyAV, a pythonic binding for the FFMPEG library. It is similar +to our FFMPEG plugin, has improved performance, features a robust interface, and +aims to supersede the FFMPEG plugin in the future. + + +Methods +------- +.. note:: + Check the respective function for a list of supported kwargs and detailed + documentation. + +.. autosummary:: + :toctree: + + PyAVPlugin.read + PyAVPlugin.iter + PyAVPlugin.write + PyAVPlugin.properties + PyAVPlugin.metadata + +Additional methods available inside the :func:`imopen ` +context: + +.. autosummary:: + :toctree: + + PyAVPlugin.init_video_stream + PyAVPlugin.write_frame + PyAVPlugin.set_video_filter + PyAVPlugin.container_metadata + PyAVPlugin.video_stream_metadata + +Advanced API +------------ + +In addition to the default ImageIO v3 API this plugin exposes custom functions +that are specific to reading/writing video and its metadata. These are available +inside the :func:`imopen ` context and allow fine-grained +control over how the video is processed. The functions are documented above and +below you can find a usage example:: + + import imageio.v3 as iio + + with iio.imopen("test.mp4", "w", plugin="pyav") as file: + file.init_video_stream("libx264") + file.container_metadata["comment"] = "This video was created using ImageIO." + + for _ in range(5): + for frame in iio.imiter("imageio:newtonscradle.gif"): + file.write_frame(frame) + + meta = iio.immeta("test.mp4", plugin="pyav") + assert meta["comment"] == "This video was created using ImageIO." + + + +Pixel Formats (Colorspaces) +--------------------------- + +By default, this plugin converts the video into 8-bit RGB (called ``rgb24`` in +ffmpeg). This is a useful behavior for many use-cases, but sometimes you may +want to use the video's native colorspace or you may wish to convert the video +into an entirely different colorspace. 
This is controlled using the ``format`` +kwarg. You can use ``format=None`` to leave the image in its native colorspace +or specify any colorspace supported by FFMPEG as long as it is stridable, i.e., +as long as it can be represented by a single numpy array. Some useful choices +include: + +- rgb24 (default; 8-bit RGB) +- rgb48le (16-bit lower-endian RGB) +- bgr24 (8-bit BGR; openCVs default colorspace) +- gray (8-bit grayscale) +- yuv444p (8-bit channel-first YUV) + +Further, FFMPEG maintains a list of available formats, albeit not as part of the +narrative docs. It can be `found here +`_ (warning: C source +code). + +Filters +------- + +On top of providing basic read/write functionality, this plugin allows you to +use the full collection of `video filters available in FFMPEG +`_. This means that you +can apply excessive preprocessing to your video before retrieving it as a numpy +array or apply excessive post-processing before you encode your data. + +Filters come in two forms: sequences or graphs. Filter sequences are, as the +name suggests, sequences of filters that are applied one after the other. They +are specified using the ``filter_sequence`` kwarg. Filter graphs, on the other +hand, come in the form of a directed graph and are specified using the +``filter_graph`` kwarg. + +.. note:: + All filters are either sequences or graphs. If all you want is to apply a + single filter, you can do this by specifying a filter sequence with a single + entry. + +A ``filter_sequence`` is a list of filters, each defined through a 2-element +tuple of the form ``(filter_name, filter_parameters)``. The first element of the +tuple is the name of the filter. The second element are the filter parameters, +which can be given either as a string or a dict. The string matches the same +format that you would use when specifying the filter using the ffmpeg +command-line tool and the dict has entries of the form ``parameter:value``. 
For +example:: + + import imageio.v3 as iio + + # using a filter_parameters str + img1 = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_sequence=[ + ("rotate", "45*PI/180") + ] + ) + + # using a filter_parameters dict + img2 = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_sequence=[ + ("rotate", {"angle":"45*PI/180", "fillcolor":"AliceBlue"}) + ] + ) + +A ``filter_graph``, on the other hand, is specified using a ``(nodes, edges)`` +tuple. It is best explained using an example:: + + img = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_graph=( + { + "split": ("split", ""), + "scale_overlay":("scale", "512:-1"), + "overlay":("overlay", "x=25:y=25:enable='between(t,1,8)'"), + }, + [ + ("video_in", "split", 0, 0), + ("split", "overlay", 0, 0), + ("split", "scale_overlay", 1, 0), + ("scale_overlay", "overlay", 0, 1), + ("overlay", "video_out", 0, 0), + ] + ) + ) + +The above transforms the video to have picture-in-picture of itself in the top +left corner. As you can see, nodes are specified using a dict which has names as +its keys and filter tuples as values; the same tuples as the ones used when +defining a filter sequence. Edges are a list of a 4-tuples of the form +``(node_out, node_in, output_idx, input_idx)`` and specify which two filters are +connected and which inputs/outputs should be used for this. + +Further, there are two special nodes in a filter graph: ``video_in`` and +``video_out``, which represent the graph's input and output respectively. These +names can not be chosen for other nodes (those nodes would simply be +overwritten), and for a graph to be valid there must be a path from the input to +the output and all nodes in the graph must be connected. + +While most graphs are quite simple, they can become very complex and we +recommend that you read through the `FFMPEG documentation +`_ and their +examples to better understand how to use them. 
+ +""" + +from fractions import Fraction +from math import ceil +from typing import Any, Dict, List, Optional, Tuple, Union, Generator + +import av +import av.filter +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from ..core import Request +from ..core.request import URI_BYTES, InitializationError, IOMode +from ..core.v3_plugin_api import ImageProperties, PluginV3 + + +def _format_to_dtype(format: av.VideoFormat) -> np.dtype: + """Convert a pyAV video format into a numpy dtype""" + + if len(format.components) == 0: + # fake format + raise ValueError( + f"Can't determine dtype from format `{format.name}`. It has no channels." + ) + + endian = ">" if format.is_big_endian else "<" + dtype = "f" if "f32" in format.name else "u" + bits_per_channel = [x.bits for x in format.components] + n_bytes = str(int(ceil(bits_per_channel[0] / 8))) + + return np.dtype(endian + dtype + n_bytes) + + +def _get_frame_shape(frame: av.VideoFrame) -> Tuple[int, ...]: + """Compute the frame's array shape + + Parameters + ---------- + frame : av.VideoFrame + A frame for which the resulting shape should be computed. + + Returns + ------- + shape : Tuple[int, ...] + A tuple describing the shape of the image data in the frame. + + """ + + widths = [component.width for component in frame.format.components] + heights = [component.height for component in frame.format.components] + bits = np.array([component.bits for component in frame.format.components]) + line_sizes = [plane.line_size for plane in frame.planes] + + subsampled_width = widths[:-1] != widths[1:] + subsampled_height = heights[:-1] != heights[1:] + unaligned_components = np.any(bits % 8 != 0) or (line_sizes[:-1] != line_sizes[1:]) + if subsampled_width or subsampled_height or unaligned_components: + raise IOError( + f"{frame.format.name} can't be expressed as a strided array." + "Use `format=` to select a format to convert into." 
+ ) + + shape = [frame.height, frame.width] + + # ffmpeg doesn't have a notion of channel-first or channel-last formats + # instead it stores frames in one or more planes which contain individual + # components of a pixel depending on the pixel format. For channel-first + # formats each component lives on a separate plane (n_planes) and for + # channel-last formats all components are packed on a single plane + # (n_channels) + n_planes = max([component.plane for component in frame.format.components]) + 1 + if n_planes > 1: + shape = [n_planes] + shape + + channels_per_plane = [0] * n_planes + for component in frame.format.components: + channels_per_plane[component.plane] += 1 + n_channels = max(channels_per_plane) + + if n_channels > 1: + shape = shape + [n_channels] + + return tuple(shape) + + +class PyAVPlugin(PluginV3): + """Support for pyAV as backend. + + Parameters + ---------- + request : iio.Request + A request object that represents the users intent. It provides a + standard interface to access various the various ImageResources and + serves them to the plugin as a file object (or file). Check the docs for + details. + container : str + Only used during `iio_mode="w"`! If not None, overwrite the default container + format chosen by pyav. + kwargs : Any + Additional kwargs are forwarded to PyAV's constructor. + + """ + + def __init__(self, request: Request, *, container: str = None, **kwargs) -> None: + """Initialize a new Plugin Instance. + + See Plugin's docstring for detailed documentation. + + Notes + ----- + The implementation here stores the request as a local variable that is + exposed using a @property below. If you inherit from PluginV3, remember + to call ``super().__init__(request)``. 
+ + """ + + super().__init__(request) + + self._container = None + self._video_stream = None + self._video_filter = None + + if request.mode.io_mode == IOMode.read: + self._next_idx = 0 + try: + if request._uri_type == 5: # 5 is the value of URI_HTTP + # pyav should read from HTTP by itself. This enables reading + # HTTP-based streams like DASH. Note that solving streams + # like this is temporary until the new request object gets + # implemented. + self._container = av.open(request.raw_uri, **kwargs) + else: + self._container = av.open(request.get_file(), **kwargs) + self._video_stream = self._container.streams.video[0] + self._decoder = self._container.decode(video=0) + except av.AVError: + if isinstance(request.raw_uri, bytes): + msg = "PyAV does not support these ``" + else: + msg = f"PyAV does not support `{request.raw_uri}`" + raise InitializationError(msg) from None + else: + self.frames_written = 0 + file_handle = self.request.get_file() + filename = getattr(file_handle, "name", None) + extension = self.request.extension or self.request.format_hint + if extension is None: + raise InitializationError("Can't determine output container to use.") + + # hacky, but beats running our own format selection logic + # (since av_guess_format is not exposed) + try: + setattr(file_handle, "name", filename or "tmp" + extension) + except AttributeError: + pass # read-only, nothing we can do + + try: + self._container = av.open( + file_handle, mode="w", format=container, **kwargs + ) + except ValueError: + raise InitializationError( + f"PyAV can not write to `{self.request.raw_uri}`" + ) + + # --------------------- + # Standard V3 Interface + # --------------------- + + def read( + self, + *, + index: int = ..., + format: str = "rgb24", + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + constant_framerate: bool = None, + thread_count: int = 0, + thread_type: str = None, + ) -> np.ndarray: + """Read frames from the 
video. + + If ``index`` is an integer, this function reads the index-th frame from + the file. If ``index`` is ... (Ellipsis), this function reads all frames + from the video, stacks them along the first dimension, and returns a + batch of frames. + + Parameters + ---------- + index : int + The index of the frame to read, e.g. ``index=5`` reads the 5th + frame. If ``...``, read all the frames in the video and stack them + along a new, prepended, batch dimension. + format : str + Set the returned colorspace. If not None (default: rgb24), convert + the data into the given format before returning it. If ``None`` + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. + constant_framerate : bool + If True assume the video's framerate is constant. This allows for + faster seeking inside the file. If False, the video is reset before + each read and searched from the beginning. If None (default), this + value will be read from the container format. + thread_count : int + How many threads to use when decoding a frame. The default is 0, + which will set the number using ffmpeg's default, which is based on + the codec, number of available cores, threadding model, and other + considerations. + thread_type : str + The threading model to be used. 
One of + + - `"SLICE"`: threads assemble parts of the current frame + - `"FRAME"`: threads may assemble future frames + - None (default): Uses ``"FRAME"`` if ``index=...`` and ffmpeg's + default otherwise. + + + Returns + ------- + frame : np.ndarray + A numpy array containing loaded frame data. + + Notes + ----- + Accessing random frames repeatedly is costly (O(k), where k is the + average distance between two keyframes). You should do so only sparingly + if possible. In some cases, it can be faster to bulk-read the video (if + it fits into memory) and to then access the returned ndarray randomly. + + The current implementation may cause problems for b-frames, i.e., + bidirectionaly predicted pictures. I lack test videos to write unit + tests for this case. + + Reading from an index other than ``...``, i.e. reading a single frame, + currently doesn't support filters that introduce delays. + + """ + + if index is ...: + props = self.properties(format=format) + uses_filter = ( + self._video_filter is not None + or filter_graph is not None + or filter_sequence is not None + ) + + self._container.seek(0) + if not uses_filter and props.shape[0] != 0: + frames = np.empty(props.shape, dtype=props.dtype) + for idx, frame in enumerate( + self.iter( + format=format, + filter_sequence=filter_sequence, + filter_graph=filter_graph, + thread_count=thread_count, + thread_type=thread_type or "FRAME", + ) + ): + frames[idx] = frame + else: + frames = np.stack( + [ + x + for x in self.iter( + format=format, + filter_sequence=filter_sequence, + filter_graph=filter_graph, + thread_count=thread_count, + thread_type=thread_type or "FRAME", + ) + ] + ) + + # reset stream container, because threading model can't change after + # first access + self._video_stream.close() + self._video_stream = self._container.streams.video[0] + + return frames + + if thread_type is not None and thread_type != self._video_stream.thread_type: + self._video_stream.thread_type = thread_type + if ( + 
thread_count != 0 + and thread_count != self._video_stream.codec_context.thread_count + ): + # in FFMPEG thread_count == 0 means use the default count, which we + # change to mean don't change the thread count. + self._video_stream.codec_context.thread_count = thread_count + + if constant_framerate is None: + constant_framerate = not self._container.format.variable_fps + + # note: cheap for contigous incremental reads + self._seek(index, constant_framerate=constant_framerate) + desired_frame = next(self._decoder) + self._next_idx += 1 + + self.set_video_filter(filter_sequence, filter_graph) + if self._video_filter is not None: + desired_frame = self._video_filter.send(desired_frame) + + return self._unpack_frame(desired_frame, format=format) + + def iter( + self, + *, + format: str = "rgb24", + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + thread_count: int = 0, + thread_type: str = None, + ) -> np.ndarray: + """Yield frames from the video. + + Parameters + ---------- + frame : np.ndarray + A numpy array containing loaded frame data. + format : str + Convert the data into the given format before returning it. If None, + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_sequence : List[str, str, dict] + Set the returned colorspace. If not None (default: rgb24), convert + the data into the given format before returning it. If ``None`` + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. 
+ thread_count : int + How many threads to use when decoding a frame. The default is 0, + which will set the number using ffmpeg's default, which is based on + the codec, number of available cores, threadding model, and other + considerations. + thread_type : str + The threading model to be used. One of + + - `"SLICE"` (default): threads assemble parts of the current frame + - `"FRAME"`: threads may assemble future frames (faster for bulk reading) + + + Yields + ------ + frame : np.ndarray + A (decoded) video frame. + + + """ + + self._video_stream.thread_type = thread_type or "SLICE" + self._video_stream.codec_context.thread_count = thread_count + + self.set_video_filter(filter_sequence, filter_graph) + + for frame in self._decoder: + self._next_idx += 1 + + if self._video_filter is not None: + try: + frame = self._video_filter.send(frame) + except StopIteration: + break + + if frame is None: + continue + + yield self._unpack_frame(frame, format=format) + + if self._video_filter is not None: + for frame in self._video_filter: + yield self._unpack_frame(frame, format=format) + + def write( + self, + ndimage: Union[np.ndarray, List[np.ndarray]], + *, + codec: str = None, + is_batch: bool = True, + fps: int = 24, + in_pixel_format: str = "rgb24", + out_pixel_format: str = None, + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + ) -> Optional[bytes]: + """Save a ndimage as a video. + + Given a batch of frames (stacked along the first axis) or a list of + frames, encode them and add the result to the ImageResource. + + Parameters + ---------- + ndimage : ArrayLike, List[ArrayLike] + The ndimage to encode and write to the ImageResource. + codec : str + The codec to use when encoding frames. Only needed on first write + and ignored on subsequent writes. + is_batch : bool + If True (default), the ndimage is a batch of images, otherwise it is + a single image. This parameter has no effect on lists of ndimages. 
+ fps : str + The resulting videos frames per second. + in_pixel_format : str + The pixel format of the incoming ndarray. Defaults to "rgb24" and can + be any stridable pix_fmt supported by FFmpeg. + out_pixel_format : str + The pixel format to use while encoding frames. If None (default) + use the codec's default. + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. + + Returns + ------- + encoded_image : bytes or None + If the chosen ImageResource is the special target ``""`` then + write will return a byte string containing the encoded image data. + Otherwise, it returns None. + + Notes + ----- + When writing ````, the video is finalized immediately after the + first write call and calling write multiple times to append frames is + not possible. + + """ + + if isinstance(ndimage, list): + # frames shapes must agree for video + if any(f.shape != ndimage[0].shape for f in ndimage): + raise ValueError("All frames should have the same shape") + elif not is_batch: + ndimage = np.asarray(ndimage)[None, ...] 
+ else: + ndimage = np.asarray(ndimage) + + if self._video_stream is None: + self.init_video_stream(codec, fps=fps, pixel_format=out_pixel_format) + + self.set_video_filter(filter_sequence, filter_graph) + + for img in ndimage: + self.write_frame(img, pixel_format=in_pixel_format) + + if self.request._uri_type == URI_BYTES: + # bytes are immutuable, so we have to flush immediately + # and can't support appending + self._flush_writer() + self._container.close() + + return self.request.get_file().getvalue() + + def properties(self, index: int = ..., *, format: str = "rgb24") -> ImageProperties: + """Standardized ndimage metadata. + + Parameters + ---------- + index : int + The index of the ndimage for which to return properties. If ``...`` + (Ellipsis, default), return the properties for the resulting batch + of frames. + format : str + If not None (default: rgb24), convert the data into the given format + before returning it. If None return the data in the encoded format + if that can be expressed as a strided array; otherwise raise an + Exception. + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + This function is efficient and won't process any pixel data. + + The provided metadata does not include modifications by any filters + (through ``filter_sequence`` or ``filter_graph``). + + """ + + video_width = self._video_stream.codec_context.width + video_height = self._video_stream.codec_context.height + pix_format = format or self._video_stream.codec_context.pix_fmt + frame_template = av.VideoFrame(video_width, video_height, pix_format) + + shape = _get_frame_shape(frame_template) + if index is ...: + n_frames = self._video_stream.frames + shape = (n_frames,) + shape + + return ImageProperties( + shape=tuple(shape), + dtype=_format_to_dtype(frame_template.format), + n_images=shape[0] if index is ... 
else None, + is_batch=index is ..., + ) + + def metadata( + self, + index: int = ..., + exclude_applied: bool = True, + constant_framerate: bool = None, + ) -> Dict[str, Any]: + """Format-specific metadata. + + Returns a dictionary filled with metadata that is either stored in the + container, the video stream, or the frame's side-data. + + Parameters + ---------- + index : int + If ... (Ellipsis, default) return global metadata (the metadata + stored in the container and video stream). If not ..., return the + side data stored in the frame at the given index. + exclude_applied : bool + Currently, this parameter has no effect. It exists for compliance with + the ImageIO v3 API. + constant_framerate : bool + If True assume the video's framerate is constant. This allows for + faster seeking inside the file. If False, the video is reset before + each read and searched from the beginning. If None (default), this + value will be read from the container format. + + Returns + ------- + metadata : dict + A dictionary filled with format-specific metadata fields and their + values. 
+ + """ + + metadata = dict() + + if index is ...: + # useful flags defined on the container and/or video stream + metadata.update( + { + "video_format": self._video_stream.codec_context.pix_fmt, + "codec": self._video_stream.codec.name, + "long_codec": self._video_stream.codec.long_name, + "profile": self._video_stream.profile, + "fps": float(self._video_stream.guessed_rate), + } + ) + if self._video_stream.duration is not None: + duration = float( + self._video_stream.duration * self._video_stream.time_base + ) + metadata.update({"duration": duration}) + + metadata.update(self.container_metadata) + metadata.update(self.video_stream_metadata) + return metadata + + if constant_framerate is None: + constant_framerate = not self._container.format.variable_fps + + self._seek(index, constant_framerate=constant_framerate) + desired_frame = next(self._decoder) + self._next_idx += 1 + + # useful flags defined on the frame + metadata.update( + { + "key_frame": bool(desired_frame.key_frame), + "time": desired_frame.time, + "interlaced_frame": bool(desired_frame.interlaced_frame), + "frame_type": desired_frame.pict_type.name, + } + ) + + # side data + metadata.update( + {item.type.name: bytes(item) for item in desired_frame.side_data} + ) + + return metadata + + def close(self) -> None: + """Close the Video.""" + + is_write = self.request.mode.io_mode == IOMode.write + if is_write and self._video_stream is not None: + self._flush_writer() + + if self._video_stream is not None: + try: + self._video_stream.close() + except ValueError: + pass # stream already closed + + if self._container is not None: + self._container.close() + + self.request.finish() + + def __enter__(self) -> "PyAVPlugin": + return super().__enter__() + + # ------------------------------ + # Add-on Interface inside imopen + # ------------------------------ + + def init_video_stream( + self, + codec: str, + *, + fps: float = 24, + pixel_format: str = None, + max_keyframe_interval: int = None, + 
force_keyframes: bool = None, + ) -> None: + """Initialize a new video stream. + + This function adds a new video stream to the ImageResource using the + selected encoder (codec), framerate, and colorspace. + + Parameters + ---------- + codec : str + The codec to use, e.g. ``"libx264"`` or ``"vp9"``. + fps : float + The desired framerate of the video stream (frames per second). + pixel_format : str + The pixel format to use while encoding frames. If None (default) use + the codec's default. + max_keyframe_interval : int + The maximum distance between two intra frames (I-frames). Also known + as GOP size. If unspecified use the codec's default. Note that not + every I-frame is a keyframe; see the notes for details. + force_keyframes : bool + If True, limit inter frames dependency to frames within the current + keyframe interval (GOP), i.e., force every I-frame to be a keyframe. + If unspecified, use the codec's default. + + Notes + ----- + You can usually leave ``max_keyframe_interval`` and ``force_keyframes`` + at their default values, unless you try to generate seek-optimized video + or have a similar specialist use-case. In this case, ``force_keyframes`` + controls the ability to seek to _every_ I-frame, and + ``max_keyframe_interval`` controls how close to a random frame you can + seek. Low values allow more fine-grained seek at the expense of + file-size (and thus I/O performance). + + """ + + fps = Fraction.from_float(fps) + stream = self._container.add_stream(codec, fps) + stream.time_base = Fraction(1 / fps).limit_denominator(int(2**16 - 1)) + if pixel_format is not None: + stream.pix_fmt = pixel_format + if max_keyframe_interval is not None: + stream.gop_size = max_keyframe_interval + if force_keyframes is not None: + stream.closed_gop = force_keyframes + + self._video_stream = stream + + def write_frame(self, frame: np.ndarray, *, pixel_format: str = "rgb24") -> None: + """Add a frame to the video stream. + + This function appends a new frame to the video. 
It assumes that the + stream previously has been initialized. I.e., ``init_video_stream`` has + to be called before calling this function for the write to succeed. + + Parameters + ---------- + frame : np.ndarray + The image to be appended/written to the video stream. + pixel_format : str + The colorspace (pixel format) of the incoming frame. + + Notes + ----- + Frames may be held in a buffer, e.g., by the filter pipeline used during + writing or by FFMPEG to batch them prior to encoding. Make sure to + ``.close()`` the plugin or to use a context manager to ensure that all + frames are written to the ImageResource. + + """ + + # manual packing of ndarray into frame + # (this should live in pyAV, but it doesn't support all the formats we + # want and PRs there are slow) + pixel_format = av.VideoFormat(pixel_format) + img_dtype = _format_to_dtype(pixel_format) + width = frame.shape[2 if pixel_format.is_planar else 1] + height = frame.shape[1 if pixel_format.is_planar else 0] + av_frame = av.VideoFrame(width, height, pixel_format.name) + if pixel_format.is_planar: + for idx, plane in enumerate(av_frame.planes): + plane_array = np.frombuffer(plane, dtype=img_dtype) + plane_array = as_strided( + plane_array, + shape=(plane.height, plane.width), + strides=(plane.line_size, img_dtype.itemsize), + ) + plane_array[...] = frame[idx] + else: + if pixel_format.name.startswith("bayer_"): + # ffmpeg doesn't describe bayer formats correctly + # see https://github.com/imageio/imageio/issues/761#issuecomment-1059318851 + # and following for details. 
+ n_channels = 1 + else: + n_channels = len(pixel_format.components) + + plane = av_frame.planes[0] + plane_shape = (plane.height, plane.width) + plane_strides = (plane.line_size, n_channels * img_dtype.itemsize) + if n_channels > 1: + plane_shape += (n_channels,) + plane_strides += (img_dtype.itemsize,) + + plane_array = as_strided( + np.frombuffer(plane, dtype=img_dtype), + shape=plane_shape, + strides=plane_strides, + ) + plane_array[...] = frame + + stream = self._video_stream + av_frame.time_base = stream.codec_context.time_base + av_frame.pts = self.frames_written + self.frames_written += 1 + + if self._video_filter is not None: + av_frame = self._video_filter.send(av_frame) + if av_frame is None: + return + + if stream.frames == 0: + stream.width = av_frame.width + stream.height = av_frame.height + + for packet in stream.encode(av_frame): + self._container.mux(packet) + + def set_video_filter( + self, + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + ) -> None: + """Set the filter(s) to use. + + This function creates a new FFMPEG filter graph to use when reading or + writing video. In the case of reading, frames are passed through the + filter graph before begin returned and, in case of writing, frames are + passed through the filter before being written to the video. + + Parameters + ---------- + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the + (module-level) plugin docs for details and examples. 
+ + Notes + ----- + Changing a filter graph with lag during reading or writing will + currently cause frames in the filter queue to be lost. + + """ + + if filter_sequence is None and filter_graph is None: + self._video_filter = None + return + + if filter_sequence is None: + filter_sequence = list() + + node_descriptors: Dict[str, Tuple[str, Union[str, Dict]]] + edges: List[Tuple[str, str, int, int]] + if filter_graph is None: + node_descriptors, edges = dict(), [("video_in", "video_out", 0, 0)] + else: + node_descriptors, edges = filter_graph + + graph = av.filter.Graph() + + previous_node = graph.add_buffer(template=self._video_stream) + for filter_name, argument in filter_sequence: + if isinstance(argument, str): + current_node = graph.add(filter_name, argument) + else: + current_node = graph.add(filter_name, **argument) + previous_node.link_to(current_node) + previous_node = current_node + + nodes = dict() + nodes["video_in"] = previous_node + nodes["video_out"] = graph.add("buffersink") + for name, (filter_name, arguments) in node_descriptors.items(): + if isinstance(arguments, str): + nodes[name] = graph.add(filter_name, arguments) + else: + nodes[name] = graph.add(filter_name, **arguments) + + for from_note, to_node, out_idx, in_idx in edges: + nodes[from_note].link_to(nodes[to_node], out_idx, in_idx) + + graph.configure() + + def video_filter(): + # this starts a co-routine + # send frames using graph.send() + frame = yield None + + # send and receive frames in "parallel" + while frame is not None: + graph.push(frame) + try: + frame = yield graph.pull() + except av.error.BlockingIOError: + # filter has lag and needs more frames + frame = yield None + except av.error.EOFError: + break + + try: + # send EOF in av>=9.0 + graph.push(None) + except ValueError: # pragma: no cover + # handle av<9.0 + pass + + # all frames have been sent, empty the filter + while True: + try: + yield graph.pull() + except av.error.EOFError: + break # EOF + except 
av.error.BlockingIOError: # pragma: no cover + # handle av<9.0 + break + + self._video_filter = video_filter() + self._video_filter.send(None) + + @property + def container_metadata(self): + """Container-specific metadata. + + A dictionary containing metadata stored at the container level. + + """ + return self._container.metadata + + @property + def video_stream_metadata(self): + """Stream-specific metadata. + + A dictionary containing metadata stored at the stream level. + + """ + return self._video_stream.metadata + + # ------------------------------- + # Internals and private functions + # ------------------------------- + + def _unpack_frame(self, frame: av.VideoFrame, *, format: str = None) -> np.ndarray: + """Convert a av.VideoFrame into a ndarray + + Parameters + ---------- + frame : av.VideoFrame + The frame to unpack. + format : str + If not None, convert the frame to the given format before unpacking. + + """ + + if format is not None: + frame = frame.reformat(format=format) + + dtype = _format_to_dtype(frame.format) + shape = _get_frame_shape(frame) + + planes = list() + for idx in range(len(frame.planes)): + n_channels = sum( + [ + x.bits // (dtype.itemsize * 8) + for x in frame.format.components + if x.plane == idx + ] + ) + av_plane = frame.planes[idx] + plane_shape = (av_plane.height, av_plane.width) + plane_strides = (av_plane.line_size, n_channels * dtype.itemsize) + if n_channels > 1: + plane_shape += (n_channels,) + plane_strides += (dtype.itemsize,) + + np_plane = as_strided( + np.frombuffer(av_plane, dtype=dtype), + shape=plane_shape, + strides=plane_strides, + ) + planes.append(np_plane) + + if len(planes) > 1: + # Note: the planes *should* exist inside a contigous memory block + # somewhere inside av.Frame however pyAV does not appear to expose this, + # so we are forced to copy the planes individually instead of wrapping + # them :( + out = np.concatenate(planes).reshape(shape) + else: + out = planes[0] + + return out + + def _seek(self, 
index, *, constant_framerate: bool = True) -> Generator: + """Seeks to the frame at the given index.""" + + if index == self._next_idx: + return # fast path :) + + # we must decode at least once before we seek otherwise the + # returned frames become corrupt. + if self._next_idx == 0: + next(self._decoder) + self._next_idx += 1 + + if index == self._next_idx: + return # fast path :) + + # remove this branch until I find a way to efficiently find the next + # keyframe. keeping this as a reminder + # if self._next_idx < index and index < self._next_keyframe_idx: + # frames_to_yield = index - self._next_idx + if not constant_framerate and index > self._next_idx: + frames_to_yield = index - self._next_idx + elif not constant_framerate: + # seek backwards and can't link idx and pts + self._container.seek(0) + self._decoder = self._container.decode(video=0) + self._next_idx = 0 + + frames_to_yield = index + else: + # we know that the time between consecutive frames is constant + # hence we can link index and pts + + # how many pts lie between two frames + sec_delta = 1 / self._video_stream.guessed_rate + pts_delta = sec_delta / self._video_stream.time_base + + index_pts = int(index * pts_delta) + + # this only seeks to the closed (preceeding) keyframe + self._container.seek(index_pts, stream=self._video_stream) + self._decoder = self._container.decode(video=0) + + # this may be made faster if we could get the keyframe's time without + # decoding it + keyframe = next(self._decoder) + keyframe_time = keyframe.pts * keyframe.time_base + keyframe_pts = int(keyframe_time / self._video_stream.time_base) + keyframe_index = keyframe_pts // pts_delta + + self._container.seek(index_pts, stream=self._video_stream) + self._next_idx = keyframe_index + + frames_to_yield = index - keyframe_index + + for _ in range(frames_to_yield): + next(self._decoder) + self._next_idx += 1 + + def _flush_writer(self): + """Flush the filter and encoder + + This will reset the filter to `None` and send 
class RawPyPlugin(PluginV3):
    """A class representing the rawpy plugin.

    Methods
    -------

    .. autosummary::
        :toctree: _plugins/rawpy

        RawPyPlugin.read
    """

    # Metadata keys describing values already applied to the pixel data
    # while reading; dropped by metadata() when ``exclude_applied`` is True
    # so that only "width", "height" and "pixel_aspect" remain.
    _APPLIED_KEYS = (
        "black_level_per_channel",
        "camera_white_level_per_channel",
        "color_desc",
        "color_matrix",
        "daylight_whitebalance",
        "dtype",
        "flip",
        "num_colors",
        "tone_curve",
        "raw_width",
        "raw_height",
        "raw_shape",
        "iwidth",
        "iheight",
        "white_level",
    )

    def __init__(self, request: Request) -> None:
        """Instantiate a new rawpy plugin object

        Parameters
        ----------
        request: Request
            A request object representing the resource to be operated on.

        Raises
        ------
        InitializationError
            If rawpy cannot read the resource, or if it is opened for
            writing (rawpy is read-only).
        """

        super().__init__(request)

        self._image_file = None

        if request.mode.io_mode == IOMode.read:
            try:
                self._image_file = rawpy.imread(request.get_file())
            except (
                rawpy.NotSupportedError,
                rawpy.LibRawFileUnsupportedError,
                rawpy.LibRawIOError,
            ):
                if request._uri_type == URI_BYTES:
                    raise InitializationError(
                        "RawPy can not read the provided bytes."
                    ) from None
                else:
                    raise InitializationError(
                        f"RawPy can not read {request.raw_uri}."
                    ) from None
        elif request.mode.io_mode == IOMode.write:
            raise InitializationError("RawPy does not support writing.") from None

    def close(self) -> None:
        """Release the rawpy handle and finish the request."""
        if self._image_file:
            self._image_file.close()

        self._request.finish()

    def read(self, *, index: int = 0, **kwargs) -> np.ndarray:
        """Read Raw Image.

        Parameters
        ----------
        index : int
            If ``...`` (Ellipsis), a leading batch axis of length 1 is
            prepended to the returned array; any integer returns the plain
            image (raw files contain exactly one image).
        kwargs : Any
            Forwarded verbatim to rawpy's ``postprocess()``.

        Returns
        -------
        nd_image: ndarray
            The image data
        """

        # Bug fix: the previous implementation swallowed every exception
        # from postprocess() and then crashed with an UnboundLocalError on
        # ``nd_image``; let the real error propagate instead.
        nd_image: np.ndarray = self._image_file.postprocess(**kwargs)

        if index is Ellipsis:
            nd_image = nd_image[None, ...]

        return nd_image

    def write(self, ndimage: Union[ArrayLike, List[ArrayLike]]) -> Optional[bytes]:
        """RawPy does not support writing."""
        raise NotImplementedError()

    def iter(self) -> Iterator[np.ndarray]:
        """Load the image.

        Yields
        ------
        nd_image: ndarray
            The image data
        """

        # Bug fix: errors used to be silenced here, turning a failed read
        # into an empty iterator; propagate them instead.
        yield self.read()

    def metadata(
        self, index: int = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Read ndimage metadata.

        Parameters
        ----------
        index : int
            Ignored; raw files contain a single image.
        exclude_applied : bool
            If True, exclude metadata fields that are applied to the image while
            reading, keeping only "width", "height" and "pixel_aspect".

        Returns
        -------
        metadata : dict
            A dictionary of format-specific metadata.
        """

        image_size = self._image_file.sizes

        metadata = {
            "black_level_per_channel": self._image_file.black_level_per_channel,
            "camera_white_level_per_channel": (
                self._image_file.camera_white_level_per_channel
            ),
            "color_desc": self._image_file.color_desc,
            "color_matrix": self._image_file.color_matrix,
            "daylight_whitebalance": self._image_file.daylight_whitebalance,
            "dtype": self._image_file.raw_image.dtype,
            "flip": image_size.flip,
            "num_colors": self._image_file.num_colors,
            "tone_curve": self._image_file.tone_curve,
            "width": image_size.width,
            "height": image_size.height,
            "raw_width": image_size.raw_width,
            "raw_height": image_size.raw_height,
            "raw_shape": self._image_file.raw_image.shape,
            "iwidth": image_size.iwidth,
            "iheight": image_size.iheight,
            "pixel_aspect": image_size.pixel_aspect,
            "white_level": self._image_file.white_level,
        }

        if exclude_applied:
            # Single loop instead of 15 individual pop() calls.
            for key in self._APPLIED_KEYS:
                metadata.pop(key, None)

        return metadata

    def properties(self, index: int = None) -> ImageProperties:
        """Standardized ndimage metadata

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        This does not decode pixel data and is fast for large images.
        """

        image_size = self._image_file.sizes

        width: int = image_size.width
        height: int = image_size.height
        shape: Tuple[int, ...] = (height, width)

        dtype = self._image_file.raw_image.dtype

        return ImageProperties(shape=shape, dtype=dtype)
# Split up in real ITK and all supported formats.
# NOTE(review): several entries lack a leading dot ("nia", "hdr", "hdf5",
# "lsm", "mnc", ...); verify they can ever match ``request.extension``.
ITK_FORMATS = (
    ".gipl",
    ".ipl",
    ".mha",
    ".mhd",
    ".nhdr",
    "nia",
    "hdr",
    ".nrrd",
    ".nii",
    ".nii.gz",
    ".img",
    ".img.gz",
    ".vtk",
    "hdf5",
    "lsm",
    "mnc",
    "mnc2",
    "mgh",
    "mnc",
    "pic",
)
ALL_FORMATS = ITK_FORMATS + (
    ".bmp",
    ".jpeg",
    ".jpg",
    ".png",
    ".tiff",
    ".tif",
    ".dicom",
    ".dcm",
    ".gdcm",
)


class ItkFormat(Format):
    """See :mod:`imageio.plugins.simpleitk`"""

    def _can_read(self, request):
        # ITK-only formats are always claimed so that a useful error is
        # raised later if the backend is not installed; common formats are
        # claimed only when itk or SimpleITK is importable.
        ext = request.extension
        if ext in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return ext in ALL_FORMATS

    def _can_write(self, request):
        ext = request.extension
        if ext in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return ext in ALL_FORMATS

    # -- reader

    class Reader(Format.Reader):
        def _open(self, pixel_type=None, fallback_only=None, **kwargs):
            if not _itk:
                load_lib()
            # Pass the optional arguments positionally, in declaration order.
            extra_args = []
            if pixel_type is not None:
                extra_args.append(pixel_type)
            if fallback_only is not None:
                extra_args.append(fallback_only)
            self._img = _read_function(
                self.request.get_local_filename(), *extra_args
            )

        def _get_length(self):
            # The whole volume is exposed as a single image.
            return 1

        def _close(self):
            pass

        def _get_data(self, index):
            if index != 0:
                error_msg = "Index out of range while reading from itk file"
                raise IndexError(error_msg)
            # Return array and empty meta data
            return _itk.GetArrayFromImage(self._img), {}

        def _get_meta_data(self, index):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            if not _itk:
                load_lib()

        def _close(self):
            pass

        def _append_data(self, im, meta):
            _itk_img = _itk.GetImageFromArray(im)
            _write_function(_itk_img, self.request.get_local_filename())

        def set_meta_data(self, meta):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)
    @staticmethod
    def parse_comments(
        comments: Sequence[str], version: Tuple[int, int]
    ) -> Dict[str, Any]:
        """Extract SDT-control metadata from comments

        Parameters
        ----------
        comments
            List of SPE file comments, typically ``metadata["comments"]``.
        version
            Major and minor version of SDT-control metadata format

        Returns
        -------
        Dict of metadata. Fields that fail to decode are present with a
        ``None`` value and produce a warning. The concatenation of comment
        strings 0 and 2 is returned under the ``"comment"`` key.
        """
        sdt_md = {}
        for minor in range(version[1] + 1):
            # Metadata with same major version is backwards compatible.
            # Fields are specified incrementally in `comment_fields`.
            # E.g. if the file has version 5.01, `comment_fields[5, 0]` and
            # `comment_fields[5, 1]` need to be decoded.
            try:
                cmt = __class__.comment_fields[version[0], minor]
            except KeyError:
                # No fields were added in this particular minor version.
                continue
            for name, spec in cmt.items():
                try:
                    # spec.n selects the comment string, spec.slice the
                    # character range within it, spec.cvt converts the text,
                    # and spec.scale is an optional multiplicative factor.
                    v = spec.cvt(comments[spec.n][spec.slice])
                    if spec.scale is not None:
                        v *= spec.scale
                    sdt_md[name] = v
                except Exception as e:
                    warnings.warn(
                        f"Failed to decode SDT-control metadata field `{name}`: {e}"
                    )
                    sdt_md[name] = None
        if version not in __class__.comment_fields:
            # The exact (major, minor) pair is unknown; whatever could be
            # decoded from earlier minor versions above is still returned.
            supported_ver = ", ".join(
                map(lambda x: f"{x[0]}.{x[1]:02}", __class__.comment_fields)
            )
            warnings.warn(
                f"Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. "
                f"Only versions {supported_ver} are supported. "
                "Some or all SDT-control metadata may be missing."
            )
        comment = comments[0] + comments[2]
        sdt_md["comment"] = comment.strip()
        return sdt_md
+ """ + comver = __class__.get_comment_version(meta["comments"]) + if any(c < 0 for c in comver): + # This file most likely was not created by SDT-control + logger.debug("SDT-control comments not found.") + return + + sdt_meta = __class__.parse_comments(meta["comments"], comver) + meta.pop("comments") + meta.update(sdt_meta) + + # Get date and time in a usable format + dt = __class__.get_datetime(meta["date"], meta["time_local"]) + if dt: + meta["datetime"] = dt + meta.pop("date") + meta.pop("time_local") + + sp4 = meta["spare_4"] + try: + meta["modulation_script"] = sp4.decode(char_encoding) + meta.pop("spare_4") + except UnicodeDecodeError: + warnings.warn( + "Failed to decode SDT-control laser " + "modulation script. Bad char_encoding?" + ) + + # Get rid of unused data + meta.pop("time_utc") + meta.pop("exposure_sec") + + +class SpePlugin(PluginV3): + def __init__( + self, + request: Request, + check_filesize: bool = True, + char_encoding: Optional[str] = None, + sdt_meta: Optional[bool] = None, + ) -> None: + """Instantiate a new SPE file plugin object + + Parameters + ---------- + request : Request + A request object representing the resource to be operated on. + check_filesize : bool + If True, compute the number of frames from the filesize, compare it + to the frame count in the file header, and raise a warning if the + counts don't match. (Certain software may create files with + char_encoding : str + Deprecated. Exists for backwards compatibility; use ``char_encoding`` of + ``metadata`` instead. + sdt_meta : bool + Deprecated. Exists for backwards compatibility; use ``sdt_control`` of + ``metadata`` instead. + + """ + + super().__init__(request) + if request.mode.io_mode == IOMode.write: + raise InitializationError("cannot write SPE files") + + if char_encoding is not None: + warnings.warn( + "Passing `char_encoding` to the constructor is deprecated. 
" + "Use `char_encoding` parameter of the `metadata()` method " + "instead.", + DeprecationWarning, + ) + self._char_encoding = char_encoding + if sdt_meta is not None: + warnings.warn( + "Passing `sdt_meta` to the constructor is deprecated. " + "Use `sdt_control` parameter of the `metadata()` method " + "instead.", + DeprecationWarning, + ) + self._sdt_meta = sdt_meta + + self._file = self.request.get_file() + + try: + # Spec.basic contains no string, no need to worry about character + # encoding. + info = self._parse_header(Spec.basic, "latin1") + self._file_header_ver = info["file_header_ver"] + self._dtype = Spec.dtypes[info["datatype"]] + self._shape = (info["ydim"], info["xdim"]) + self._len = info["NumFrames"] + + if check_filesize: + # Some software writes incorrect `NumFrames` metadata. + # To determine the number of frames, check the size of the data + # segment -- until the end of the file for SPE<3, until the + # xml footer for SPE>=3. + if info["file_header_ver"] >= 3: + data_end = info["xml_footer_offset"] + else: + self._file.seek(0, os.SEEK_END) + data_end = self._file.tell() + line = data_end - Spec.data_start + line //= self._shape[0] * self._shape[1] * self._dtype.itemsize + if line != self._len: + warnings.warn( + f"The file header of {self.request.filename} claims there are " + f"{self._len} frames, but there are actually {line} frames." + ) + self._len = min(line, self._len) + self._file.seek(Spec.data_start) + except Exception: + raise InitializationError("SPE plugin cannot read the provided file.") + + def read(self, *, index: int = ...) -> np.ndarray: + """Read a frame or all frames from the file + + Parameters + ---------- + index : int + Select the index-th frame from the file. If index is `...`, + select all frames and stack them along a new axis. + + Returns + ------- + A Numpy array of pixel values. 
+ + """ + + if index is Ellipsis: + read_offset = Spec.data_start + count = self._shape[0] * self._shape[1] * self._len + out_shape = (self._len, *self._shape) + elif index < 0: + raise IndexError(f"Index `{index}` is smaller than 0.") + elif index >= self._len: + raise IndexError( + f"Index `{index}` exceeds the number of frames stored in this file (`{self._len}`)." + ) + else: + read_offset = ( + Spec.data_start + + index * self._shape[0] * self._shape[1] * self._dtype.itemsize + ) + count = self._shape[0] * self._shape[1] + out_shape = self._shape + + self._file.seek(read_offset) + data = np.fromfile(self._file, dtype=self._dtype, count=count) + return data.reshape(out_shape) + + def iter(self) -> Iterator[np.ndarray]: + """Iterate over the frames in the file + + Yields + ------ + A Numpy array of pixel values. + """ + + return (self.read(index=i) for i in range(self._len)) + + def metadata( + self, + index: int = ..., + exclude_applied: bool = True, + char_encoding: str = "latin1", + sdt_control: bool = True, + ) -> Dict[str, Any]: + """SPE specific metadata. + + Parameters + ---------- + index : int + Ignored as SPE files only store global metadata. + exclude_applied : bool + Ignored. Exists for API compatibility. + char_encoding : str + The encoding to use when parsing strings. + sdt_control : bool + If `True`, decode special metadata written by the + SDT-control software if present. + + Returns + ------- + metadata : dict + Key-value pairs of metadata. + + Notes + ----- + SPE v3 stores metadata as XML, whereas SPE v2 uses a binary format. + + .. rubric:: Supported SPE v2 Metadata fields + + ROIs : list of dict + Regions of interest used for recording images. Each dict has the + "top_left" key containing x and y coordinates of the top left corner, + the "bottom_right" key with x and y coordinates of the bottom right + corner, and the "bin" key with number of binned pixels in x and y + directions. 
+ comments : list of str + The SPE format allows for 5 comment strings of 80 characters each. + controller_version : int + Hardware version + logic_output : int + Definition of output BNC + amp_hi_cap_low_noise : int + Amp switching mode + mode : int + Timing mode + exp_sec : float + Alternative exposure in seconds + date : str + Date string + detector_temp : float + Detector temperature + detector_type : int + CCD / diode array type + st_diode : int + Trigger diode + delay_time : float + Used with async mode + shutter_control : int + Normal, disabled open, or disabled closed + absorb_live : bool + on / off + absorb_mode : int + Reference strip or file + can_do_virtual_chip : bool + True or False whether chip can do virtual chip + threshold_min_live : bool + on / off + threshold_min_val : float + Threshold minimum value + threshold_max_live : bool + on / off + threshold_max_val : float + Threshold maximum value + time_local : str + Experiment local time + time_utc : str + Experiment UTC time + adc_offset : int + ADC offset + adc_rate : int + ADC rate + adc_type : int + ADC type + adc_resolution : int + ADC resolution + adc_bit_adjust : int + ADC bit adjust + gain : int + gain + sw_version : str + Version of software which created this file + spare_4 : bytes + Reserved space + readout_time : float + Experiment readout time + type : str + Controller type + clockspeed_us : float + Vertical clock speed in microseconds + readout_mode : ["full frame", "frame transfer", "kinetics", ""] + Readout mode. Empty string means that this was not set by the + Software. 
+ window_size : int + Window size for Kinetics mode + file_header_ver : float + File header version + chip_size : [int, int] + x and y dimensions of the camera chip + virt_chip_size : [int, int] + Virtual chip x and y dimensions + pre_pixels : [int, int] + Pre pixels in x and y dimensions + post_pixels : [int, int], + Post pixels in x and y dimensions + geometric : list of {"rotate", "reverse", "flip"} + Geometric operations + sdt_major_version : int + (only for files created by SDT-control) + Major version of SDT-control software + sdt_minor_version : int + (only for files created by SDT-control) + Minor version of SDT-control software + sdt_controller_name : str + (only for files created by SDT-control) + Controller name + exposure_time : float + (only for files created by SDT-control) + Exposure time in seconds + color_code : str + (only for files created by SDT-control) + Color channels used + detection_channels : int + (only for files created by SDT-control) + Number of channels + background_subtraction : bool + (only for files created by SDT-control) + Whether background subtraction war turned on + em_active : bool + (only for files created by SDT-control) + Whether EM was turned on + em_gain : int + (only for files created by SDT-control) + EM gain + modulation_active : bool + (only for files created by SDT-control) + Whether laser modulation (“attenuate”) was turned on + pixel_size : float + (only for files created by SDT-control) + Camera pixel size + sequence_type : str + (only for files created by SDT-control) + Type of sequnce (standard, TOCCSL, arbitrary, …) + grid : float + (only for files created by SDT-control) + Sequence time unit (“grid size”) in seconds + n_macro : int + (only for files created by SDT-control) + Number of macro loops + delay_macro : float + (only for files created by SDT-control) + Time between macro loops in seconds + n_mini : int + (only for files created by SDT-control) + Number of mini loops + delay_mini : float + (only for 
files created by SDT-control) + Time between mini loops in seconds + n_micro : int (only for files created by SDT-control) + Number of micro loops + delay_micro : float (only for files created by SDT-control) + Time between micro loops in seconds + n_subpics : int + (only for files created by SDT-control) + Number of sub-pictures + delay_shutter : float + (only for files created by SDT-control) + Camera shutter delay in seconds + delay_prebleach : float + (only for files created by SDT-control) + Pre-bleach delay in seconds + bleach_time : float + (only for files created by SDT-control) + Bleaching time in seconds + recovery_time : float + (only for files created by SDT-control) + Recovery time in seconds + comment : str + (only for files created by SDT-control) + User-entered comment. This replaces the "comments" field. + datetime : datetime.datetime + (only for files created by SDT-control) + Combines the "date" and "time_local" keys. The latter two plus + "time_utc" are removed. + modulation_script : str + (only for files created by SDT-control) + Laser modulation script. Replaces the "spare_4" key. + bleach_piezo_active : bool + (only for files created by SDT-control) + Whether piezo for bleaching was enabled + """ + + if self._file_header_ver < 3: + if self._char_encoding is not None: + char_encoding = self._char_encoding + if self._sdt_meta is not None: + sdt_control = self._sdt_meta + return self._metadata_pre_v3(char_encoding, sdt_control) + return self._metadata_post_v3() + + def _metadata_pre_v3(self, char_encoding: str, sdt_control: bool) -> Dict[str, Any]: + """Extract metadata from SPE v2 files + + Parameters + ---------- + char_encoding + String character encoding + sdt_control + If `True`, try to decode special metadata written by the + SDT-control software. + + Returns + ------- + dict mapping metadata names to values. 
+ + """ + + m = self._parse_header(Spec.metadata, char_encoding) + + nr = m.pop("NumROI", None) + nr = 1 if nr < 1 else nr + m["ROIs"] = roi_array_to_dict(m["ROIs"][:nr]) + + # chip sizes + m["chip_size"] = [m.pop(k, None) for k in ("xDimDet", "yDimDet")] + m["virt_chip_size"] = [m.pop(k, None) for k in ("VChipXdim", "VChipYdim")] + m["pre_pixels"] = [m.pop(k, None) for k in ("XPrePixels", "YPrePixels")] + m["post_pixels"] = [m.pop(k, None) for k in ("XPostPixels", "YPostPixels")] + + # convert comments from numpy.str_ to str + m["comments"] = [str(c) for c in m["comments"]] + + # geometric operations + g = [] + f = m.pop("geometric", 0) + if f & 1: + g.append("rotate") + if f & 2: + g.append("reverse") + if f & 4: + g.append("flip") + m["geometric"] = g + + # Make some additional information more human-readable + t = m["type"] + if 1 <= t <= len(Spec.controllers): + m["type"] = Spec.controllers[t - 1] + else: + m["type"] = None + r = m["readout_mode"] + if 1 <= r <= len(Spec.readout_modes): + m["readout_mode"] = Spec.readout_modes[r - 1] + else: + m["readout_mode"] = None + + # bools + for k in ( + "absorb_live", + "can_do_virtual_chip", + "threshold_min_live", + "threshold_max_live", + ): + m[k] = bool(m[k]) + + # Extract SDT-control metadata if desired + if sdt_control: + SDTControlSpec.extract_metadata(m, char_encoding) + + return m + + def _metadata_post_v3(self) -> Dict[str, Any]: + """Extract XML metadata from SPE v3 files + + Returns + ------- + dict with key `"__xml"`, whose value is the XML metadata + """ + + info = self._parse_header(Spec.basic, "latin1") + self._file.seek(info["xml_footer_offset"]) + xml = self._file.read() + return {"__xml": xml} + + def properties(self, index: int = ...) -> ImageProperties: + """Standardized ndimage metadata. + + Parameters + ---------- + index : int + If the index is an integer, select the index-th frame and return + its properties. 
    def _parse_header(
        self, spec: Mapping[str, Tuple], char_encoding: str
    ) -> Dict[str, Any]:
        """Get information from SPE file header

        Parameters
        ----------
        spec
            Maps header entry name to its location, data type description and
            optionally number of entries. See :py:attr:`Spec.basic` and
            :py:attr:`Spec.metadata`.
        char_encoding
            String character encoding

        Returns
        -------
        Dict mapping header entry name to its value
        """

        ret = {}
        # Decode each string from the numpy array read by np.fromfile
        decode = np.vectorize(lambda x: x.decode(char_encoding))

        for name, sp in spec.items():
            # sp = (byte offset, dtype[, count]); count defaults to 1.
            self._file.seek(sp[0])
            cnt = 1 if len(sp) < 3 else sp[2]
            v = np.fromfile(self._file, dtype=sp[1], count=cnt)
            if v.dtype.kind == "S" and name not in Spec.no_decode:
                # Silently ignore string decoding failures (the raw bytes
                # are kept in that case)
                try:
                    v = decode(v)
                except Exception:
                    warnings.warn(
                        f'Failed to decode "{name}" metadata '
                        "string. Check `char_encoding` parameter."
                    )

            try:
                # For convenience, if the array contains only one single
                # entry, return this entry itself.
                v = v.item()
            except ValueError:
                # More than one entry: return as a (squeezed) array.
                v = np.squeeze(v)
            ret[name] = v
        return ret
Keys are "top_left", "bottom_right", and "bin", + values are tuples whose first element is the x axis value and the + second element is the y axis value. + """ + + dict_list = [] + a = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]] + for sx, sy, ex, ey, gx, gy in a: + roi_dict = { + "top_left": [int(sx), int(sy)], + "bottom_right": [int(ex), int(ey)], + "bin": [int(gx), int(gy)], + } + dict_list.append(roi_dict) + return dict_list diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/swf.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/swf.py new file mode 100644 index 0000000000000000000000000000000000000000..9d507ddeeb8576de8fc3e11b5cdb865e448db17e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/swf.py @@ -0,0 +1,336 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write SWF files. + +Backend: internal + +Shockwave flash (SWF) is a media format designed for rich and +interactive animations. This plugin makes use of this format to +store a series of images in a lossless format with good compression +(zlib). The resulting images can be shown as an animation using +a flash player (such as the browser). + +SWF stores images in RGBA format. RGB or grayscale images are +automatically converted. SWF does not support meta data. + +Parameters for reading +---------------------- +loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + +Parameters for saving +--------------------- +fps : int + The speed to play the animation. Default 12. +loop : bool + If True, add a tag to the end of the file to play again from + the first frame. Most flash players will then play the movie + in a loop. Note that the imageio SWF Reader does not check this + tag. Default True. 
        def _open(self, loop=False):
            """Open an SWF resource for reading.

            Parameters
            ----------
            loop : bool
                If True, frame indices wrap around instead of raising
                IndexError once past the last image.
            """
            if not _swf:
                load_lib()

            self._arg_loop = bool(loop)

            self._fp = self.request.get_file()

            # Check the 3-byte signature: FWS = uncompressed, CWS = zlib.
            tmp = self.request.firstbytes[0:3].decode("ascii", "ignore")
            if tmp == "FWS":
                pass  # OK
            elif tmp == "CWS":
                # Compressed: everything after the 8-byte header is zlib
                # data, so decompress and keep the header as-is.
                bb = self._fp.read()
                bb = bb[:8] + zlib.decompress(bb[8:])
                # Wrap up in a file object
                self._fp = BytesIO(bb)
            else:
                raise IOError("This does not look like a valid SWF file")

            # Skip first bytes. This also tests support for seeking: file
            # objects that cannot seek put the reader in streaming mode.
            try:
                self._fp.seek(8)
                self._streaming_mode = False
            except Exception:
                self._streaming_mode = True
                self._fp_read(8)

            # Skip the header RECT: its first 5 bits give the per-field bit
            # width, followed by 4 fields of that width (hence 5 + nbits*4
            # bits, rounded up to whole bytes). The 3 extra bytes skipped
            # afterwards presumably hold frame rate and frame count -- see
            # the SWF specification.
            # Note that the number of frames is there, which we could
            # potentially use, but the number of frames does not necessarily
            # correspond to the number of images.
            nbits = _swf.bits2int(self._fp_read(1), 5)
            nbits = 5 + nbits * 4
            Lrect = nbits / 8.0
            if Lrect % 1:
                Lrect += 1
            Lrect = int(Lrect)
            self._fp_read(Lrect + 3)

            # Now the rest is basically tags ...
            self._imlocs = []  # tuple (loc, sze, T, L1)
            if not self._streaming_mode:
                # Collect locations of frames, while skipping through the
                # data. This does not read any of the tag *data*.
                try:
                    while True:
                        isimage, sze, T, L1 = self._read_one_tag()
                        loc = self._fp.tell()
                        if isimage:
                            # Still need to check if the format is right
                            # (`format` shadows the builtin; kept for
                            # byte-compatibility).
                            format = ord(self._fp_read(3)[2:])
                            if format == 5:  # RGB or RGBA lossless
                                self._imlocs.append((loc, sze, T, L1))
                        self._fp.seek(loc + sze)  # Skip over tag
                except IndexError:
                    pass  # done reading
read_pixels should return an ndarray, since we checked the format
+ if self._arg_compress: + bb = self._fp.getvalue() + self._fp = self._fp_real + self._fp.write(bb[:8]) + self._fp.write(zlib.compress(bb[8:])) + sze = self._fp.tell() # renew sze value + # set size + self._fp.seek(4) + self._fp.write(_swf.int2uint32(sze)) + self._fp = None # Disable + + # Write html? + if self._arg_html and os.path.isfile(self.request.filename): + dirname, fname = os.path.split(self.request.filename) + filename = os.path.join(dirname, fname[:-4] + ".html") + w, h = self._framesize + html = HTML % (fname, w, h, fname) + with open(filename, "wb") as f: + f.write(html.encode("utf-8")) + + def _write_header(self, framesize, fps): + self._framesize = framesize + # Called as soon as we know framesize; when we get first frame + bb = b"" + bb += "FC"[self._arg_compress].encode("ascii") + bb += "WS".encode("ascii") # signature bytes + bb += _swf.int2uint8(8) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += ( + _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + ) + bb += _swf.int2uint8(0) + _swf.int2uint8(fps) # FrameRate + self._location_to_save_nframes = len(bb) + bb += "00".encode("ascii") # nframes (leave open for now) + self._fp.write(bb) + + # Write some initial tags + taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0) + for tag in taglist: + self._fp.write(tag.get_tag()) + + def _complete(self): + # What if no images were saved? 
+ if not self._framecounter: + self._write_header((10, 10), self._arg_fps) + # Write stop tag if we do not loop + if not self._arg_loop: + self._fp.write(_swf.DoActionTag("stop").get_tag()) + # finish with end tag + self._fp.write("\x00\x00".encode("ascii")) + + def _append_data(self, im, meta): + # Correct shape and type + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Get frame size + wh = im.shape[1], im.shape[0] + # Write header on first frame + isfirstframe = False + if self._framecounter == 0: + isfirstframe = True + self._write_header(wh, self._arg_fps) + # Create tags + bm = _swf.BitmapTag(im) + sh = _swf.ShapeTag(bm.id, (0, 0), wh) + po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe)) + sf = _swf.ShowFrameTag() + # Write tags + for tag in [bm, sh, po, sf]: + self._fp.write(tag.get_tag()) + self._framecounter += 1 + + def set_meta_data(self, meta): + pass + + +HTML = """ + + + + Show Flash animation %s + + + + +""" diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..054eaf1a50ceae3c056b625682af4f68ba8d3f9d --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py @@ -0,0 +1,413 @@ +"""Read/Write TIFF files using tifffile. + +.. note:: + To use this plugin you need to have `tifffile + `_ installed:: + + pip install tifffile + +This plugin wraps tifffile, a powerful library to manipulate TIFF files. It +superseeds our previous tifffile plugin and aims to expose all the features of +tifffile. + +The plugin treats individual TIFF series as ndimages. A series is a sequence of +TIFF pages that, when combined describe a meaningful unit, e.g., a volumetric +image (where each slice is stored on an individual page) or a multi-color +staining picture (where each stain is stored on an individual page). 
Different +TIFF flavors/variants use series in different ways and, as such, the resulting +reading behavior may vary depending on the program used while creating a +particular TIFF file. + +Methods +------- +.. note:: + Check the respective function for a list of supported kwargs and detailed + documentation. + +.. autosummary:: + :toctree: + + TifffilePlugin.read + TifffilePlugin.iter + TifffilePlugin.write + TifffilePlugin.properties + TifffilePlugin.metadata + +Additional methods available inside the :func:`imopen ` +context: + +.. autosummary:: + :toctree: + + TifffilePlugin.iter_pages + +""" + +from io import BytesIO +from typing import Any, Dict, Optional, cast +import warnings + +import numpy as np +import tifffile + +from ..core.request import URI_BYTES, InitializationError, Request +from ..core.v3_plugin_api import ImageProperties, PluginV3 +from ..typing import ArrayLike + + +def _get_resolution(page: tifffile.TiffPage) -> Dict[str, Any]: + metadata = {} + + try: + metadata["resolution_unit"] = page.tags[296].value.value + except KeyError: + # tag 296 missing + return metadata + + try: + resolution_x = page.tags[282].value + resolution_y = page.tags[283].value + + metadata["resolution"] = ( + resolution_x[0] / resolution_x[1], + resolution_y[0] / resolution_y[1], + ) + except KeyError: + # tag 282 or 283 missing + pass + except ZeroDivisionError: + warnings.warn( + "Ignoring resolution metadata because at least one direction has a 0 " + "denominator.", + RuntimeWarning, + ) + + return metadata + + +class TifffilePlugin(PluginV3): + """Support for tifffile as backend. + + Parameters + ---------- + request : iio.Request + A request object that represents the users intent. It provides a + standard interface for a plugin to access the various ImageResources. + Check the docs for details. + kwargs : Any + Additional kwargs are forwarded to tifffile's constructor, i.e. + to ``TiffFile`` for reading or ``TiffWriter`` for writing. 
+ + """ + + def __init__(self, request: Request, **kwargs) -> None: + super().__init__(request) + self._fh = None + + if request.mode.io_mode == "r": + try: + self._fh = tifffile.TiffFile(request.get_file(), **kwargs) + except tifffile.tifffile.TiffFileError: + raise InitializationError("Tifffile can not read this file.") + else: + self._fh = tifffile.TiffWriter(request.get_file(), **kwargs) + + # --------------------- + # Standard V3 Interface + # --------------------- + + def read(self, *, index: int = None, page: int = None, **kwargs) -> np.ndarray: + """Read a ndimage or page. + + The ndimage returned depends on the value of both ``index`` and + ``page``. ``index`` selects the series to read and ``page`` allows + selecting a single page from the selected series. If ``index=None``, + ``page`` is understood as a flat index, i.e., the selection ignores + individual series inside the file. If both ``index`` and ``page`` are + ``None``, then all the series are read and returned as a batch. + + Parameters + ---------- + index : int + If ``int``, select the ndimage (series) located at that index inside + the file and return ``page`` from it. If ``None`` and ``page`` is + ``int`` read the page located at that (flat) index inside the file. + If ``None`` and ``page=None``, read all ndimages from the file and + return them as a batch. + page : int + If ``None`` return the full selected ndimage. If ``int``, read the + page at the selected index and return it. + kwargs : Any + Additional kwargs are forwarded to TiffFile's ``as_array`` method. + + Returns + ------- + ndarray : np.ndarray + The decoded ndimage or page. 
+ """ + + if "key" not in kwargs: + kwargs["key"] = page + elif page is not None: + raise ValueError("Can't use `page` and `key` at the same time.") + + # set plugin default for ``index`` + if index is not None and "series" in kwargs: + raise ValueError("Can't use `series` and `index` at the same time.") + elif "series" in kwargs: + index = kwargs.pop("series") + elif index is not None: + pass + else: + index = 0 + + if index is Ellipsis and page is None: + # read all series in the file and return them as a batch + ndimage = np.stack([x for x in self.iter(**kwargs)]) + else: + index = None if index is Ellipsis else index + ndimage = self._fh.asarray(series=index, **kwargs) + + return ndimage + + def iter(self, **kwargs) -> np.ndarray: + """Yield ndimages from the TIFF. + + Parameters + ---------- + kwargs : Any + Additional kwargs are forwarded to the TiffPageSeries' ``as_array`` + method. + + Yields + ------ + ndimage : np.ndarray + A decoded ndimage. + """ + + for sequence in self._fh.series: + yield sequence.asarray(**kwargs) + + def write( + self, ndimage: ArrayLike, *, is_batch: bool = False, **kwargs + ) -> Optional[bytes]: + """Save a ndimage as TIFF. + + Parameters + ---------- + ndimage : ArrayLike + The ndimage to encode and write to the ImageResource. + is_batch : bool + If True, the first dimension of the given ndimage is treated as a + batch dimension and each element will create a new series. + kwargs : Any + Additional kwargs are forwarded to TiffWriter's ``write`` method. + + Returns + ------- + encoded_image : bytes + If the ImageResource is ``""``, return the encoded bytes. + Otherwise write returns None. + + Notes + ----- + Incremental writing is supported. Subsequent calls to ``write`` will + create new series unless ``contiguous=True`` is used, in which case the + call to write will append to the current series. 
+ + """ + + if not is_batch: + ndimage = np.asarray(ndimage)[None, :] + + for image in ndimage: + self._fh.write(image, **kwargs) + + if self._request._uri_type == URI_BYTES: + self._fh.close() + file = cast(BytesIO, self._request.get_file()) + return file.getvalue() + + def metadata( + self, *, index: int = Ellipsis, page: int = None, exclude_applied: bool = True + ) -> Dict[str, Any]: + """Format-Specific TIFF metadata. + + The metadata returned depends on the value of both ``index`` and + ``page``. ``index`` selects a series and ``page`` allows selecting a + single page from the selected series. If ``index=Ellipsis``, ``page`` is + understood as a flat index, i.e., the selection ignores individual + series inside the file. If ``index=Ellipsis`` and ``page=None`` then + global (file-level) metadata is returned. + + Parameters + ---------- + index : int + Select the series of which to extract metadata from. If Ellipsis, treat + page as a flat index into the file's pages. + page : int + If not None, select the page of which to extract metadata from. If + None, read series-level metadata or, if ``index=...`` global, + file-level metadata. + exclude_applied : bool + For API compatibility. Currently ignored. + + Returns + ------- + metadata : dict + A dictionary with information regarding the tiff flavor (file-level) + or tiff tags (page-level). + """ + + if index is not Ellipsis and page is not None: + target = self._fh.series[index].pages[page] + elif index is not Ellipsis and page is None: + # This is based on my understanding that series-level metadata is + # stored in the first TIFF page. 
+ target = self._fh.series[index].pages[0] + elif index is Ellipsis and page is not None: + target = self._fh.pages[page] + else: + target = None + + metadata = {} + if target is None: + # return file-level metadata + metadata["byteorder"] = self._fh.byteorder + + for flag in tifffile.TIFF.FILE_FLAGS: + flag_value = getattr(self._fh, "is_" + flag) + metadata["is_" + flag] = flag_value + + if flag_value and hasattr(self._fh, flag + "_metadata"): + flavor_metadata = getattr(self._fh, flag + "_metadata") + if isinstance(flavor_metadata, tuple): + metadata.update(flavor_metadata[0]) + else: + metadata.update(flavor_metadata) + else: + # tifffile may return a TiffFrame instead of a page + target = target.keyframe + + metadata.update({tag.name: tag.value for tag in target.tags}) + metadata.update( + { + "planar_configuration": target.planarconfig, + "compression": target.compression, + "predictor": target.predictor, + "orientation": None, # TODO + "description1": target.description1, + "description": target.description, + "software": target.software, + **_get_resolution(target), + "datetime": target.datetime, + } + ) + + return metadata + + def properties(self, *, index: int = None, page: int = None) -> ImageProperties: + """Standardized metadata. + + The properties returned depend on the value of both ``index`` and + ``page``. ``index`` selects a series and ``page`` allows selecting a + single page from the selected series. If ``index=Ellipsis``, ``page`` is + understood as a flat index, i.e., the selection ignores individual + series inside the file. If ``index=Ellipsis`` and ``page=None`` then + global (file-level) properties are returned. If ``index=Ellipsis`` + and ``page=...``, file-level properties for the flattened index are + returned. + + Parameters + ---------- + index : int + If ``int``, select the ndimage (series) located at that index inside + the file. 
If ``Ellipsis`` and ``page`` is ``int`` extract the + properties of the page located at that (flat) index inside the file. + If ``Ellipsis`` and ``page=None``, return the properties for the + batch of all ndimages in the file. + page : int + If ``None`` return the properties of the full ndimage. If ``...`` + return the properties of the flattened index. If ``int``, + return the properties of the page at the selected index only. + + Returns + ------- + image_properties : ImageProperties + The standardized metadata (properties) of the selected ndimage or series. + + """ + index = index or 0 + page_idx = 0 if page in (None, Ellipsis) else page + + if index is Ellipsis: + target_page = self._fh.pages[page_idx] + else: + target_page = self._fh.series[index].pages[page_idx] + + if index is Ellipsis and page is None: + n_series = len(self._fh.series) + props = ImageProperties( + shape=(n_series, *target_page.shape), + dtype=target_page.dtype, + n_images=n_series, + is_batch=True, + spacing=_get_resolution(target_page).get("resolution"), + ) + elif index is Ellipsis and page is Ellipsis: + n_pages = len(self._fh.pages) + props = ImageProperties( + shape=(n_pages, *target_page.shape), + dtype=target_page.dtype, + n_images=n_pages, + is_batch=True, + spacing=_get_resolution(target_page).get("resolution"), + ) + else: + props = ImageProperties( + shape=target_page.shape, + dtype=target_page.dtype, + is_batch=False, + spacing=_get_resolution(target_page).get("resolution"), + ) + + return props + + def close(self) -> None: + if self._fh is not None: + self._fh.close() + + super().close() + + # ------------------------------ + # Add-on Interface inside imopen + # ------------------------------ + + def iter_pages(self, index=..., **kwargs): + """Yield pages from a TIFF file. + + This generator walks over the flat index of the pages inside an + ImageResource and yields them in order. + + Parameters + ---------- + index : int + The index of the series to yield pages from. 
If Ellipsis, walk over + the file's flat index (and ignore individual series). + kwargs : Any + Additional kwargs are passed to TiffPage's ``as_array`` method. + + Yields + ------ + page : np.ndarray + A page stored inside the TIFF file. + + """ + + if index is Ellipsis: + pages = self._fh.pages + else: + pages = self._fh.series[index] + + for page in pages: + yield page.asarray(**kwargs) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/py.typed b/minigpt2/lib/python3.10/site-packages/imageio/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt2/lib/python3.10/site-packages/imageio/typing.py b/minigpt2/lib/python3.10/site-packages/imageio/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..1e97d5b1d8079059a2e3b1f4674b878e17534f24 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/typing.py @@ -0,0 +1,17 @@ +from io import BytesIO +from typing import Union, BinaryIO +from pathlib import Path + +try: + from numpy.typing import ArrayLike +except ImportError: + # numpy<1.20 fall back to using ndarray + from numpy import ndarray as ArrayLike + +ImageResource = Union[str, bytes, BytesIO, Path, BinaryIO] + + +__all__ = [ + "ArrayLike", + "ImageResource", +] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/v2.py b/minigpt2/lib/python3.10/site-packages/imageio/v2.py new file mode 100644 index 0000000000000000000000000000000000000000..db2963b7cb3a634bbb9d25ddbb0f64f95dd9391a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/v2.py @@ -0,0 +1,676 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. 
+ +import re +import warnings +from numbers import Number +from pathlib import Path +from typing import Dict + +import numpy as np + +from imageio.core.legacy_plugin_wrapper import LegacyPlugin +from imageio.core.util import Array +from imageio.core.v3_plugin_api import PluginV3 + +from . import formats +from .config import known_extensions, known_plugins +from .core import RETURN_BYTES +from .core.imopen import imopen + +MEMTEST_DEFAULT_MIM = "256MB" +MEMTEST_DEFAULT_MVOL = "1GB" + + +mem_re = re.compile(r"^(\d+\.?\d*)\s*([kKMGTPEZY]?i?)B?$") +sizes = {"": 1, None: 1} +for i, si in enumerate([""] + list("kMGTPEZY")): + sizes[si] = 1000**i + if si: + sizes[si.upper() + "i"] = 1024**i + + +def to_nbytes(arg, default=None): + if not arg: + arg = float("inf") + + if arg is True: + arg = default + + if isinstance(arg, Number): + return arg + + match = mem_re.match(arg) + if match is None: + raise ValueError( + "Memory size could not be parsed " + "(is your capitalisation correct?): {}".format(arg) + ) + + num, unit = match.groups() + + try: + return float(num) * sizes[unit] + except KeyError: # pragma: no cover + # Note: I don't think we can reach this + raise ValueError( + "Memory size unit not recognised " + "(is your capitalisation correct?): {}".format(unit) + ) + + +def help(name=None): + """help(name=None) + + Print the documentation of the format specified by name, or a list + of supported formats if name is omitted. + + Parameters + ---------- + name : str + Can be the name of a format, a filename extension, or a full + filename. See also the :doc:`formats page <../formats/index>`. + """ + if not name: + print(formats) + else: + print(formats[name]) + + +def decypher_format_arg(format_name: str) -> Dict[str, str]: + """Split format into plugin and format + + The V2 API aliases plugins and supported formats. This function + splits these so that they can be fed separately to `iio.imopen`. 
+ + """ + + plugin = None + extension = None + + if format_name is None: + pass # nothing to do + elif Path(format_name).suffix.lower() in known_extensions: + extension = Path(format_name).suffix.lower() + elif format_name in known_plugins: + plugin = format_name + elif format_name.upper() in known_plugins: + plugin = format_name.upper() + elif format_name.lower() in known_extensions: + extension = format_name.lower() + elif "." + format_name.lower() in known_extensions: + extension = "." + format_name.lower() + else: + raise IndexError(f"No format known by name `{plugin}`.") + + return {"plugin": plugin, "extension": extension} + + +class LegacyReader: + def __init__(self, plugin_instance: PluginV3, **kwargs): + self.instance = plugin_instance + self.last_index = 0 + self.closed = False + + if ( + type(self.instance).__name__ == "PillowPlugin" + and kwargs.get("pilmode") is not None + ): + kwargs["mode"] = kwargs["pilmode"] + del kwargs["pilmode"] + + self.read_args = kwargs + + def close(self): + if not self.closed: + self.instance.close() + self.closed = True + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __del__(self): + self.close() + + @property + def request(self): + return self.instance.request + + @property + def format(self): + raise TypeError("V3 Plugins don't have a format.") + + def get_length(self): + return self.instance.properties(index=...).n_images + + def get_data(self, index): + self.last_index = index + img = self.instance.read(index=index, **self.read_args) + metadata = self.instance.metadata(index=index, exclude_applied=False) + return Array(img, metadata) + + def get_next_data(self): + return self.get_data(self.last_index + 1) + + def set_image_index(self, index): + self.last_index = index - 1 + + def get_meta_data(self, index=None): + return self.instance.metadata(index=index, exclude_applied=False) + + def iter_data(self): + for idx, img in enumerate(self.instance.iter()): + 
metadata = self.instance.metadata(index=idx, exclude_applied=False) + yield Array(img, metadata) + + def __iter__(self): + return self.iter_data() + + def __len__(self): + return self.get_length() + + +class LegacyWriter: + def __init__(self, plugin_instance: PluginV3, **kwargs): + self.instance = plugin_instance + self.last_index = 0 + self.closed = False + + if type(self.instance).__name__ == "PillowPlugin" and "pilmode" in kwargs: + kwargs["mode"] = kwargs["pilmode"] + del kwargs["pilmode"] + + self.write_args = kwargs + + def close(self): + if not self.closed: + self.instance.close() + self.closed = True + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __del__(self): + self.close() + + @property + def request(self): + return self.instance.request + + @property + def format(self): + raise TypeError("V3 Plugins don't have a format.") + + def append_data(self, im, meta=None): + # TODO: write metadata in the future; there is currently no + # generic way to do this with v3 plugins :( + if meta is not None: + warnings.warn( + "V3 Plugins currently don't have a uniform way to" + " write metadata, so any metadata is ignored." + ) + + # total_meta = dict() + # if meta is None: + # meta = {} + # if hasattr(im, "meta") and isinstance(im.meta, dict): + # total_meta.update(im.meta) + # total_meta.update(meta) + + return self.instance.write(im, **self.write_args) + + def set_meta_data(self, meta): + # TODO: write metadata + raise NotImplementedError( + "V3 Plugins don't have a uniform way to write metadata (yet)." 
+ ) + + +def is_batch(ndimage): + if isinstance(ndimage, (list, tuple)): + return True + + ndimage = np.asarray(ndimage) + if ndimage.ndim <= 2: + return False + elif ndimage.ndim == 3 and ndimage.shape[2] < 5: + return False + + return True + + +def is_volume(ndimage): + ndimage = np.asarray(ndimage) + if not is_batch(ndimage): + return False + + if ndimage.ndim == 3 and ndimage.shape[2] >= 5: + return True + elif ndimage.ndim == 4 and ndimage.shape[3] < 5: + return True + else: + return False + + +# Base functions that return a reader/writer + + +def get_reader(uri, format=None, mode="?", **kwargs): + """get_reader(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Reader` object which can be used to read data + and meta data from the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the reader a hint on what the user expects (default "?"): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + image_file = imopen(uri, "r" + mode, **imopen_args) + + if isinstance(image_file, LegacyPlugin): + return image_file.legacy_get_reader(**kwargs) + else: + return LegacyReader(image_file, **kwargs) + + +def get_writer(uri, format=None, mode="?", **kwargs): + """get_writer(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Writer` object which can be used to write data + and meta data to the specified file. 
+ + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + format : str + The format to use to write the file. By default imageio selects + the appropriate for you based on the filename. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the writer a hint on what the user expects (default '?'): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + image_file = imopen(uri, "w" + mode, **imopen_args) + if isinstance(image_file, LegacyPlugin): + return image_file.legacy_get_writer(**kwargs) + else: + return LegacyWriter(image_file, **kwargs) + + +# Images + + +def imread(uri, format=None, **kwargs): + """imread(uri, format=None, **kwargs) + + Reads an image from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Note that the image data is returned as-is, and may not always have + a dtype of uint8 (and thus may differ from what e.g. PIL returns). + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. 
+ """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + with imopen(uri, "ri", **imopen_args) as file: + result = file.read(index=0, **kwargs) + + return result + + +def imwrite(uri, im, format=None, **kwargs): + """imwrite(uri, im, format=None, **kwargs) + + Write an image to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + im : numpy.ndarray + The image data. Must be NxM, NxMx3 or NxMx4. + format : str + The format to use to write the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + imt = type(im) + im = np.asarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + + if is_batch(im) or im.ndim < 2: + raise ValueError("Image must be 2D (grayscale, RGB, or RGBA).") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wi", **imopen_args) as file: + return file.write(im, **kwargs) + + +# Multiple images + + +def mimread(uri, format=None, memtest=MEMTEST_DEFAULT_MIM, **kwargs): + """mimread(uri, format=None, memtest="256MB", **kwargs) + + Reads multiple images from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the images from, e.g. a filename,pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. 
+ memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '256MB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # used for mimread and mvolread + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MIM) + + images = list() + nbytes = 0 + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rI", **imopen_args) as file: + for image in file.iter(**kwargs): + images.append(image) + nbytes += image.nbytes + if nbytes > nbyte_limit: + raise RuntimeError( + "imageio.mimread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + if len(images) == 1 and is_batch(images[0]): + images = [*images[0]] + + return images + + +def mimwrite(uri, ims, format=None, **kwargs): + """mimwrite(uri, ims, format=None, **kwargs) + + Write multiple images to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the images to, e.g. 
a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxM, NxMx3 or NxMx4. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + if not is_batch(ims): + raise ValueError("Image data must be a sequence of ndimages.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wI", **imopen_args) as file: + return file.write(ims, is_batch=True, **kwargs) + + +# Volumes + + +def volread(uri, format=None, **kwargs): + """volread(uri, format=None, **kwargs) + + Reads a volume from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volume from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rv", **imopen_args) as file: + return file.read(index=0, **kwargs) + + +def volwrite(uri, im, format=None, **kwargs): + """volwrite(uri, vol, format=None, **kwargs) + + Write a volume to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + vol : numpy.ndarray + The image data. 
Must be NxMxL (or NxMxLxK if each voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + im = np.asarray(im) + if not is_volume(im): + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + with imopen(uri, "wv", **imopen_args) as file: + return file.write(im, is_batch=False, **kwargs) + + +# Multiple volumes + + +def mvolread(uri, format=None, memtest=MEMTEST_DEFAULT_MVOL, **kwargs): + """mvolread(uri, format=None, memtest='1GB', **kwargs) + + Reads multiple volumes from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volumes from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. 
represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '1GB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # used for mimread and mvolread + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MVOL) + + images = list() + nbytes = 0 + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rV", **imopen_args) as file: + for image in file.iter(**kwargs): + images.append(image) + nbytes += image.nbytes + if nbytes > nbyte_limit: + raise RuntimeError( + "imageio.mimread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + return images + + +def mvolwrite(uri, ims, format=None, **kwargs): + """mvolwrite(uri, vols, format=None, **kwargs) + + Write multiple volumes to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the volumes to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxMxL (or NxMxLxK if each + voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. 
+ """ + + for im in ims: + if not is_volume(im): + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wV", **imopen_args) as file: + return file.write(ims, is_batch=True, **kwargs) + + +# aliases +read = get_reader +save = get_writer +imsave = imwrite +mimsave = mimwrite +volsave = volwrite +mvolsave = mvolwrite + +__all__ = [ + "imread", + "mimread", + "volread", + "mvolread", + "imwrite", + "mimwrite", + "volwrite", + "mvolwrite", + # misc + "help", + "get_reader", + "get_writer", + "RETURN_BYTES", +] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/v2.pyi b/minigpt2/lib/python3.10/site-packages/imageio/v2.pyi new file mode 100644 index 0000000000000000000000000000000000000000..19adc32ef07c8864679a9d70578342bfd7823037 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/v2.pyi @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +from typing import Dict, Literal, Union, List, overload + +import numpy as np + +from .core.imopen import imopen +from .core import RETURN_BYTES, Array +from .typing import ImageResource, ArrayLike +from .core.format import Format + +MEMTEST_DEFAULT_MIM = "256MB" +MEMTEST_DEFAULT_MVOL = "1GB" + +def to_nbytes(arg: float = None, default=None) -> float: ... +def help(name=None) -> None: ... +def decypher_format_arg(format_name: Union[str, None]) -> Dict[str, str]: ... +def get_reader( + uri: ImageResource, format: Format = None, mode: str = "?", **kwargs +) -> Format.Reader: ... +def get_writer( + uri: ImageResource, format: Format = None, mode: str = "?", **kwargs +) -> Format.Writer: ... +def imread(uri: ImageResource, format: Format = None, **kwargs) -> Array: ... +@overload +def imwrite( + uri: Literal[""], im: ArrayLike, format: Format = None, **kwargs +) -> bytes: ... 
+@overload +def imwrite( + uri: ImageResource, im: ArrayLike, format: Format = None, **kwargs +) -> None: ... +def mimread( + uri: ImageResource, format: Format = None, memtest=MEMTEST_DEFAULT_MIM, **kwargs +) -> List[Array]: ... +@overload +def mimwrite( + uri: Literal[""], ims: List[ArrayLike], format: Format = None, **kwargs +) -> bytes: ... +@overload +def mimwrite( + uri: ImageResource, ims: List[ArrayLike], format: Format = None, **kwargs +) -> None: ... +def volread(uri: ImageResource, format: Format = None, **kwargs) -> Array: ... +@overload +def volwrite( + uri: Literal[""], im: ArrayLike, format: Format = None, **kwargs +) -> bytes: ... +@overload +def volwrite( + uri: ImageResource, im: ArrayLike, format: Format = None, **kwargs +) -> None: ... +def mvolread( + uri: ImageResource, + format: Format = None, + memtest: float = MEMTEST_DEFAULT_MVOL, + **kwargs +) -> List[Array]: ... +@overload +def mvolwrite( + uri: Literal[""], ims: List[ArrayLike], format: Format = None, **kwargs +) -> bytes: ... +@overload +def mvolwrite( + uri: ImageResource, ims: List[ArrayLike], format: Format = None, **kwargs +) -> None: ... + +# aliases +read = get_reader +save = get_writer +imsave = imwrite +mimsave = mimwrite +volsave = volwrite +mvolsave = mvolwrite diff --git a/minigpt2/lib/python3.10/site-packages/imageio/v3.py b/minigpt2/lib/python3.10/site-packages/imageio/v3.py new file mode 100644 index 0000000000000000000000000000000000000000..65d36e57fbf6f935981cbeeb710f078df8872206 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/v3.py @@ -0,0 +1,259 @@ +import numpy as np + +from .core.imopen import imopen + + +def imread(uri, *, index=None, plugin=None, extension=None, format_hint=None, **kwargs): + """Read an ndimage from a URI. + + Opens the given URI and reads an ndimage from it. The exact behavior + depends on both the file type and plugin used to open the file. To learn + about the exact behavior, check the documentation of the relevant plugin. 
+ Typically, imread attempts to read all data stored in the URI. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + index : {int, Ellipsis, None} + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return it. If + index is an ellipsis (...), read all ndimages in the file and stack them + along a new batch dimension. If index is None, let the plugin decide. + plugin : {str, None} + The plugin to use. If set to None (default) imread will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + format_hint : str + Deprecated. Use `extension` instead. + **kwargs : + Additional keyword arguments will be passed to the plugin's read call. + + Returns + ------- + image : ndimage + The ndimage located at the given URI. + """ + + plugin_kwargs = { + "legacy_mode": False, + "plugin": plugin, + "format_hint": format_hint, + "extension": extension, + } + + call_kwargs = kwargs + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + return np.asarray(img_file.read(**call_kwargs)) + + +def imiter(uri, *, plugin=None, extension=None, format_hint=None, **kwargs): + """Read a sequence of ndimages from a URI. + + Returns an iterable that yields ndimages from the given URI. The exact + behavior depends on both, the file type and plugin used to open the file. + To learn about the exact behavior, check the documentation of the relevant + plugin. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. 
a filename, pathlib.Path, + http address or file object, see the docs for more info. + plugin : {str, None} + The plugin to use. If set to None (default) imiter will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + format_hint : str + Deprecated. Use `extension` instead. + **kwargs : + Additional keyword arguments will be passed to the plugin's ``iter`` + call. + + Yields + ------ + image : ndimage + The next ndimage located at the given URI. + + """ + + with imopen( + uri, + "r", + legacy_mode=False, + plugin=plugin, + format_hint=format_hint, + extension=extension, + ) as img_file: + for image in img_file.iter(**kwargs): + # Note: casting to ndarray here to ensure compatibility + # with the v2.9 API + yield np.asarray(image) + + +def imwrite(uri, image, *, plugin=None, extension=None, format_hint=None, **kwargs): + """Write an ndimage to the given URI. + + The exact behavior depends on the file type and plugin used. To learn about + the exact behavior, check the documentation of the relevant plugin. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to save the image to, e.g. a filename, pathlib.Path, + http address or file object, check the docs for more info. + image : np.ndarray + The image to write to disk. + plugin : {str, None} + The plugin to use. If set to None (default) imwrite will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered, and + may also influence the format used when encoding. + format_hint : str + Deprecated. Use `extension` instead. 
+ **kwargs : + Additional keyword arguments will be passed to the plugin's ``write`` + call. + + Returns + ------- + encoded_image : None or Bytes + Returns ``None`` in all cases, except when ``uri`` is set to ````. + In this case it returns the encoded ndimage as a bytes string. + + """ + + with imopen( + uri, + "w", + legacy_mode=False, + plugin=plugin, + format_hint=format_hint, + extension=extension, + ) as img_file: + encoded = img_file.write(image, **kwargs) + + return encoded + + +def improps(uri, *, index=None, plugin=None, extension=None, **kwargs): + """Read standardized metadata. + + Opens the given URI and reads the properties of an ndimage from it. The + properties represent standardized metadata. This means that they will have + the same name regardless of the format being read or plugin/backend being + used. Further, any field will be, where possible, populated with a sensible + default (may be `None`) if the ImageResource does not declare a value in its + metadata. + + Parameters + ---------- + index : int + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + properties. If index is an ellipsis (...), read all ndimages in the file + and stack them along a new batch dimension and return their properties. + If index is None, let the plugin decide. + plugin : {str, None} + The plugin to be used. If None, performs a search for a matching + plugin. + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + **kwargs : + Additional keyword arguments will be passed to the plugin's ``properties`` + call. + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + Where possible, this will avoid loading pixel data. 
+ + See Also + -------- + imageio.core.v3_plugin_api.ImageProperties + + """ + + plugin_kwargs = {"legacy_mode": False, "plugin": plugin, "extension": extension} + + call_kwargs = kwargs + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + properties = img_file.properties(**call_kwargs) + + return properties + + +def immeta( + uri, *, index=None, plugin=None, extension=None, exclude_applied=True, **kwargs +): + """Read format-specific metadata. + + Opens the given URI and reads metadata for an ndimage from it. The contents + of the returned metadata dictionary is specific to both the image format and + plugin used to open the ImageResource. To learn about the exact behavior, + check the documentation of the relevant plugin. Typically, immeta returns a + dictionary specific to the image format, where keys match metadata field + names and values are a field's contents. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, http + address or file object, see the docs for more info. + index : {int, None} + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + metadata. If index is an ellipsis (...), return global metadata. If + index is None, let the plugin decide the default. + plugin : {str, None} + The plugin to be used. If None (default), performs a search for a + matching plugin. + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + **kwargs : + Additional keyword arguments will be passed to the plugin's metadata + method. + + Returns + ------- + image : ndimage + The ndimage located at the given URI. 
+ + """ + + plugin_kwargs = {"legacy_mode": False, "plugin": plugin, "extension": extension} + + call_kwargs = kwargs + call_kwargs["exclude_applied"] = exclude_applied + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + metadata = img_file.metadata(**call_kwargs) + + return metadata + + +__all__ = ["imopen", "imread", "imwrite", "imiter", "improps", "immeta"] diff --git a/minigpt2/lib/python3.10/site-packages/imageio/v3.pyi b/minigpt2/lib/python3.10/site-packages/imageio/v3.pyi new file mode 100644 index 0000000000000000000000000000000000000000..339e33e12f67af086b671ccf4f2e638ae914799e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/v3.pyi @@ -0,0 +1,62 @@ +from typing import Any, Dict, Iterator, List, Literal, Optional, Union, overload + +import numpy as np + +from .core.imopen import imopen as imopen +from .core.v3_plugin_api import ImageProperties +from .typing import ArrayLike, ImageResource + +def imread( + uri: ImageResource, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> np.ndarray: ... +def imiter( + uri: ImageResource, + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> Iterator[np.ndarray]: ... +@overload +def imwrite( + uri: Literal[""], + image: Union[ArrayLike, List[ArrayLike]], + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> bytes: ... +@overload +def imwrite( + uri: ImageResource, + image: Union[ArrayLike, List[ArrayLike]], + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> None: ... +def improps( + uri, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + **kwargs +) -> ImageProperties: ... 
+def immeta( + uri, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + exclude_applied: bool = True, + **kwargs +) -> Dict[str, Any]: ... diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b8429105a12055bcbfa3a506560cc18aaff502c6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor +inline at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx); +} + +// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _embedding_bag_per_sample_weights_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out); +} +// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _embedding_bag_per_sample_weights_backward_outf(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) { + return at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..250445f32634ea6c8fe06505f34896dd927f307b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & argsort_out(at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false); +TORCH_API at::Tensor & argsort_outf(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b81ad3fd3b89f9c5dff5b427637d34c36cc0ac16 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API dequantize_self { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.self(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API dequantize_tensors { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.tensors(Tensor[] tensors) -> Tensor[]") + static ::std::vector call(at::TensorList tensors); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API dequantize_self_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.self_out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API dequantize_tensors_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList tensors, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/empty_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/empty_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..35d3ccb227a3bdb84a8418d5f377667f7c52b8b1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/empty_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor empty(at::IntArrayRef size, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor empty(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); +TORCH_API at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor empty_symint(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h new file mode 100644 index 0000000000000000000000000000000000000000..400a0c43b0e2438bf15859fafb6bf07003da32e7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gather_out : public at::meta::structured_gather { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, const at::Tensor & out); +}; +TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle.h 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..bcdad03cdda8622c06d8a3c1c8e715bb299ce9f7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor +inline at::Tensor pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) { + return at::_ops::pixel_unshuffle::call(self, downscale_factor); +} + +// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pixel_unshuffle_out(at::Tensor & out, const at::Tensor & self, int64_t downscale_factor) { + return at::_ops::pixel_unshuffle_out::call(self, downscale_factor, out); +} +// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pixel_unshuffle_outf(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) { + return at::_ops::pixel_unshuffle_out::call(self, downscale_factor, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e7e2983dd345e6837f5d82a5b626491e7f40e7b9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_erfinv { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_erfinv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_erfinv(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_erfinv_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_erfinv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops