Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- my_container_sandbox/workspace/anaconda3/lib/libnpps.so.11 +3 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/BlpImagePlugin.py +497 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/DcxImagePlugin.py +89 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/FitsImagePlugin.py +71 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/GdImageFile.py +90 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/GifImagePlugin.py +1038 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImImagePlugin.py +376 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageCms.py +1029 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageFilter.py +538 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageMorph.py +245 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageShow.py +417 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageTk.py +301 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageTransform.py +102 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/McIdasImagePlugin.py +75 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/PyAccess.py +353 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/SgiImagePlugin.py +230 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/WebPImagePlugin.py +353 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/WmfImagePlugin.py +177 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/__init__.py +80 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/_imagingft.cpython-38-x86_64-linux-gnu.so +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/_imagingmorph.cpython-38-x86_64-linux-gnu.so +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_http_writer.cpython-38-x86_64-linux-gnu.so +3 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/__init__.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/api.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/cd.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/constant.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/legacy.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/md.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/models.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/version.cpython-38.pyc +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cli/__init__.py +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cli/normalizer.py +290 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/INSTALLER +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/LICENSE.txt +33 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/METADATA +257 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/RECORD +16 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/REQUESTED +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/WHEEL +6 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/top_level.txt +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/__init__.py +737 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/badness.py +392 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/chardata.py +315 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/cli.py +143 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/fixes.py +504 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/formatting.py +164 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/py.typed +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/METADATA +109 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/RECORD +121 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/WHEEL +6 -0
.gitattributes
CHANGED
|
@@ -318,3 +318,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs
|
|
| 318 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/builder.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 319 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs/libjpeg-1b553ed5.so.62.3.0 filter=lfs diff=lfs merge=lfs -text
|
| 320 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/_elementpath.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 318 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/builder.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 319 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs/libjpeg-1b553ed5.so.62.3.0 filter=lfs diff=lfs merge=lfs -text
|
| 320 |
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/_elementpath.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 321 |
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/sax.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 322 |
+
my_container_sandbox/workspace/anaconda3/lib/libnpps.so.11 filter=lfs diff=lfs merge=lfs -text
|
| 323 |
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_image.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 324 |
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_http_writer.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
my_container_sandbox/workspace/anaconda3/lib/libnpps.so.11
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1823027a9c5f03cd1c9a4ff5a44275bbe4a294ece529643c06d17b64bc2dafb6
|
| 3 |
+
size 17033384
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/BlpImagePlugin.py
ADDED
|
@@ -0,0 +1,497 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Blizzard Mipmap Format (.blp)
|
| 3 |
+
Jerome Leclanche <jerome@leclan.ch>
|
| 4 |
+
|
| 5 |
+
The contents of this file are hereby released in the public domain (CC0)
|
| 6 |
+
Full text of the CC0 license:
|
| 7 |
+
https://creativecommons.org/publicdomain/zero/1.0/
|
| 8 |
+
|
| 9 |
+
BLP1 files, used mostly in Warcraft III, are not fully supported.
|
| 10 |
+
All types of BLP2 files used in World of Warcraft are supported.
|
| 11 |
+
|
| 12 |
+
The BLP file structure consists of a header, up to 16 mipmaps of the
|
| 13 |
+
texture
|
| 14 |
+
|
| 15 |
+
Texture sizes must be powers of two, though the two dimensions do
|
| 16 |
+
not have to be equal; 512x256 is valid, but 512x200 is not.
|
| 17 |
+
The first mipmap (mipmap #0) is the full size image; each subsequent
|
| 18 |
+
mipmap halves both dimensions. The final mipmap should be 1x1.
|
| 19 |
+
|
| 20 |
+
BLP files come in many different flavours:
|
| 21 |
+
* JPEG-compressed (type == 0) - only supported for BLP1.
|
| 22 |
+
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
|
| 23 |
+
array of 8-bit values, one per pixel, left to right, top to bottom.
|
| 24 |
+
Each value is an index to the palette.
|
| 25 |
+
* DXT-compressed (type == 1, encoding == 2):
|
| 26 |
+
- DXT1 compression is used if alpha_encoding == 0.
|
| 27 |
+
- An additional alpha bit is used if alpha_depth == 1.
|
| 28 |
+
- DXT3 compression is used if alpha_encoding == 1.
|
| 29 |
+
- DXT5 compression is used if alpha_encoding == 7.
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
import os
|
| 33 |
+
import struct
|
| 34 |
+
import warnings
|
| 35 |
+
from enum import IntEnum
|
| 36 |
+
from io import BytesIO
|
| 37 |
+
|
| 38 |
+
from . import Image, ImageFile
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Format(IntEnum):
|
| 42 |
+
JPEG = 0
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class Encoding(IntEnum):
|
| 46 |
+
UNCOMPRESSED = 1
|
| 47 |
+
DXT = 2
|
| 48 |
+
UNCOMPRESSED_RAW_BGRA = 3
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class AlphaEncoding(IntEnum):
|
| 52 |
+
DXT1 = 0
|
| 53 |
+
DXT3 = 1
|
| 54 |
+
DXT5 = 7
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def __getattr__(name):
|
| 58 |
+
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
| 59 |
+
for enum, prefix in {
|
| 60 |
+
Format: "BLP_FORMAT_",
|
| 61 |
+
Encoding: "BLP_ENCODING_",
|
| 62 |
+
AlphaEncoding: "BLP_ALPHA_ENCODING_",
|
| 63 |
+
}.items():
|
| 64 |
+
if name.startswith(prefix):
|
| 65 |
+
name = name[len(prefix) :]
|
| 66 |
+
if name in enum.__members__:
|
| 67 |
+
warnings.warn(
|
| 68 |
+
prefix
|
| 69 |
+
+ name
|
| 70 |
+
+ " is "
|
| 71 |
+
+ deprecated
|
| 72 |
+
+ "Use "
|
| 73 |
+
+ enum.__name__
|
| 74 |
+
+ "."
|
| 75 |
+
+ name
|
| 76 |
+
+ " instead.",
|
| 77 |
+
DeprecationWarning,
|
| 78 |
+
stacklevel=2,
|
| 79 |
+
)
|
| 80 |
+
return enum[name]
|
| 81 |
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def unpack_565(i):
|
| 85 |
+
return (((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def decode_dxt1(data, alpha=False):
|
| 89 |
+
"""
|
| 90 |
+
input: one "row" of data (i.e. will produce 4*width pixels)
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
blocks = len(data) // 8 # number of blocks in row
|
| 94 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
| 95 |
+
|
| 96 |
+
for block in range(blocks):
|
| 97 |
+
# Decode next 8-byte block.
|
| 98 |
+
idx = block * 8
|
| 99 |
+
color0, color1, bits = struct.unpack_from("<HHI", data, idx)
|
| 100 |
+
|
| 101 |
+
r0, g0, b0 = unpack_565(color0)
|
| 102 |
+
r1, g1, b1 = unpack_565(color1)
|
| 103 |
+
|
| 104 |
+
# Decode this block into 4x4 pixels
|
| 105 |
+
# Accumulate the results onto our 4 row accumulators
|
| 106 |
+
for j in range(4):
|
| 107 |
+
for i in range(4):
|
| 108 |
+
# get next control op and generate a pixel
|
| 109 |
+
|
| 110 |
+
control = bits & 3
|
| 111 |
+
bits = bits >> 2
|
| 112 |
+
|
| 113 |
+
a = 0xFF
|
| 114 |
+
if control == 0:
|
| 115 |
+
r, g, b = r0, g0, b0
|
| 116 |
+
elif control == 1:
|
| 117 |
+
r, g, b = r1, g1, b1
|
| 118 |
+
elif control == 2:
|
| 119 |
+
if color0 > color1:
|
| 120 |
+
r = (2 * r0 + r1) // 3
|
| 121 |
+
g = (2 * g0 + g1) // 3
|
| 122 |
+
b = (2 * b0 + b1) // 3
|
| 123 |
+
else:
|
| 124 |
+
r = (r0 + r1) // 2
|
| 125 |
+
g = (g0 + g1) // 2
|
| 126 |
+
b = (b0 + b1) // 2
|
| 127 |
+
elif control == 3:
|
| 128 |
+
if color0 > color1:
|
| 129 |
+
r = (2 * r1 + r0) // 3
|
| 130 |
+
g = (2 * g1 + g0) // 3
|
| 131 |
+
b = (2 * b1 + b0) // 3
|
| 132 |
+
else:
|
| 133 |
+
r, g, b, a = 0, 0, 0, 0
|
| 134 |
+
|
| 135 |
+
if alpha:
|
| 136 |
+
ret[j].extend([r, g, b, a])
|
| 137 |
+
else:
|
| 138 |
+
ret[j].extend([r, g, b])
|
| 139 |
+
|
| 140 |
+
return ret
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def decode_dxt3(data):
|
| 144 |
+
"""
|
| 145 |
+
input: one "row" of data (i.e. will produce 4*width pixels)
|
| 146 |
+
"""
|
| 147 |
+
|
| 148 |
+
blocks = len(data) // 16 # number of blocks in row
|
| 149 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
| 150 |
+
|
| 151 |
+
for block in range(blocks):
|
| 152 |
+
idx = block * 16
|
| 153 |
+
block = data[idx : idx + 16]
|
| 154 |
+
# Decode next 16-byte block.
|
| 155 |
+
bits = struct.unpack_from("<8B", block)
|
| 156 |
+
color0, color1 = struct.unpack_from("<HH", block, 8)
|
| 157 |
+
|
| 158 |
+
(code,) = struct.unpack_from("<I", block, 12)
|
| 159 |
+
|
| 160 |
+
r0, g0, b0 = unpack_565(color0)
|
| 161 |
+
r1, g1, b1 = unpack_565(color1)
|
| 162 |
+
|
| 163 |
+
for j in range(4):
|
| 164 |
+
high = False # Do we want the higher bits?
|
| 165 |
+
for i in range(4):
|
| 166 |
+
alphacode_index = (4 * j + i) // 2
|
| 167 |
+
a = bits[alphacode_index]
|
| 168 |
+
if high:
|
| 169 |
+
high = False
|
| 170 |
+
a >>= 4
|
| 171 |
+
else:
|
| 172 |
+
high = True
|
| 173 |
+
a &= 0xF
|
| 174 |
+
a *= 17 # We get a value between 0 and 15
|
| 175 |
+
|
| 176 |
+
color_code = (code >> 2 * (4 * j + i)) & 0x03
|
| 177 |
+
|
| 178 |
+
if color_code == 0:
|
| 179 |
+
r, g, b = r0, g0, b0
|
| 180 |
+
elif color_code == 1:
|
| 181 |
+
r, g, b = r1, g1, b1
|
| 182 |
+
elif color_code == 2:
|
| 183 |
+
r = (2 * r0 + r1) // 3
|
| 184 |
+
g = (2 * g0 + g1) // 3
|
| 185 |
+
b = (2 * b0 + b1) // 3
|
| 186 |
+
elif color_code == 3:
|
| 187 |
+
r = (2 * r1 + r0) // 3
|
| 188 |
+
g = (2 * g1 + g0) // 3
|
| 189 |
+
b = (2 * b1 + b0) // 3
|
| 190 |
+
|
| 191 |
+
ret[j].extend([r, g, b, a])
|
| 192 |
+
|
| 193 |
+
return ret
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def decode_dxt5(data):
|
| 197 |
+
"""
|
| 198 |
+
input: one "row" of data (i.e. will produce 4 * width pixels)
|
| 199 |
+
"""
|
| 200 |
+
|
| 201 |
+
blocks = len(data) // 16 # number of blocks in row
|
| 202 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
| 203 |
+
|
| 204 |
+
for block in range(blocks):
|
| 205 |
+
idx = block * 16
|
| 206 |
+
block = data[idx : idx + 16]
|
| 207 |
+
# Decode next 16-byte block.
|
| 208 |
+
a0, a1 = struct.unpack_from("<BB", block)
|
| 209 |
+
|
| 210 |
+
bits = struct.unpack_from("<6B", block, 2)
|
| 211 |
+
alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
|
| 212 |
+
alphacode2 = bits[0] | (bits[1] << 8)
|
| 213 |
+
|
| 214 |
+
color0, color1 = struct.unpack_from("<HH", block, 8)
|
| 215 |
+
|
| 216 |
+
(code,) = struct.unpack_from("<I", block, 12)
|
| 217 |
+
|
| 218 |
+
r0, g0, b0 = unpack_565(color0)
|
| 219 |
+
r1, g1, b1 = unpack_565(color1)
|
| 220 |
+
|
| 221 |
+
for j in range(4):
|
| 222 |
+
for i in range(4):
|
| 223 |
+
# get next control op and generate a pixel
|
| 224 |
+
alphacode_index = 3 * (4 * j + i)
|
| 225 |
+
|
| 226 |
+
if alphacode_index <= 12:
|
| 227 |
+
alphacode = (alphacode2 >> alphacode_index) & 0x07
|
| 228 |
+
elif alphacode_index == 15:
|
| 229 |
+
alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
|
| 230 |
+
else: # alphacode_index >= 18 and alphacode_index <= 45
|
| 231 |
+
alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07
|
| 232 |
+
|
| 233 |
+
if alphacode == 0:
|
| 234 |
+
a = a0
|
| 235 |
+
elif alphacode == 1:
|
| 236 |
+
a = a1
|
| 237 |
+
elif a0 > a1:
|
| 238 |
+
a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
|
| 239 |
+
elif alphacode == 6:
|
| 240 |
+
a = 0
|
| 241 |
+
elif alphacode == 7:
|
| 242 |
+
a = 255
|
| 243 |
+
else:
|
| 244 |
+
a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5
|
| 245 |
+
|
| 246 |
+
color_code = (code >> 2 * (4 * j + i)) & 0x03
|
| 247 |
+
|
| 248 |
+
if color_code == 0:
|
| 249 |
+
r, g, b = r0, g0, b0
|
| 250 |
+
elif color_code == 1:
|
| 251 |
+
r, g, b = r1, g1, b1
|
| 252 |
+
elif color_code == 2:
|
| 253 |
+
r = (2 * r0 + r1) // 3
|
| 254 |
+
g = (2 * g0 + g1) // 3
|
| 255 |
+
b = (2 * b0 + b1) // 3
|
| 256 |
+
elif color_code == 3:
|
| 257 |
+
r = (2 * r1 + r0) // 3
|
| 258 |
+
g = (2 * g1 + g0) // 3
|
| 259 |
+
b = (2 * b1 + b0) // 3
|
| 260 |
+
|
| 261 |
+
ret[j].extend([r, g, b, a])
|
| 262 |
+
|
| 263 |
+
return ret
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class BLPFormatError(NotImplementedError):
|
| 267 |
+
pass
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def _accept(prefix):
|
| 271 |
+
return prefix[:4] in (b"BLP1", b"BLP2")
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class BlpImageFile(ImageFile.ImageFile):
|
| 275 |
+
"""
|
| 276 |
+
Blizzard Mipmap Format
|
| 277 |
+
"""
|
| 278 |
+
|
| 279 |
+
format = "BLP"
|
| 280 |
+
format_description = "Blizzard Mipmap Format"
|
| 281 |
+
|
| 282 |
+
def _open(self):
|
| 283 |
+
self.magic = self.fp.read(4)
|
| 284 |
+
|
| 285 |
+
self.fp.seek(5, os.SEEK_CUR)
|
| 286 |
+
(self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))
|
| 287 |
+
|
| 288 |
+
self.fp.seek(2, os.SEEK_CUR)
|
| 289 |
+
self._size = struct.unpack("<II", self.fp.read(8))
|
| 290 |
+
|
| 291 |
+
if self.magic in (b"BLP1", b"BLP2"):
|
| 292 |
+
decoder = self.magic.decode()
|
| 293 |
+
else:
|
| 294 |
+
raise BLPFormatError(f"Bad BLP magic {repr(self.magic)}")
|
| 295 |
+
|
| 296 |
+
self.mode = "RGBA" if self._blp_alpha_depth else "RGB"
|
| 297 |
+
self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
class _BLPBaseDecoder(ImageFile.PyDecoder):
|
| 301 |
+
_pulls_fd = True
|
| 302 |
+
|
| 303 |
+
def decode(self, buffer):
|
| 304 |
+
try:
|
| 305 |
+
self._read_blp_header()
|
| 306 |
+
self._load()
|
| 307 |
+
except struct.error as e:
|
| 308 |
+
raise OSError("Truncated BLP file") from e
|
| 309 |
+
return -1, 0
|
| 310 |
+
|
| 311 |
+
def _read_blp_header(self):
|
| 312 |
+
self.fd.seek(4)
|
| 313 |
+
(self._blp_compression,) = struct.unpack("<i", self._safe_read(4))
|
| 314 |
+
|
| 315 |
+
(self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
|
| 316 |
+
(self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
|
| 317 |
+
(self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
|
| 318 |
+
self.fd.seek(1, os.SEEK_CUR) # mips
|
| 319 |
+
|
| 320 |
+
self.size = struct.unpack("<II", self._safe_read(8))
|
| 321 |
+
|
| 322 |
+
if isinstance(self, BLP1Decoder):
|
| 323 |
+
# Only present for BLP1
|
| 324 |
+
(self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
|
| 325 |
+
self.fd.seek(4, os.SEEK_CUR) # subtype
|
| 326 |
+
|
| 327 |
+
self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
|
| 328 |
+
self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))
|
| 329 |
+
|
| 330 |
+
def _safe_read(self, length):
|
| 331 |
+
return ImageFile._safe_read(self.fd, length)
|
| 332 |
+
|
| 333 |
+
def _read_palette(self):
|
| 334 |
+
ret = []
|
| 335 |
+
for i in range(256):
|
| 336 |
+
try:
|
| 337 |
+
b, g, r, a = struct.unpack("<4B", self._safe_read(4))
|
| 338 |
+
except struct.error:
|
| 339 |
+
break
|
| 340 |
+
ret.append((b, g, r, a))
|
| 341 |
+
return ret
|
| 342 |
+
|
| 343 |
+
def _read_bgra(self, palette):
|
| 344 |
+
data = bytearray()
|
| 345 |
+
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
|
| 346 |
+
while True:
|
| 347 |
+
try:
|
| 348 |
+
(offset,) = struct.unpack("<B", _data.read(1))
|
| 349 |
+
except struct.error:
|
| 350 |
+
break
|
| 351 |
+
b, g, r, a = palette[offset]
|
| 352 |
+
d = (r, g, b)
|
| 353 |
+
if self._blp_alpha_depth:
|
| 354 |
+
d += (a,)
|
| 355 |
+
data.extend(d)
|
| 356 |
+
return data
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
class BLP1Decoder(_BLPBaseDecoder):
|
| 360 |
+
def _load(self):
|
| 361 |
+
if self._blp_compression == Format.JPEG:
|
| 362 |
+
self._decode_jpeg_stream()
|
| 363 |
+
|
| 364 |
+
elif self._blp_compression == 1:
|
| 365 |
+
if self._blp_encoding in (4, 5):
|
| 366 |
+
palette = self._read_palette()
|
| 367 |
+
data = self._read_bgra(palette)
|
| 368 |
+
self.set_as_raw(bytes(data))
|
| 369 |
+
else:
|
| 370 |
+
raise BLPFormatError(
|
| 371 |
+
f"Unsupported BLP encoding {repr(self._blp_encoding)}"
|
| 372 |
+
)
|
| 373 |
+
else:
|
| 374 |
+
raise BLPFormatError(
|
| 375 |
+
f"Unsupported BLP compression {repr(self._blp_encoding)}"
|
| 376 |
+
)
|
| 377 |
+
|
| 378 |
+
def _decode_jpeg_stream(self):
|
| 379 |
+
from .JpegImagePlugin import JpegImageFile
|
| 380 |
+
|
| 381 |
+
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
|
| 382 |
+
jpeg_header = self._safe_read(jpeg_header_size)
|
| 383 |
+
self._safe_read(self._blp_offsets[0] - self.fd.tell()) # What IS this?
|
| 384 |
+
data = self._safe_read(self._blp_lengths[0])
|
| 385 |
+
data = jpeg_header + data
|
| 386 |
+
data = BytesIO(data)
|
| 387 |
+
image = JpegImageFile(data)
|
| 388 |
+
Image._decompression_bomb_check(image.size)
|
| 389 |
+
image.mode = "RGB"
|
| 390 |
+
image.tile = [("jpeg", (0, 0) + self.size, 0, ("BGRX", ""))]
|
| 391 |
+
self.set_as_raw(image.tobytes())
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class BLP2Decoder(_BLPBaseDecoder):
|
| 395 |
+
def _load(self):
|
| 396 |
+
palette = self._read_palette()
|
| 397 |
+
|
| 398 |
+
self.fd.seek(self._blp_offsets[0])
|
| 399 |
+
|
| 400 |
+
if self._blp_compression == 1:
|
| 401 |
+
# Uncompressed or DirectX compression
|
| 402 |
+
|
| 403 |
+
if self._blp_encoding == Encoding.UNCOMPRESSED:
|
| 404 |
+
data = self._read_bgra(palette)
|
| 405 |
+
|
| 406 |
+
elif self._blp_encoding == Encoding.DXT:
|
| 407 |
+
data = bytearray()
|
| 408 |
+
if self._blp_alpha_encoding == AlphaEncoding.DXT1:
|
| 409 |
+
linesize = (self.size[0] + 3) // 4 * 8
|
| 410 |
+
for yb in range((self.size[1] + 3) // 4):
|
| 411 |
+
for d in decode_dxt1(
|
| 412 |
+
self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
|
| 413 |
+
):
|
| 414 |
+
data += d
|
| 415 |
+
|
| 416 |
+
elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
|
| 417 |
+
linesize = (self.size[0] + 3) // 4 * 16
|
| 418 |
+
for yb in range((self.size[1] + 3) // 4):
|
| 419 |
+
for d in decode_dxt3(self._safe_read(linesize)):
|
| 420 |
+
data += d
|
| 421 |
+
|
| 422 |
+
elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
|
| 423 |
+
linesize = (self.size[0] + 3) // 4 * 16
|
| 424 |
+
for yb in range((self.size[1] + 3) // 4):
|
| 425 |
+
for d in decode_dxt5(self._safe_read(linesize)):
|
| 426 |
+
data += d
|
| 427 |
+
else:
|
| 428 |
+
raise BLPFormatError(
|
| 429 |
+
f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
|
| 430 |
+
)
|
| 431 |
+
else:
|
| 432 |
+
raise BLPFormatError(f"Unknown BLP encoding {repr(self._blp_encoding)}")
|
| 433 |
+
|
| 434 |
+
else:
|
| 435 |
+
raise BLPFormatError(
|
| 436 |
+
f"Unknown BLP compression {repr(self._blp_compression)}"
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
self.set_as_raw(bytes(data))
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
class BLPEncoder(ImageFile.PyEncoder):
|
| 443 |
+
_pushes_fd = True
|
| 444 |
+
|
| 445 |
+
def _write_palette(self):
|
| 446 |
+
data = b""
|
| 447 |
+
palette = self.im.getpalette("RGBA", "RGBA")
|
| 448 |
+
for i in range(256):
|
| 449 |
+
r, g, b, a = palette[i * 4 : (i + 1) * 4]
|
| 450 |
+
data += struct.pack("<4B", b, g, r, a)
|
| 451 |
+
return data
|
| 452 |
+
|
| 453 |
+
def encode(self, bufsize):
|
| 454 |
+
palette_data = self._write_palette()
|
| 455 |
+
|
| 456 |
+
offset = 20 + 16 * 4 * 2 + len(palette_data)
|
| 457 |
+
data = struct.pack("<16I", offset, *((0,) * 15))
|
| 458 |
+
|
| 459 |
+
w, h = self.im.size
|
| 460 |
+
data += struct.pack("<16I", w * h, *((0,) * 15))
|
| 461 |
+
|
| 462 |
+
data += palette_data
|
| 463 |
+
|
| 464 |
+
for y in range(h):
|
| 465 |
+
for x in range(w):
|
| 466 |
+
data += struct.pack("<B", self.im.getpixel((x, y)))
|
| 467 |
+
|
| 468 |
+
return len(data), 0, data
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def _save(im, fp, filename, save_all=False):
|
| 472 |
+
if im.mode != "P":
|
| 473 |
+
raise ValueError("Unsupported BLP image mode")
|
| 474 |
+
|
| 475 |
+
magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
|
| 476 |
+
fp.write(magic)
|
| 477 |
+
|
| 478 |
+
fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression
|
| 479 |
+
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
|
| 480 |
+
fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
|
| 481 |
+
fp.write(struct.pack("<b", 0)) # alpha encoding
|
| 482 |
+
fp.write(struct.pack("<b", 0)) # mips
|
| 483 |
+
fp.write(struct.pack("<II", *im.size))
|
| 484 |
+
if magic == b"BLP1":
|
| 485 |
+
fp.write(struct.pack("<i", 5))
|
| 486 |
+
fp.write(struct.pack("<i", 0))
|
| 487 |
+
|
| 488 |
+
ImageFile._save(im, fp, [("BLP", (0, 0) + im.size, 0, im.mode)])
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
|
| 492 |
+
Image.register_extension(BlpImageFile.format, ".blp")
|
| 493 |
+
Image.register_decoder("BLP1", BLP1Decoder)
|
| 494 |
+
Image.register_decoder("BLP2", BLP2Decoder)
|
| 495 |
+
|
| 496 |
+
Image.register_save(BlpImageFile.format, _save)
|
| 497 |
+
Image.register_encoder("BLP", BLPEncoder)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/DcxImagePlugin.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# DCX file handling
|
| 6 |
+
#
|
| 7 |
+
# DCX is a container file format defined by Intel, commonly used
|
| 8 |
+
# for fax applications. Each DCX file consists of a directory
|
| 9 |
+
# (a list of file offsets) followed by a set of (usually 1-bit)
|
| 10 |
+
# PCX files.
|
| 11 |
+
#
|
| 12 |
+
# History:
|
| 13 |
+
# 1995-09-09 fl Created
|
| 14 |
+
# 1996-03-20 fl Properly derived from PcxImageFile.
|
| 15 |
+
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
| 16 |
+
# 2002-07-30 fl Fixed file handling
|
| 17 |
+
#
|
| 18 |
+
# Copyright (c) 1997-98 by Secret Labs AB.
|
| 19 |
+
# Copyright (c) 1995-96 by Fredrik Lundh.
|
| 20 |
+
#
|
| 21 |
+
# See the README file for information on usage and redistribution.
|
| 22 |
+
#
|
| 23 |
+
|
| 24 |
+
from . import Image
|
| 25 |
+
from ._binary import i32le as i32
|
| 26 |
+
from .PcxImagePlugin import PcxImageFile
|
| 27 |
+
|
| 28 |
+
MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _accept(prefix):
|
| 32 |
+
return len(prefix) >= 4 and i32(prefix) == MAGIC
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
##
|
| 36 |
+
# Image plugin for the Intel DCX format.
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class DcxImageFile(PcxImageFile):
|
| 40 |
+
|
| 41 |
+
format = "DCX"
|
| 42 |
+
format_description = "Intel DCX"
|
| 43 |
+
_close_exclusive_fp_after_loading = False
|
| 44 |
+
|
| 45 |
+
def _open(self):
|
| 46 |
+
|
| 47 |
+
# Header
|
| 48 |
+
s = self.fp.read(4)
|
| 49 |
+
if not _accept(s):
|
| 50 |
+
raise SyntaxError("not a DCX file")
|
| 51 |
+
|
| 52 |
+
# Component directory
|
| 53 |
+
self._offset = []
|
| 54 |
+
for i in range(1024):
|
| 55 |
+
offset = i32(self.fp.read(4))
|
| 56 |
+
if not offset:
|
| 57 |
+
break
|
| 58 |
+
self._offset.append(offset)
|
| 59 |
+
|
| 60 |
+
self.__fp = self.fp
|
| 61 |
+
self.frame = None
|
| 62 |
+
self.n_frames = len(self._offset)
|
| 63 |
+
self.is_animated = self.n_frames > 1
|
| 64 |
+
self.seek(0)
|
| 65 |
+
|
| 66 |
+
def seek(self, frame):
|
| 67 |
+
if not self._seek_check(frame):
|
| 68 |
+
return
|
| 69 |
+
self.frame = frame
|
| 70 |
+
self.fp = self.__fp
|
| 71 |
+
self.fp.seek(self._offset[frame])
|
| 72 |
+
PcxImageFile._open(self)
|
| 73 |
+
|
| 74 |
+
def tell(self):
|
| 75 |
+
return self.frame
|
| 76 |
+
|
| 77 |
+
def _close__fp(self):
|
| 78 |
+
try:
|
| 79 |
+
if self.__fp != self.fp:
|
| 80 |
+
self.__fp.close()
|
| 81 |
+
except AttributeError:
|
| 82 |
+
pass
|
| 83 |
+
finally:
|
| 84 |
+
self.__fp = None
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
Image.register_open(DcxImageFile.format, DcxImageFile, _accept)
|
| 88 |
+
|
| 89 |
+
Image.register_extension(DcxImageFile.format, ".dcx")
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/FitsImagePlugin.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# FITS file handling
|
| 6 |
+
#
|
| 7 |
+
# Copyright (c) 1998-2003 by Fredrik Lundh
|
| 8 |
+
#
|
| 9 |
+
# See the README file for information on usage and redistribution.
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import math
|
| 13 |
+
|
| 14 |
+
from . import Image, ImageFile
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _accept(prefix):
|
| 18 |
+
return prefix[:6] == b"SIMPLE"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class FitsImageFile(ImageFile.ImageFile):
|
| 22 |
+
|
| 23 |
+
format = "FITS"
|
| 24 |
+
format_description = "FITS"
|
| 25 |
+
|
| 26 |
+
def _open(self):
|
| 27 |
+
headers = {}
|
| 28 |
+
while True:
|
| 29 |
+
header = self.fp.read(80)
|
| 30 |
+
if not header:
|
| 31 |
+
raise OSError("Truncated FITS file")
|
| 32 |
+
keyword = header[:8].strip()
|
| 33 |
+
if keyword == b"END":
|
| 34 |
+
break
|
| 35 |
+
value = header[8:].strip()
|
| 36 |
+
if value.startswith(b"="):
|
| 37 |
+
value = value[1:].strip()
|
| 38 |
+
if not headers and (not _accept(keyword) or value != b"T"):
|
| 39 |
+
raise SyntaxError("Not a FITS file")
|
| 40 |
+
headers[keyword] = value
|
| 41 |
+
|
| 42 |
+
naxis = int(headers[b"NAXIS"])
|
| 43 |
+
if naxis == 0:
|
| 44 |
+
raise ValueError("No image data")
|
| 45 |
+
elif naxis == 1:
|
| 46 |
+
self._size = 1, int(headers[b"NAXIS1"])
|
| 47 |
+
else:
|
| 48 |
+
self._size = int(headers[b"NAXIS1"]), int(headers[b"NAXIS2"])
|
| 49 |
+
|
| 50 |
+
number_of_bits = int(headers[b"BITPIX"])
|
| 51 |
+
if number_of_bits == 8:
|
| 52 |
+
self.mode = "L"
|
| 53 |
+
elif number_of_bits == 16:
|
| 54 |
+
self.mode = "I"
|
| 55 |
+
# rawmode = "I;16S"
|
| 56 |
+
elif number_of_bits == 32:
|
| 57 |
+
self.mode = "I"
|
| 58 |
+
elif number_of_bits in (-32, -64):
|
| 59 |
+
self.mode = "F"
|
| 60 |
+
# rawmode = "F" if number_of_bits == -32 else "F;64F"
|
| 61 |
+
|
| 62 |
+
offset = math.ceil(self.fp.tell() / 2880) * 2880
|
| 63 |
+
self.tile = [("raw", (0, 0) + self.size, offset, (self.mode, 0, -1))]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# --------------------------------------------------------------------
|
| 67 |
+
# Registry
|
| 68 |
+
|
| 69 |
+
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
|
| 70 |
+
|
| 71 |
+
Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/GdImageFile.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# GD file handling
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 1996-04-12 fl Created
|
| 9 |
+
#
|
| 10 |
+
# Copyright (c) 1997 by Secret Labs AB.
|
| 11 |
+
# Copyright (c) 1996 by Fredrik Lundh.
|
| 12 |
+
#
|
| 13 |
+
# See the README file for information on usage and redistribution.
|
| 14 |
+
#
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
.. note::
|
| 19 |
+
This format cannot be automatically recognized, so the
|
| 20 |
+
class is not registered for use with :py:func:`PIL.Image.open()`. To open a
|
| 21 |
+
gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.
|
| 22 |
+
|
| 23 |
+
.. warning::
|
| 24 |
+
THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
|
| 25 |
+
implementation is provided for convenience and demonstrational
|
| 26 |
+
purposes only.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
from . import ImageFile, ImagePalette, UnidentifiedImageError
|
| 31 |
+
from ._binary import i16be as i16
|
| 32 |
+
from ._binary import i32be as i32
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class GdImageFile(ImageFile.ImageFile):
|
| 36 |
+
"""
|
| 37 |
+
Image plugin for the GD uncompressed format. Note that this format
|
| 38 |
+
is not supported by the standard :py:func:`PIL.Image.open()` function. To use
|
| 39 |
+
this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
|
| 40 |
+
use the :py:func:`PIL.GdImageFile.open()` function.
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
format = "GD"
|
| 44 |
+
format_description = "GD uncompressed images"
|
| 45 |
+
|
| 46 |
+
def _open(self):
|
| 47 |
+
|
| 48 |
+
# Header
|
| 49 |
+
s = self.fp.read(1037)
|
| 50 |
+
|
| 51 |
+
if not i16(s) in [65534, 65535]:
|
| 52 |
+
raise SyntaxError("Not a valid GD 2.x .gd file")
|
| 53 |
+
|
| 54 |
+
self.mode = "L" # FIXME: "P"
|
| 55 |
+
self._size = i16(s, 2), i16(s, 4)
|
| 56 |
+
|
| 57 |
+
trueColor = s[6]
|
| 58 |
+
trueColorOffset = 2 if trueColor else 0
|
| 59 |
+
|
| 60 |
+
# transparency index
|
| 61 |
+
tindex = i32(s, 7 + trueColorOffset)
|
| 62 |
+
if tindex < 256:
|
| 63 |
+
self.info["transparency"] = tindex
|
| 64 |
+
|
| 65 |
+
self.palette = ImagePalette.raw(
|
| 66 |
+
"XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4]
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
self.tile = [
|
| 70 |
+
("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1))
|
| 71 |
+
]
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def open(fp, mode="r"):
|
| 75 |
+
"""
|
| 76 |
+
Load texture from a GD image file.
|
| 77 |
+
|
| 78 |
+
:param filename: GD file name, or an opened file handle.
|
| 79 |
+
:param mode: Optional mode. In this version, if the mode argument
|
| 80 |
+
is given, it must be "r".
|
| 81 |
+
:returns: An image instance.
|
| 82 |
+
:raises OSError: If the image could not be read.
|
| 83 |
+
"""
|
| 84 |
+
if mode != "r":
|
| 85 |
+
raise ValueError("bad mode")
|
| 86 |
+
|
| 87 |
+
try:
|
| 88 |
+
return GdImageFile(fp)
|
| 89 |
+
except SyntaxError as e:
|
| 90 |
+
raise UnidentifiedImageError("cannot identify this image file") from e
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/GifImagePlugin.py
ADDED
|
@@ -0,0 +1,1038 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# GIF file handling
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 1995-09-01 fl Created
|
| 9 |
+
# 1996-12-14 fl Added interlace support
|
| 10 |
+
# 1996-12-30 fl Added animation support
|
| 11 |
+
# 1997-01-05 fl Added write support, fixed local colour map bug
|
| 12 |
+
# 1997-02-23 fl Make sure to load raster data in getdata()
|
| 13 |
+
# 1997-07-05 fl Support external decoder (0.4)
|
| 14 |
+
# 1998-07-09 fl Handle all modes when saving (0.5)
|
| 15 |
+
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
| 16 |
+
# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6)
|
| 17 |
+
# 2001-04-17 fl Added palette optimization (0.7)
|
| 18 |
+
# 2002-06-06 fl Added transparency support for save (0.8)
|
| 19 |
+
# 2004-02-24 fl Disable interlacing for small images
|
| 20 |
+
#
|
| 21 |
+
# Copyright (c) 1997-2004 by Secret Labs AB
|
| 22 |
+
# Copyright (c) 1995-2004 by Fredrik Lundh
|
| 23 |
+
#
|
| 24 |
+
# See the README file for information on usage and redistribution.
|
| 25 |
+
#
|
| 26 |
+
|
| 27 |
+
import itertools
|
| 28 |
+
import math
|
| 29 |
+
import os
|
| 30 |
+
import subprocess
|
| 31 |
+
from enum import IntEnum
|
| 32 |
+
|
| 33 |
+
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
|
| 34 |
+
from ._binary import i16le as i16
|
| 35 |
+
from ._binary import o8
|
| 36 |
+
from ._binary import o16le as o16
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class LoadingStrategy(IntEnum):
|
| 40 |
+
""".. versionadded:: 9.1.0"""
|
| 41 |
+
|
| 42 |
+
RGB_AFTER_FIRST = 0
|
| 43 |
+
RGB_AFTER_DIFFERENT_PALETTE_ONLY = 1
|
| 44 |
+
RGB_ALWAYS = 2
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
#: .. versionadded:: 9.1.0
|
| 48 |
+
LOADING_STRATEGY = LoadingStrategy.RGB_AFTER_FIRST
|
| 49 |
+
|
| 50 |
+
# --------------------------------------------------------------------
|
| 51 |
+
# Identify/read GIF files
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _accept(prefix):
|
| 55 |
+
return prefix[:6] in [b"GIF87a", b"GIF89a"]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
##
|
| 59 |
+
# Image plugin for GIF images. This plugin supports both GIF87 and
|
| 60 |
+
# GIF89 images.
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class GifImageFile(ImageFile.ImageFile):
|
| 64 |
+
|
| 65 |
+
format = "GIF"
|
| 66 |
+
format_description = "Compuserve GIF"
|
| 67 |
+
_close_exclusive_fp_after_loading = False
|
| 68 |
+
|
| 69 |
+
global_palette = None
|
| 70 |
+
|
| 71 |
+
def data(self):
|
| 72 |
+
s = self.fp.read(1)
|
| 73 |
+
if s and s[0]:
|
| 74 |
+
return self.fp.read(s[0])
|
| 75 |
+
return None
|
| 76 |
+
|
| 77 |
+
def _is_palette_needed(self, p):
|
| 78 |
+
for i in range(0, len(p), 3):
|
| 79 |
+
if not (i // 3 == p[i] == p[i + 1] == p[i + 2]):
|
| 80 |
+
return True
|
| 81 |
+
return False
|
| 82 |
+
|
| 83 |
+
def _open(self):
|
| 84 |
+
|
| 85 |
+
# Screen
|
| 86 |
+
s = self.fp.read(13)
|
| 87 |
+
if not _accept(s):
|
| 88 |
+
raise SyntaxError("not a GIF file")
|
| 89 |
+
|
| 90 |
+
self.info["version"] = s[:6]
|
| 91 |
+
self._size = i16(s, 6), i16(s, 8)
|
| 92 |
+
self.tile = []
|
| 93 |
+
flags = s[10]
|
| 94 |
+
bits = (flags & 7) + 1
|
| 95 |
+
|
| 96 |
+
if flags & 128:
|
| 97 |
+
# get global palette
|
| 98 |
+
self.info["background"] = s[11]
|
| 99 |
+
# check if palette contains colour indices
|
| 100 |
+
p = self.fp.read(3 << bits)
|
| 101 |
+
if self._is_palette_needed(p):
|
| 102 |
+
p = ImagePalette.raw("RGB", p)
|
| 103 |
+
self.global_palette = self.palette = p
|
| 104 |
+
|
| 105 |
+
self.__fp = self.fp # FIXME: hack
|
| 106 |
+
self.__rewind = self.fp.tell()
|
| 107 |
+
self._n_frames = None
|
| 108 |
+
self._is_animated = None
|
| 109 |
+
self._seek(0) # get ready to read first frame
|
| 110 |
+
|
| 111 |
+
@property
|
| 112 |
+
def n_frames(self):
|
| 113 |
+
if self._n_frames is None:
|
| 114 |
+
current = self.tell()
|
| 115 |
+
try:
|
| 116 |
+
while True:
|
| 117 |
+
self._seek(self.tell() + 1, False)
|
| 118 |
+
except EOFError:
|
| 119 |
+
self._n_frames = self.tell() + 1
|
| 120 |
+
self.seek(current)
|
| 121 |
+
return self._n_frames
|
| 122 |
+
|
| 123 |
+
@property
|
| 124 |
+
def is_animated(self):
|
| 125 |
+
if self._is_animated is None:
|
| 126 |
+
if self._n_frames is not None:
|
| 127 |
+
self._is_animated = self._n_frames != 1
|
| 128 |
+
else:
|
| 129 |
+
current = self.tell()
|
| 130 |
+
if current:
|
| 131 |
+
self._is_animated = True
|
| 132 |
+
else:
|
| 133 |
+
try:
|
| 134 |
+
self._seek(1, False)
|
| 135 |
+
self._is_animated = True
|
| 136 |
+
except EOFError:
|
| 137 |
+
self._is_animated = False
|
| 138 |
+
|
| 139 |
+
self.seek(current)
|
| 140 |
+
return self._is_animated
|
| 141 |
+
|
| 142 |
+
def seek(self, frame):
|
| 143 |
+
if not self._seek_check(frame):
|
| 144 |
+
return
|
| 145 |
+
if frame < self.__frame:
|
| 146 |
+
self.im = None
|
| 147 |
+
self._seek(0)
|
| 148 |
+
|
| 149 |
+
last_frame = self.__frame
|
| 150 |
+
for f in range(self.__frame + 1, frame + 1):
|
| 151 |
+
try:
|
| 152 |
+
self._seek(f)
|
| 153 |
+
except EOFError as e:
|
| 154 |
+
self.seek(last_frame)
|
| 155 |
+
raise EOFError("no more images in GIF file") from e
|
| 156 |
+
|
| 157 |
+
def _seek(self, frame, update_image=True):
|
| 158 |
+
|
| 159 |
+
if frame == 0:
|
| 160 |
+
# rewind
|
| 161 |
+
self.__offset = 0
|
| 162 |
+
self.dispose = None
|
| 163 |
+
self.__frame = -1
|
| 164 |
+
self.__fp.seek(self.__rewind)
|
| 165 |
+
self.disposal_method = 0
|
| 166 |
+
else:
|
| 167 |
+
# ensure that the previous frame was loaded
|
| 168 |
+
if self.tile and update_image:
|
| 169 |
+
self.load()
|
| 170 |
+
|
| 171 |
+
if frame != self.__frame + 1:
|
| 172 |
+
raise ValueError(f"cannot seek to frame {frame}")
|
| 173 |
+
|
| 174 |
+
self.fp = self.__fp
|
| 175 |
+
if self.__offset:
|
| 176 |
+
# backup to last frame
|
| 177 |
+
self.fp.seek(self.__offset)
|
| 178 |
+
while self.data():
|
| 179 |
+
pass
|
| 180 |
+
self.__offset = 0
|
| 181 |
+
|
| 182 |
+
s = self.fp.read(1)
|
| 183 |
+
if not s or s == b";":
|
| 184 |
+
raise EOFError
|
| 185 |
+
|
| 186 |
+
self.__frame = frame
|
| 187 |
+
|
| 188 |
+
self.tile = []
|
| 189 |
+
|
| 190 |
+
palette = None
|
| 191 |
+
|
| 192 |
+
info = {}
|
| 193 |
+
frame_transparency = None
|
| 194 |
+
interlace = None
|
| 195 |
+
frame_dispose_extent = None
|
| 196 |
+
while True:
|
| 197 |
+
|
| 198 |
+
if not s:
|
| 199 |
+
s = self.fp.read(1)
|
| 200 |
+
if not s or s == b";":
|
| 201 |
+
break
|
| 202 |
+
|
| 203 |
+
elif s == b"!":
|
| 204 |
+
#
|
| 205 |
+
# extensions
|
| 206 |
+
#
|
| 207 |
+
s = self.fp.read(1)
|
| 208 |
+
block = self.data()
|
| 209 |
+
if s[0] == 249:
|
| 210 |
+
#
|
| 211 |
+
# graphic control extension
|
| 212 |
+
#
|
| 213 |
+
flags = block[0]
|
| 214 |
+
if flags & 1:
|
| 215 |
+
frame_transparency = block[3]
|
| 216 |
+
info["duration"] = i16(block, 1) * 10
|
| 217 |
+
|
| 218 |
+
# disposal method - find the value of bits 4 - 6
|
| 219 |
+
dispose_bits = 0b00011100 & flags
|
| 220 |
+
dispose_bits = dispose_bits >> 2
|
| 221 |
+
if dispose_bits:
|
| 222 |
+
# only set the dispose if it is not
|
| 223 |
+
# unspecified. I'm not sure if this is
|
| 224 |
+
# correct, but it seems to prevent the last
|
| 225 |
+
# frame from looking odd for some animations
|
| 226 |
+
self.disposal_method = dispose_bits
|
| 227 |
+
elif s[0] == 254:
|
| 228 |
+
#
|
| 229 |
+
# comment extension
|
| 230 |
+
#
|
| 231 |
+
while block:
|
| 232 |
+
if "comment" in info:
|
| 233 |
+
info["comment"] += block
|
| 234 |
+
else:
|
| 235 |
+
info["comment"] = block
|
| 236 |
+
block = self.data()
|
| 237 |
+
s = None
|
| 238 |
+
continue
|
| 239 |
+
elif s[0] == 255:
|
| 240 |
+
#
|
| 241 |
+
# application extension
|
| 242 |
+
#
|
| 243 |
+
info["extension"] = block, self.fp.tell()
|
| 244 |
+
if block[:11] == b"NETSCAPE2.0":
|
| 245 |
+
block = self.data()
|
| 246 |
+
if len(block) >= 3 and block[0] == 1:
|
| 247 |
+
info["loop"] = i16(block, 1)
|
| 248 |
+
while self.data():
|
| 249 |
+
pass
|
| 250 |
+
|
| 251 |
+
elif s == b",":
|
| 252 |
+
#
|
| 253 |
+
# local image
|
| 254 |
+
#
|
| 255 |
+
s = self.fp.read(9)
|
| 256 |
+
|
| 257 |
+
# extent
|
| 258 |
+
x0, y0 = i16(s, 0), i16(s, 2)
|
| 259 |
+
x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6)
|
| 260 |
+
if (x1 > self.size[0] or y1 > self.size[1]) and update_image:
|
| 261 |
+
self._size = max(x1, self.size[0]), max(y1, self.size[1])
|
| 262 |
+
frame_dispose_extent = x0, y0, x1, y1
|
| 263 |
+
flags = s[8]
|
| 264 |
+
|
| 265 |
+
interlace = (flags & 64) != 0
|
| 266 |
+
|
| 267 |
+
if flags & 128:
|
| 268 |
+
bits = (flags & 7) + 1
|
| 269 |
+
p = self.fp.read(3 << bits)
|
| 270 |
+
if self._is_palette_needed(p):
|
| 271 |
+
palette = ImagePalette.raw("RGB", p)
|
| 272 |
+
|
| 273 |
+
# image data
|
| 274 |
+
bits = self.fp.read(1)[0]
|
| 275 |
+
self.__offset = self.fp.tell()
|
| 276 |
+
break
|
| 277 |
+
|
| 278 |
+
else:
|
| 279 |
+
pass
|
| 280 |
+
# raise OSError, "illegal GIF tag `%x`" % s[0]
|
| 281 |
+
s = None
|
| 282 |
+
|
| 283 |
+
if interlace is None:
|
| 284 |
+
# self.__fp = None
|
| 285 |
+
raise EOFError
|
| 286 |
+
if not update_image:
|
| 287 |
+
return
|
| 288 |
+
|
| 289 |
+
if self.dispose:
|
| 290 |
+
self.im.paste(self.dispose, self.dispose_extent)
|
| 291 |
+
|
| 292 |
+
self._frame_palette = palette or self.global_palette
|
| 293 |
+
if frame == 0:
|
| 294 |
+
if self._frame_palette:
|
| 295 |
+
self.mode = (
|
| 296 |
+
"RGB" if LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS else "P"
|
| 297 |
+
)
|
| 298 |
+
else:
|
| 299 |
+
self.mode = "L"
|
| 300 |
+
|
| 301 |
+
if not palette and self.global_palette:
|
| 302 |
+
from copy import copy
|
| 303 |
+
|
| 304 |
+
palette = copy(self.global_palette)
|
| 305 |
+
self.palette = palette
|
| 306 |
+
else:
|
| 307 |
+
self._frame_transparency = frame_transparency
|
| 308 |
+
if self.mode == "P":
|
| 309 |
+
if (
|
| 310 |
+
LOADING_STRATEGY != LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
|
| 311 |
+
or palette
|
| 312 |
+
):
|
| 313 |
+
self.pyaccess = None
|
| 314 |
+
if "transparency" in self.info:
|
| 315 |
+
self.im.putpalettealpha(self.info["transparency"], 0)
|
| 316 |
+
self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
|
| 317 |
+
self.mode = "RGBA"
|
| 318 |
+
del self.info["transparency"]
|
| 319 |
+
else:
|
| 320 |
+
self.mode = "RGB"
|
| 321 |
+
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
|
| 322 |
+
|
| 323 |
+
def _rgb(color):
|
| 324 |
+
if self._frame_palette:
|
| 325 |
+
color = tuple(self._frame_palette.palette[color * 3 : color * 3 + 3])
|
| 326 |
+
else:
|
| 327 |
+
color = (color, color, color)
|
| 328 |
+
return color
|
| 329 |
+
|
| 330 |
+
self.dispose_extent = frame_dispose_extent
|
| 331 |
+
try:
|
| 332 |
+
if self.disposal_method < 2:
|
| 333 |
+
# do not dispose or none specified
|
| 334 |
+
self.dispose = None
|
| 335 |
+
elif self.disposal_method == 2:
|
| 336 |
+
# replace with background colour
|
| 337 |
+
|
| 338 |
+
# only dispose the extent in this frame
|
| 339 |
+
x0, y0, x1, y1 = self.dispose_extent
|
| 340 |
+
dispose_size = (x1 - x0, y1 - y0)
|
| 341 |
+
|
| 342 |
+
Image._decompression_bomb_check(dispose_size)
|
| 343 |
+
|
| 344 |
+
# by convention, attempt to use transparency first
|
| 345 |
+
dispose_mode = "P"
|
| 346 |
+
color = self.info.get("transparency", frame_transparency)
|
| 347 |
+
if color is not None:
|
| 348 |
+
if self.mode in ("RGB", "RGBA"):
|
| 349 |
+
dispose_mode = "RGBA"
|
| 350 |
+
color = _rgb(color) + (0,)
|
| 351 |
+
else:
|
| 352 |
+
color = self.info.get("background", 0)
|
| 353 |
+
if self.mode in ("RGB", "RGBA"):
|
| 354 |
+
dispose_mode = "RGB"
|
| 355 |
+
color = _rgb(color)
|
| 356 |
+
self.dispose = Image.core.fill(dispose_mode, dispose_size, color)
|
| 357 |
+
else:
|
| 358 |
+
# replace with previous contents
|
| 359 |
+
if self.im is not None:
|
| 360 |
+
# only dispose the extent in this frame
|
| 361 |
+
self.dispose = self._crop(self.im, self.dispose_extent)
|
| 362 |
+
elif frame_transparency is not None:
|
| 363 |
+
x0, y0, x1, y1 = self.dispose_extent
|
| 364 |
+
dispose_size = (x1 - x0, y1 - y0)
|
| 365 |
+
|
| 366 |
+
Image._decompression_bomb_check(dispose_size)
|
| 367 |
+
dispose_mode = "P"
|
| 368 |
+
color = frame_transparency
|
| 369 |
+
if self.mode in ("RGB", "RGBA"):
|
| 370 |
+
dispose_mode = "RGBA"
|
| 371 |
+
color = _rgb(frame_transparency) + (0,)
|
| 372 |
+
self.dispose = Image.core.fill(dispose_mode, dispose_size, color)
|
| 373 |
+
except AttributeError:
|
| 374 |
+
pass
|
| 375 |
+
|
| 376 |
+
if interlace is not None:
|
| 377 |
+
transparency = -1
|
| 378 |
+
if frame_transparency is not None:
|
| 379 |
+
if frame == 0:
|
| 380 |
+
self.info["transparency"] = frame_transparency
|
| 381 |
+
elif self.mode not in ("RGB", "RGBA"):
|
| 382 |
+
transparency = frame_transparency
|
| 383 |
+
self.tile = [
|
| 384 |
+
(
|
| 385 |
+
"gif",
|
| 386 |
+
(x0, y0, x1, y1),
|
| 387 |
+
self.__offset,
|
| 388 |
+
(bits, interlace, transparency),
|
| 389 |
+
)
|
| 390 |
+
]
|
| 391 |
+
|
| 392 |
+
for k in ["duration", "comment", "extension", "loop"]:
|
| 393 |
+
if k in info:
|
| 394 |
+
self.info[k] = info[k]
|
| 395 |
+
elif k in self.info:
|
| 396 |
+
del self.info[k]
|
| 397 |
+
|
| 398 |
+
def load_prepare(self):
|
| 399 |
+
temp_mode = "P" if self._frame_palette else "L"
|
| 400 |
+
self._prev_im = None
|
| 401 |
+
if self.__frame == 0:
|
| 402 |
+
if "transparency" in self.info:
|
| 403 |
+
self.im = Image.core.fill(
|
| 404 |
+
temp_mode, self.size, self.info["transparency"]
|
| 405 |
+
)
|
| 406 |
+
elif self.mode in ("RGB", "RGBA"):
|
| 407 |
+
self._prev_im = self.im
|
| 408 |
+
if self._frame_palette:
|
| 409 |
+
self.im = Image.core.fill("P", self.size, self._frame_transparency or 0)
|
| 410 |
+
self.im.putpalette(*self._frame_palette.getdata())
|
| 411 |
+
else:
|
| 412 |
+
self.im = None
|
| 413 |
+
self.mode = temp_mode
|
| 414 |
+
self._frame_palette = None
|
| 415 |
+
|
| 416 |
+
super().load_prepare()
|
| 417 |
+
|
| 418 |
+
def load_end(self):
|
| 419 |
+
if self.__frame == 0:
|
| 420 |
+
if self.mode == "P" and LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
|
| 421 |
+
self.mode = "RGB"
|
| 422 |
+
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
|
| 423 |
+
return
|
| 424 |
+
if self.mode == "P" and self._prev_im:
|
| 425 |
+
if self._frame_transparency is not None:
|
| 426 |
+
self.im.putpalettealpha(self._frame_transparency, 0)
|
| 427 |
+
frame_im = self.im.convert("RGBA")
|
| 428 |
+
else:
|
| 429 |
+
frame_im = self.im.convert("RGB")
|
| 430 |
+
else:
|
| 431 |
+
if not self._prev_im:
|
| 432 |
+
return
|
| 433 |
+
frame_im = self.im
|
| 434 |
+
frame_im = self._crop(frame_im, self.dispose_extent)
|
| 435 |
+
|
| 436 |
+
self.im = self._prev_im
|
| 437 |
+
self.mode = self.im.mode
|
| 438 |
+
if frame_im.mode == "RGBA":
|
| 439 |
+
self.im.paste(frame_im, self.dispose_extent, frame_im)
|
| 440 |
+
else:
|
| 441 |
+
self.im.paste(frame_im, self.dispose_extent)
|
| 442 |
+
|
| 443 |
+
def tell(self):
|
| 444 |
+
return self.__frame
|
| 445 |
+
|
| 446 |
+
def _close__fp(self):
|
| 447 |
+
try:
|
| 448 |
+
if self.__fp != self.fp:
|
| 449 |
+
self.__fp.close()
|
| 450 |
+
except AttributeError:
|
| 451 |
+
pass
|
| 452 |
+
finally:
|
| 453 |
+
self.__fp = None
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
# --------------------------------------------------------------------
|
| 457 |
+
# Write GIF files
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
RAWMODE = {"1": "L", "L": "L", "P": "P"}
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def _normalize_mode(im):
|
| 464 |
+
"""
|
| 465 |
+
Takes an image (or frame), returns an image in a mode that is appropriate
|
| 466 |
+
for saving in a Gif.
|
| 467 |
+
|
| 468 |
+
It may return the original image, or it may return an image converted to
|
| 469 |
+
palette or 'L' mode.
|
| 470 |
+
|
| 471 |
+
:param im: Image object
|
| 472 |
+
:returns: Image object
|
| 473 |
+
"""
|
| 474 |
+
if im.mode in RAWMODE:
|
| 475 |
+
im.load()
|
| 476 |
+
return im
|
| 477 |
+
if Image.getmodebase(im.mode) == "RGB":
|
| 478 |
+
im = im.convert("P", palette=Image.Palette.ADAPTIVE)
|
| 479 |
+
if im.palette.mode == "RGBA":
|
| 480 |
+
for rgba in im.palette.colors.keys():
|
| 481 |
+
if rgba[3] == 0:
|
| 482 |
+
im.info["transparency"] = im.palette.colors[rgba]
|
| 483 |
+
break
|
| 484 |
+
return im
|
| 485 |
+
return im.convert("L")
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def _normalize_palette(im, palette, info):
|
| 489 |
+
"""
|
| 490 |
+
Normalizes the palette for image.
|
| 491 |
+
- Sets the palette to the incoming palette, if provided.
|
| 492 |
+
- Ensures that there's a palette for L mode images
|
| 493 |
+
- Optimizes the palette if necessary/desired.
|
| 494 |
+
|
| 495 |
+
:param im: Image object
|
| 496 |
+
:param palette: bytes object containing the source palette, or ....
|
| 497 |
+
:param info: encoderinfo
|
| 498 |
+
:returns: Image object
|
| 499 |
+
"""
|
| 500 |
+
source_palette = None
|
| 501 |
+
if palette:
|
| 502 |
+
# a bytes palette
|
| 503 |
+
if isinstance(palette, (bytes, bytearray, list)):
|
| 504 |
+
source_palette = bytearray(palette[:768])
|
| 505 |
+
if isinstance(palette, ImagePalette.ImagePalette):
|
| 506 |
+
source_palette = bytearray(palette.palette)
|
| 507 |
+
|
| 508 |
+
if im.mode == "P":
|
| 509 |
+
if not source_palette:
|
| 510 |
+
source_palette = im.im.getpalette("RGB")[:768]
|
| 511 |
+
else: # L-mode
|
| 512 |
+
if not source_palette:
|
| 513 |
+
source_palette = bytearray(i // 3 for i in range(768))
|
| 514 |
+
im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette)
|
| 515 |
+
|
| 516 |
+
if palette:
|
| 517 |
+
used_palette_colors = []
|
| 518 |
+
for i in range(0, len(source_palette), 3):
|
| 519 |
+
source_color = tuple(source_palette[i : i + 3])
|
| 520 |
+
try:
|
| 521 |
+
index = im.palette.colors[source_color]
|
| 522 |
+
except KeyError:
|
| 523 |
+
index = None
|
| 524 |
+
used_palette_colors.append(index)
|
| 525 |
+
for i, index in enumerate(used_palette_colors):
|
| 526 |
+
if index is None:
|
| 527 |
+
for j in range(len(used_palette_colors)):
|
| 528 |
+
if j not in used_palette_colors:
|
| 529 |
+
used_palette_colors[i] = j
|
| 530 |
+
break
|
| 531 |
+
im = im.remap_palette(used_palette_colors)
|
| 532 |
+
else:
|
| 533 |
+
used_palette_colors = _get_optimize(im, info)
|
| 534 |
+
if used_palette_colors is not None:
|
| 535 |
+
return im.remap_palette(used_palette_colors, source_palette)
|
| 536 |
+
|
| 537 |
+
im.palette.palette = source_palette
|
| 538 |
+
return im
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
def _write_single_frame(im, fp, palette):
|
| 542 |
+
im_out = _normalize_mode(im)
|
| 543 |
+
for k, v in im_out.info.items():
|
| 544 |
+
im.encoderinfo.setdefault(k, v)
|
| 545 |
+
im_out = _normalize_palette(im_out, palette, im.encoderinfo)
|
| 546 |
+
|
| 547 |
+
for s in _get_global_header(im_out, im.encoderinfo):
|
| 548 |
+
fp.write(s)
|
| 549 |
+
|
| 550 |
+
# local image header
|
| 551 |
+
flags = 0
|
| 552 |
+
if get_interlace(im):
|
| 553 |
+
flags = flags | 64
|
| 554 |
+
_write_local_header(fp, im, (0, 0), flags)
|
| 555 |
+
|
| 556 |
+
im_out.encoderconfig = (8, get_interlace(im))
|
| 557 |
+
ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])])
|
| 558 |
+
|
| 559 |
+
fp.write(b"\0") # end of image data
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def _write_multiple_frames(im, fp, palette):
|
| 563 |
+
|
| 564 |
+
duration = im.encoderinfo.get("duration", im.info.get("duration"))
|
| 565 |
+
disposal = im.encoderinfo.get("disposal", im.info.get("disposal"))
|
| 566 |
+
|
| 567 |
+
im_frames = []
|
| 568 |
+
frame_count = 0
|
| 569 |
+
background_im = None
|
| 570 |
+
for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])):
|
| 571 |
+
for im_frame in ImageSequence.Iterator(imSequence):
|
| 572 |
+
# a copy is required here since seek can still mutate the image
|
| 573 |
+
im_frame = _normalize_mode(im_frame.copy())
|
| 574 |
+
if frame_count == 0:
|
| 575 |
+
for k, v in im_frame.info.items():
|
| 576 |
+
im.encoderinfo.setdefault(k, v)
|
| 577 |
+
im_frame = _normalize_palette(im_frame, palette, im.encoderinfo)
|
| 578 |
+
|
| 579 |
+
encoderinfo = im.encoderinfo.copy()
|
| 580 |
+
if isinstance(duration, (list, tuple)):
|
| 581 |
+
encoderinfo["duration"] = duration[frame_count]
|
| 582 |
+
if isinstance(disposal, (list, tuple)):
|
| 583 |
+
encoderinfo["disposal"] = disposal[frame_count]
|
| 584 |
+
frame_count += 1
|
| 585 |
+
|
| 586 |
+
if im_frames:
|
| 587 |
+
# delta frame
|
| 588 |
+
previous = im_frames[-1]
|
| 589 |
+
if encoderinfo.get("disposal") == 2:
|
| 590 |
+
if background_im is None:
|
| 591 |
+
color = im.encoderinfo.get(
|
| 592 |
+
"transparency", im.info.get("transparency", (0, 0, 0))
|
| 593 |
+
)
|
| 594 |
+
background = _get_background(im_frame, color)
|
| 595 |
+
background_im = Image.new("P", im_frame.size, background)
|
| 596 |
+
background_im.putpalette(im_frames[0]["im"].palette)
|
| 597 |
+
base_im = background_im
|
| 598 |
+
else:
|
| 599 |
+
base_im = previous["im"]
|
| 600 |
+
if _get_palette_bytes(im_frame) == _get_palette_bytes(base_im):
|
| 601 |
+
delta = ImageChops.subtract_modulo(im_frame, base_im)
|
| 602 |
+
else:
|
| 603 |
+
delta = ImageChops.subtract_modulo(
|
| 604 |
+
im_frame.convert("RGB"), base_im.convert("RGB")
|
| 605 |
+
)
|
| 606 |
+
bbox = delta.getbbox()
|
| 607 |
+
if not bbox:
|
| 608 |
+
# This frame is identical to the previous frame
|
| 609 |
+
if duration:
|
| 610 |
+
previous["encoderinfo"]["duration"] += encoderinfo["duration"]
|
| 611 |
+
continue
|
| 612 |
+
else:
|
| 613 |
+
bbox = None
|
| 614 |
+
im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo})
|
| 615 |
+
|
| 616 |
+
if len(im_frames) > 1:
|
| 617 |
+
for frame_data in im_frames:
|
| 618 |
+
im_frame = frame_data["im"]
|
| 619 |
+
if not frame_data["bbox"]:
|
| 620 |
+
# global header
|
| 621 |
+
for s in _get_global_header(im_frame, frame_data["encoderinfo"]):
|
| 622 |
+
fp.write(s)
|
| 623 |
+
offset = (0, 0)
|
| 624 |
+
else:
|
| 625 |
+
# compress difference
|
| 626 |
+
if not palette:
|
| 627 |
+
frame_data["encoderinfo"]["include_color_table"] = True
|
| 628 |
+
|
| 629 |
+
im_frame = im_frame.crop(frame_data["bbox"])
|
| 630 |
+
offset = frame_data["bbox"][:2]
|
| 631 |
+
_write_frame_data(fp, im_frame, offset, frame_data["encoderinfo"])
|
| 632 |
+
return True
|
| 633 |
+
elif "duration" in im.encoderinfo and isinstance(
|
| 634 |
+
im.encoderinfo["duration"], (list, tuple)
|
| 635 |
+
):
|
| 636 |
+
# Since multiple frames will not be written, add together the frame durations
|
| 637 |
+
im.encoderinfo["duration"] = sum(im.encoderinfo["duration"])
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def _save_all(im, fp, filename):
|
| 641 |
+
_save(im, fp, filename, save_all=True)
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
def _save(im, fp, filename, save_all=False):
|
| 645 |
+
# header
|
| 646 |
+
if "palette" in im.encoderinfo or "palette" in im.info:
|
| 647 |
+
palette = im.encoderinfo.get("palette", im.info.get("palette"))
|
| 648 |
+
else:
|
| 649 |
+
palette = None
|
| 650 |
+
im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True)
|
| 651 |
+
|
| 652 |
+
if not save_all or not _write_multiple_frames(im, fp, palette):
|
| 653 |
+
_write_single_frame(im, fp, palette)
|
| 654 |
+
|
| 655 |
+
fp.write(b";") # end of file
|
| 656 |
+
|
| 657 |
+
if hasattr(fp, "flush"):
|
| 658 |
+
fp.flush()
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def get_interlace(im):
|
| 662 |
+
interlace = im.encoderinfo.get("interlace", 1)
|
| 663 |
+
|
| 664 |
+
# workaround for @PIL153
|
| 665 |
+
if min(im.size) < 16:
|
| 666 |
+
interlace = 0
|
| 667 |
+
|
| 668 |
+
return interlace
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def _write_local_header(fp, im, offset, flags):
|
| 672 |
+
transparent_color_exists = False
|
| 673 |
+
try:
|
| 674 |
+
if "transparency" in im.encoderinfo:
|
| 675 |
+
transparency = im.encoderinfo["transparency"]
|
| 676 |
+
else:
|
| 677 |
+
transparency = im.info["transparency"]
|
| 678 |
+
transparency = int(transparency)
|
| 679 |
+
except (KeyError, ValueError):
|
| 680 |
+
pass
|
| 681 |
+
else:
|
| 682 |
+
# optimize the block away if transparent color is not used
|
| 683 |
+
transparent_color_exists = True
|
| 684 |
+
|
| 685 |
+
used_palette_colors = _get_optimize(im, im.encoderinfo)
|
| 686 |
+
if used_palette_colors is not None:
|
| 687 |
+
# adjust the transparency index after optimize
|
| 688 |
+
try:
|
| 689 |
+
transparency = used_palette_colors.index(transparency)
|
| 690 |
+
except ValueError:
|
| 691 |
+
transparent_color_exists = False
|
| 692 |
+
|
| 693 |
+
if "duration" in im.encoderinfo:
|
| 694 |
+
duration = int(im.encoderinfo["duration"] / 10)
|
| 695 |
+
else:
|
| 696 |
+
duration = 0
|
| 697 |
+
|
| 698 |
+
disposal = int(im.encoderinfo.get("disposal", 0))
|
| 699 |
+
|
| 700 |
+
if transparent_color_exists or duration != 0 or disposal:
|
| 701 |
+
packed_flag = 1 if transparent_color_exists else 0
|
| 702 |
+
packed_flag |= disposal << 2
|
| 703 |
+
if not transparent_color_exists:
|
| 704 |
+
transparency = 0
|
| 705 |
+
|
| 706 |
+
fp.write(
|
| 707 |
+
b"!"
|
| 708 |
+
+ o8(249) # extension intro
|
| 709 |
+
+ o8(4) # length
|
| 710 |
+
+ o8(packed_flag) # packed fields
|
| 711 |
+
+ o16(duration) # duration
|
| 712 |
+
+ o8(transparency) # transparency index
|
| 713 |
+
+ o8(0)
|
| 714 |
+
)
|
| 715 |
+
|
| 716 |
+
if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]):
|
| 717 |
+
fp.write(b"!" + o8(254)) # extension intro
|
| 718 |
+
comment = im.encoderinfo["comment"]
|
| 719 |
+
if isinstance(comment, str):
|
| 720 |
+
comment = comment.encode()
|
| 721 |
+
for i in range(0, len(comment), 255):
|
| 722 |
+
subblock = comment[i : i + 255]
|
| 723 |
+
fp.write(o8(len(subblock)) + subblock)
|
| 724 |
+
fp.write(o8(0))
|
| 725 |
+
if "loop" in im.encoderinfo:
|
| 726 |
+
number_of_loops = im.encoderinfo["loop"]
|
| 727 |
+
fp.write(
|
| 728 |
+
b"!"
|
| 729 |
+
+ o8(255) # extension intro
|
| 730 |
+
+ o8(11)
|
| 731 |
+
+ b"NETSCAPE2.0"
|
| 732 |
+
+ o8(3)
|
| 733 |
+
+ o8(1)
|
| 734 |
+
+ o16(number_of_loops) # number of loops
|
| 735 |
+
+ o8(0)
|
| 736 |
+
)
|
| 737 |
+
include_color_table = im.encoderinfo.get("include_color_table")
|
| 738 |
+
if include_color_table:
|
| 739 |
+
palette_bytes = _get_palette_bytes(im)
|
| 740 |
+
color_table_size = _get_color_table_size(palette_bytes)
|
| 741 |
+
if color_table_size:
|
| 742 |
+
flags = flags | 128 # local color table flag
|
| 743 |
+
flags = flags | color_table_size
|
| 744 |
+
|
| 745 |
+
fp.write(
|
| 746 |
+
b","
|
| 747 |
+
+ o16(offset[0]) # offset
|
| 748 |
+
+ o16(offset[1])
|
| 749 |
+
+ o16(im.size[0]) # size
|
| 750 |
+
+ o16(im.size[1])
|
| 751 |
+
+ o8(flags) # flags
|
| 752 |
+
)
|
| 753 |
+
if include_color_table and color_table_size:
|
| 754 |
+
fp.write(_get_header_palette(palette_bytes))
|
| 755 |
+
fp.write(o8(8)) # bits
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
def _save_netpbm(im, fp, filename):
|
| 759 |
+
|
| 760 |
+
# Unused by default.
|
| 761 |
+
# To use, uncomment the register_save call at the end of the file.
|
| 762 |
+
#
|
| 763 |
+
# If you need real GIF compression and/or RGB quantization, you
|
| 764 |
+
# can use the external NETPBM/PBMPLUS utilities. See comments
|
| 765 |
+
# below for information on how to enable this.
|
| 766 |
+
tempfile = im._dump()
|
| 767 |
+
|
| 768 |
+
try:
|
| 769 |
+
with open(filename, "wb") as f:
|
| 770 |
+
if im.mode != "RGB":
|
| 771 |
+
subprocess.check_call(
|
| 772 |
+
["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL
|
| 773 |
+
)
|
| 774 |
+
else:
|
| 775 |
+
# Pipe ppmquant output into ppmtogif
|
| 776 |
+
# "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename)
|
| 777 |
+
quant_cmd = ["ppmquant", "256", tempfile]
|
| 778 |
+
togif_cmd = ["ppmtogif"]
|
| 779 |
+
quant_proc = subprocess.Popen(
|
| 780 |
+
quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
|
| 781 |
+
)
|
| 782 |
+
togif_proc = subprocess.Popen(
|
| 783 |
+
togif_cmd,
|
| 784 |
+
stdin=quant_proc.stdout,
|
| 785 |
+
stdout=f,
|
| 786 |
+
stderr=subprocess.DEVNULL,
|
| 787 |
+
)
|
| 788 |
+
|
| 789 |
+
# Allow ppmquant to receive SIGPIPE if ppmtogif exits
|
| 790 |
+
quant_proc.stdout.close()
|
| 791 |
+
|
| 792 |
+
retcode = quant_proc.wait()
|
| 793 |
+
if retcode:
|
| 794 |
+
raise subprocess.CalledProcessError(retcode, quant_cmd)
|
| 795 |
+
|
| 796 |
+
retcode = togif_proc.wait()
|
| 797 |
+
if retcode:
|
| 798 |
+
raise subprocess.CalledProcessError(retcode, togif_cmd)
|
| 799 |
+
finally:
|
| 800 |
+
try:
|
| 801 |
+
os.unlink(tempfile)
|
| 802 |
+
except OSError:
|
| 803 |
+
pass
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
# Force optimization so that we can test performance against
|
| 807 |
+
# cases where it took lots of memory and time previously.
|
| 808 |
+
_FORCE_OPTIMIZE = False
|
| 809 |
+
|
| 810 |
+
|
| 811 |
+
def _get_optimize(im, info):
|
| 812 |
+
"""
|
| 813 |
+
Palette optimization is a potentially expensive operation.
|
| 814 |
+
|
| 815 |
+
This function determines if the palette should be optimized using
|
| 816 |
+
some heuristics, then returns the list of palette entries in use.
|
| 817 |
+
|
| 818 |
+
:param im: Image object
|
| 819 |
+
:param info: encoderinfo
|
| 820 |
+
:returns: list of indexes of palette entries in use, or None
|
| 821 |
+
"""
|
| 822 |
+
if im.mode in ("P", "L") and info and info.get("optimize", 0):
|
| 823 |
+
# Potentially expensive operation.
|
| 824 |
+
|
| 825 |
+
# The palette saves 3 bytes per color not used, but palette
|
| 826 |
+
# lengths are restricted to 3*(2**N) bytes. Max saving would
|
| 827 |
+
# be 768 -> 6 bytes if we went all the way down to 2 colors.
|
| 828 |
+
# * If we're over 128 colors, we can't save any space.
|
| 829 |
+
# * If there aren't any holes, it's not worth collapsing.
|
| 830 |
+
# * If we have a 'large' image, the palette is in the noise.
|
| 831 |
+
|
| 832 |
+
# create the new palette if not every color is used
|
| 833 |
+
optimise = _FORCE_OPTIMIZE or im.mode == "L"
|
| 834 |
+
if optimise or im.width * im.height < 512 * 512:
|
| 835 |
+
# check which colors are used
|
| 836 |
+
used_palette_colors = []
|
| 837 |
+
for i, count in enumerate(im.histogram()):
|
| 838 |
+
if count:
|
| 839 |
+
used_palette_colors.append(i)
|
| 840 |
+
|
| 841 |
+
if optimise or (
|
| 842 |
+
len(used_palette_colors) <= 128
|
| 843 |
+
and max(used_palette_colors) > len(used_palette_colors)
|
| 844 |
+
):
|
| 845 |
+
return used_palette_colors
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
def _get_color_table_size(palette_bytes):
|
| 849 |
+
# calculate the palette size for the header
|
| 850 |
+
if not palette_bytes:
|
| 851 |
+
return 0
|
| 852 |
+
elif len(palette_bytes) < 9:
|
| 853 |
+
return 1
|
| 854 |
+
else:
|
| 855 |
+
return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def _get_header_palette(palette_bytes):
|
| 859 |
+
"""
|
| 860 |
+
Returns the palette, null padded to the next power of 2 (*3) bytes
|
| 861 |
+
suitable for direct inclusion in the GIF header
|
| 862 |
+
|
| 863 |
+
:param palette_bytes: Unpadded palette bytes, in RGBRGB form
|
| 864 |
+
:returns: Null padded palette
|
| 865 |
+
"""
|
| 866 |
+
color_table_size = _get_color_table_size(palette_bytes)
|
| 867 |
+
|
| 868 |
+
# add the missing amount of bytes
|
| 869 |
+
# the palette has to be 2<<n in size
|
| 870 |
+
actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3
|
| 871 |
+
if actual_target_size_diff > 0:
|
| 872 |
+
palette_bytes += o8(0) * 3 * actual_target_size_diff
|
| 873 |
+
return palette_bytes
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
def _get_palette_bytes(im):
|
| 877 |
+
"""
|
| 878 |
+
Gets the palette for inclusion in the gif header
|
| 879 |
+
|
| 880 |
+
:param im: Image object
|
| 881 |
+
:returns: Bytes, len<=768 suitable for inclusion in gif header
|
| 882 |
+
"""
|
| 883 |
+
return im.palette.palette
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def _get_background(im, infoBackground):
|
| 887 |
+
background = 0
|
| 888 |
+
if infoBackground:
|
| 889 |
+
background = infoBackground
|
| 890 |
+
if isinstance(background, tuple):
|
| 891 |
+
# WebPImagePlugin stores an RGBA value in info["background"]
|
| 892 |
+
# So it must be converted to the same format as GifImagePlugin's
|
| 893 |
+
# info["background"] - a global color table index
|
| 894 |
+
try:
|
| 895 |
+
background = im.palette.getcolor(background, im)
|
| 896 |
+
except ValueError as e:
|
| 897 |
+
if str(e) == "cannot allocate more than 256 colors":
|
| 898 |
+
# If all 256 colors are in use,
|
| 899 |
+
# then there is no need for the background color
|
| 900 |
+
return 0
|
| 901 |
+
else:
|
| 902 |
+
raise
|
| 903 |
+
return background
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def _get_global_header(im, info):
|
| 907 |
+
"""Return a list of strings representing a GIF header"""
|
| 908 |
+
|
| 909 |
+
# Header Block
|
| 910 |
+
# https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp
|
| 911 |
+
|
| 912 |
+
version = b"87a"
|
| 913 |
+
for extensionKey in ["transparency", "duration", "loop", "comment"]:
|
| 914 |
+
if info and extensionKey in info:
|
| 915 |
+
if (extensionKey == "duration" and info[extensionKey] == 0) or (
|
| 916 |
+
extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255)
|
| 917 |
+
):
|
| 918 |
+
continue
|
| 919 |
+
version = b"89a"
|
| 920 |
+
break
|
| 921 |
+
else:
|
| 922 |
+
if im.info.get("version") == b"89a":
|
| 923 |
+
version = b"89a"
|
| 924 |
+
|
| 925 |
+
background = _get_background(im, info.get("background"))
|
| 926 |
+
|
| 927 |
+
palette_bytes = _get_palette_bytes(im)
|
| 928 |
+
color_table_size = _get_color_table_size(palette_bytes)
|
| 929 |
+
|
| 930 |
+
return [
|
| 931 |
+
b"GIF" # signature
|
| 932 |
+
+ version # version
|
| 933 |
+
+ o16(im.size[0]) # canvas width
|
| 934 |
+
+ o16(im.size[1]), # canvas height
|
| 935 |
+
# Logical Screen Descriptor
|
| 936 |
+
# size of global color table + global color table flag
|
| 937 |
+
o8(color_table_size + 128), # packed fields
|
| 938 |
+
# background + reserved/aspect
|
| 939 |
+
o8(background) + o8(0),
|
| 940 |
+
# Global Color Table
|
| 941 |
+
_get_header_palette(palette_bytes),
|
| 942 |
+
]
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
def _write_frame_data(fp, im_frame, offset, params):
|
| 946 |
+
try:
|
| 947 |
+
im_frame.encoderinfo = params
|
| 948 |
+
|
| 949 |
+
# local image header
|
| 950 |
+
_write_local_header(fp, im_frame, offset, 0)
|
| 951 |
+
|
| 952 |
+
ImageFile._save(
|
| 953 |
+
im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])]
|
| 954 |
+
)
|
| 955 |
+
|
| 956 |
+
fp.write(b"\0") # end of image data
|
| 957 |
+
finally:
|
| 958 |
+
del im_frame.encoderinfo
|
| 959 |
+
|
| 960 |
+
|
| 961 |
+
# --------------------------------------------------------------------
|
| 962 |
+
# Legacy GIF utilities
|
| 963 |
+
|
| 964 |
+
|
| 965 |
+
def getheader(im, palette=None, info=None):
|
| 966 |
+
"""
|
| 967 |
+
Legacy Method to get Gif data from image.
|
| 968 |
+
|
| 969 |
+
Warning:: May modify image data.
|
| 970 |
+
|
| 971 |
+
:param im: Image object
|
| 972 |
+
:param palette: bytes object containing the source palette, or ....
|
| 973 |
+
:param info: encoderinfo
|
| 974 |
+
:returns: tuple of(list of header items, optimized palette)
|
| 975 |
+
|
| 976 |
+
"""
|
| 977 |
+
used_palette_colors = _get_optimize(im, info)
|
| 978 |
+
|
| 979 |
+
if info is None:
|
| 980 |
+
info = {}
|
| 981 |
+
|
| 982 |
+
if "background" not in info and "background" in im.info:
|
| 983 |
+
info["background"] = im.info["background"]
|
| 984 |
+
|
| 985 |
+
im_mod = _normalize_palette(im, palette, info)
|
| 986 |
+
im.palette = im_mod.palette
|
| 987 |
+
im.im = im_mod.im
|
| 988 |
+
header = _get_global_header(im, info)
|
| 989 |
+
|
| 990 |
+
return header, used_palette_colors
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
# To specify duration, add the time in milliseconds to getdata(),
|
| 994 |
+
# e.g. getdata(im_frame, duration=1000)
|
| 995 |
+
def getdata(im, offset=(0, 0), **params):
|
| 996 |
+
"""
|
| 997 |
+
Legacy Method
|
| 998 |
+
|
| 999 |
+
Return a list of strings representing this image.
|
| 1000 |
+
The first string is a local image header, the rest contains
|
| 1001 |
+
encoded image data.
|
| 1002 |
+
|
| 1003 |
+
:param im: Image object
|
| 1004 |
+
:param offset: Tuple of (x, y) pixels. Defaults to (0,0)
|
| 1005 |
+
:param \\**params: E.g. duration or other encoder info parameters
|
| 1006 |
+
:returns: List of Bytes containing gif encoded frame data
|
| 1007 |
+
|
| 1008 |
+
"""
|
| 1009 |
+
|
| 1010 |
+
class Collector:
|
| 1011 |
+
data = []
|
| 1012 |
+
|
| 1013 |
+
def write(self, data):
|
| 1014 |
+
self.data.append(data)
|
| 1015 |
+
|
| 1016 |
+
im.load() # make sure raster data is available
|
| 1017 |
+
|
| 1018 |
+
fp = Collector()
|
| 1019 |
+
|
| 1020 |
+
_write_frame_data(fp, im, offset, params)
|
| 1021 |
+
|
| 1022 |
+
return fp.data
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
# --------------------------------------------------------------------
|
| 1026 |
+
# Registry
|
| 1027 |
+
|
| 1028 |
+
Image.register_open(GifImageFile.format, GifImageFile, _accept)
|
| 1029 |
+
Image.register_save(GifImageFile.format, _save)
|
| 1030 |
+
Image.register_save_all(GifImageFile.format, _save_all)
|
| 1031 |
+
Image.register_extension(GifImageFile.format, ".gif")
|
| 1032 |
+
Image.register_mime(GifImageFile.format, "image/gif")
|
| 1033 |
+
|
| 1034 |
+
#
|
| 1035 |
+
# Uncomment the following line if you wish to use NETPBM/PBMPLUS
|
| 1036 |
+
# instead of the built-in "uncompressed" GIF encoder
|
| 1037 |
+
|
| 1038 |
+
# Image.register_save(GifImageFile.format, _save_netpbm)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImImagePlugin.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# IFUNC IM file handling for PIL
|
| 6 |
+
#
|
| 7 |
+
# history:
|
| 8 |
+
# 1995-09-01 fl Created.
|
| 9 |
+
# 1997-01-03 fl Save palette images
|
| 10 |
+
# 1997-01-08 fl Added sequence support
|
| 11 |
+
# 1997-01-23 fl Added P and RGB save support
|
| 12 |
+
# 1997-05-31 fl Read floating point images
|
| 13 |
+
# 1997-06-22 fl Save floating point images
|
| 14 |
+
# 1997-08-27 fl Read and save 1-bit images
|
| 15 |
+
# 1998-06-25 fl Added support for RGB+LUT images
|
| 16 |
+
# 1998-07-02 fl Added support for YCC images
|
| 17 |
+
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
| 18 |
+
# 1998-12-29 fl Added I;16 support
|
| 19 |
+
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
|
| 20 |
+
# 2003-09-26 fl Added LA/PA support
|
| 21 |
+
#
|
| 22 |
+
# Copyright (c) 1997-2003 by Secret Labs AB.
|
| 23 |
+
# Copyright (c) 1995-2001 by Fredrik Lundh.
|
| 24 |
+
#
|
| 25 |
+
# See the README file for information on usage and redistribution.
|
| 26 |
+
#
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
import os
|
| 30 |
+
import re
|
| 31 |
+
|
| 32 |
+
from . import Image, ImageFile, ImagePalette
|
| 33 |
+
|
| 34 |
+
# --------------------------------------------------------------------
|
| 35 |
+
# Standard tags
|
| 36 |
+
|
| 37 |
+
COMMENT = "Comment"
|
| 38 |
+
DATE = "Date"
|
| 39 |
+
EQUIPMENT = "Digitalization equipment"
|
| 40 |
+
FRAMES = "File size (no of images)"
|
| 41 |
+
LUT = "Lut"
|
| 42 |
+
NAME = "Name"
|
| 43 |
+
SCALE = "Scale (x,y)"
|
| 44 |
+
SIZE = "Image size (x*y)"
|
| 45 |
+
MODE = "Image type"
|
| 46 |
+
|
| 47 |
+
TAGS = {
|
| 48 |
+
COMMENT: 0,
|
| 49 |
+
DATE: 0,
|
| 50 |
+
EQUIPMENT: 0,
|
| 51 |
+
FRAMES: 0,
|
| 52 |
+
LUT: 0,
|
| 53 |
+
NAME: 0,
|
| 54 |
+
SCALE: 0,
|
| 55 |
+
SIZE: 0,
|
| 56 |
+
MODE: 0,
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
OPEN = {
|
| 60 |
+
# ifunc93/p3cfunc formats
|
| 61 |
+
"0 1 image": ("1", "1"),
|
| 62 |
+
"L 1 image": ("1", "1"),
|
| 63 |
+
"Greyscale image": ("L", "L"),
|
| 64 |
+
"Grayscale image": ("L", "L"),
|
| 65 |
+
"RGB image": ("RGB", "RGB;L"),
|
| 66 |
+
"RLB image": ("RGB", "RLB"),
|
| 67 |
+
"RYB image": ("RGB", "RLB"),
|
| 68 |
+
"B1 image": ("1", "1"),
|
| 69 |
+
"B2 image": ("P", "P;2"),
|
| 70 |
+
"B4 image": ("P", "P;4"),
|
| 71 |
+
"X 24 image": ("RGB", "RGB"),
|
| 72 |
+
"L 32 S image": ("I", "I;32"),
|
| 73 |
+
"L 32 F image": ("F", "F;32"),
|
| 74 |
+
# old p3cfunc formats
|
| 75 |
+
"RGB3 image": ("RGB", "RGB;T"),
|
| 76 |
+
"RYB3 image": ("RGB", "RYB;T"),
|
| 77 |
+
# extensions
|
| 78 |
+
"LA image": ("LA", "LA;L"),
|
| 79 |
+
"PA image": ("LA", "PA;L"),
|
| 80 |
+
"RGBA image": ("RGBA", "RGBA;L"),
|
| 81 |
+
"RGBX image": ("RGBX", "RGBX;L"),
|
| 82 |
+
"CMYK image": ("CMYK", "CMYK;L"),
|
| 83 |
+
"YCC image": ("YCbCr", "YCbCr;L"),
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
# ifunc95 extensions
|
| 87 |
+
for i in ["8", "8S", "16", "16S", "32", "32F"]:
|
| 88 |
+
OPEN[f"L {i} image"] = ("F", f"F;{i}")
|
| 89 |
+
OPEN[f"L*{i} image"] = ("F", f"F;{i}")
|
| 90 |
+
for i in ["16", "16L", "16B"]:
|
| 91 |
+
OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}")
|
| 92 |
+
OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}")
|
| 93 |
+
for i in ["32S"]:
|
| 94 |
+
OPEN[f"L {i} image"] = ("I", f"I;{i}")
|
| 95 |
+
OPEN[f"L*{i} image"] = ("I", f"I;{i}")
|
| 96 |
+
for i in range(2, 33):
|
| 97 |
+
OPEN[f"L*{i} image"] = ("F", f"F;{i}")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# --------------------------------------------------------------------
|
| 101 |
+
# Read IM directory
|
| 102 |
+
|
| 103 |
+
split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def number(s):
|
| 107 |
+
try:
|
| 108 |
+
return int(s)
|
| 109 |
+
except ValueError:
|
| 110 |
+
return float(s)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
##
|
| 114 |
+
# Image plugin for the IFUNC IM file format.
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class ImImageFile(ImageFile.ImageFile):
|
| 118 |
+
|
| 119 |
+
format = "IM"
|
| 120 |
+
format_description = "IFUNC Image Memory"
|
| 121 |
+
_close_exclusive_fp_after_loading = False
|
| 122 |
+
|
| 123 |
+
def _open(self):
|
| 124 |
+
|
| 125 |
+
# Quick rejection: if there's not an LF among the first
|
| 126 |
+
# 100 bytes, this is (probably) not a text header.
|
| 127 |
+
|
| 128 |
+
if b"\n" not in self.fp.read(100):
|
| 129 |
+
raise SyntaxError("not an IM file")
|
| 130 |
+
self.fp.seek(0)
|
| 131 |
+
|
| 132 |
+
n = 0
|
| 133 |
+
|
| 134 |
+
# Default values
|
| 135 |
+
self.info[MODE] = "L"
|
| 136 |
+
self.info[SIZE] = (512, 512)
|
| 137 |
+
self.info[FRAMES] = 1
|
| 138 |
+
|
| 139 |
+
self.rawmode = "L"
|
| 140 |
+
|
| 141 |
+
while True:
|
| 142 |
+
|
| 143 |
+
s = self.fp.read(1)
|
| 144 |
+
|
| 145 |
+
# Some versions of IFUNC uses \n\r instead of \r\n...
|
| 146 |
+
if s == b"\r":
|
| 147 |
+
continue
|
| 148 |
+
|
| 149 |
+
if not s or s == b"\0" or s == b"\x1A":
|
| 150 |
+
break
|
| 151 |
+
|
| 152 |
+
# FIXME: this may read whole file if not a text file
|
| 153 |
+
s = s + self.fp.readline()
|
| 154 |
+
|
| 155 |
+
if len(s) > 100:
|
| 156 |
+
raise SyntaxError("not an IM file")
|
| 157 |
+
|
| 158 |
+
if s[-2:] == b"\r\n":
|
| 159 |
+
s = s[:-2]
|
| 160 |
+
elif s[-1:] == b"\n":
|
| 161 |
+
s = s[:-1]
|
| 162 |
+
|
| 163 |
+
try:
|
| 164 |
+
m = split.match(s)
|
| 165 |
+
except re.error as e:
|
| 166 |
+
raise SyntaxError("not an IM file") from e
|
| 167 |
+
|
| 168 |
+
if m:
|
| 169 |
+
|
| 170 |
+
k, v = m.group(1, 2)
|
| 171 |
+
|
| 172 |
+
# Don't know if this is the correct encoding,
|
| 173 |
+
# but a decent guess (I guess)
|
| 174 |
+
k = k.decode("latin-1", "replace")
|
| 175 |
+
v = v.decode("latin-1", "replace")
|
| 176 |
+
|
| 177 |
+
# Convert value as appropriate
|
| 178 |
+
if k in [FRAMES, SCALE, SIZE]:
|
| 179 |
+
v = v.replace("*", ",")
|
| 180 |
+
v = tuple(map(number, v.split(",")))
|
| 181 |
+
if len(v) == 1:
|
| 182 |
+
v = v[0]
|
| 183 |
+
elif k == MODE and v in OPEN:
|
| 184 |
+
v, self.rawmode = OPEN[v]
|
| 185 |
+
|
| 186 |
+
# Add to dictionary. Note that COMMENT tags are
|
| 187 |
+
# combined into a list of strings.
|
| 188 |
+
if k == COMMENT:
|
| 189 |
+
if k in self.info:
|
| 190 |
+
self.info[k].append(v)
|
| 191 |
+
else:
|
| 192 |
+
self.info[k] = [v]
|
| 193 |
+
else:
|
| 194 |
+
self.info[k] = v
|
| 195 |
+
|
| 196 |
+
if k in TAGS:
|
| 197 |
+
n += 1
|
| 198 |
+
|
| 199 |
+
else:
|
| 200 |
+
|
| 201 |
+
raise SyntaxError(
|
| 202 |
+
"Syntax error in IM header: " + s.decode("ascii", "replace")
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
if not n:
|
| 206 |
+
raise SyntaxError("Not an IM file")
|
| 207 |
+
|
| 208 |
+
# Basic attributes
|
| 209 |
+
self._size = self.info[SIZE]
|
| 210 |
+
self.mode = self.info[MODE]
|
| 211 |
+
|
| 212 |
+
# Skip forward to start of image data
|
| 213 |
+
while s and s[0:1] != b"\x1A":
|
| 214 |
+
s = self.fp.read(1)
|
| 215 |
+
if not s:
|
| 216 |
+
raise SyntaxError("File truncated")
|
| 217 |
+
|
| 218 |
+
if LUT in self.info:
|
| 219 |
+
# convert lookup table to palette or lut attribute
|
| 220 |
+
palette = self.fp.read(768)
|
| 221 |
+
greyscale = 1 # greyscale palette
|
| 222 |
+
linear = 1 # linear greyscale palette
|
| 223 |
+
for i in range(256):
|
| 224 |
+
if palette[i] == palette[i + 256] == palette[i + 512]:
|
| 225 |
+
if palette[i] != i:
|
| 226 |
+
linear = 0
|
| 227 |
+
else:
|
| 228 |
+
greyscale = 0
|
| 229 |
+
if self.mode in ["L", "LA", "P", "PA"]:
|
| 230 |
+
if greyscale:
|
| 231 |
+
if not linear:
|
| 232 |
+
self.lut = list(palette[:256])
|
| 233 |
+
else:
|
| 234 |
+
if self.mode in ["L", "P"]:
|
| 235 |
+
self.mode = self.rawmode = "P"
|
| 236 |
+
elif self.mode in ["LA", "PA"]:
|
| 237 |
+
self.mode = "PA"
|
| 238 |
+
self.rawmode = "PA;L"
|
| 239 |
+
self.palette = ImagePalette.raw("RGB;L", palette)
|
| 240 |
+
elif self.mode == "RGB":
|
| 241 |
+
if not greyscale or not linear:
|
| 242 |
+
self.lut = list(palette)
|
| 243 |
+
|
| 244 |
+
self.frame = 0
|
| 245 |
+
|
| 246 |
+
self.__offset = offs = self.fp.tell()
|
| 247 |
+
|
| 248 |
+
self.__fp = self.fp # FIXME: hack
|
| 249 |
+
|
| 250 |
+
if self.rawmode[:2] == "F;":
|
| 251 |
+
|
| 252 |
+
# ifunc95 formats
|
| 253 |
+
try:
|
| 254 |
+
# use bit decoder (if necessary)
|
| 255 |
+
bits = int(self.rawmode[2:])
|
| 256 |
+
if bits not in [8, 16, 32]:
|
| 257 |
+
self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))]
|
| 258 |
+
return
|
| 259 |
+
except ValueError:
|
| 260 |
+
pass
|
| 261 |
+
|
| 262 |
+
if self.rawmode in ["RGB;T", "RYB;T"]:
|
| 263 |
+
# Old LabEye/3PC files. Would be very surprised if anyone
|
| 264 |
+
# ever stumbled upon such a file ;-)
|
| 265 |
+
size = self.size[0] * self.size[1]
|
| 266 |
+
self.tile = [
|
| 267 |
+
("raw", (0, 0) + self.size, offs, ("G", 0, -1)),
|
| 268 |
+
("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)),
|
| 269 |
+
("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)),
|
| 270 |
+
]
|
| 271 |
+
else:
|
| 272 |
+
# LabEye/IFUNC files
|
| 273 |
+
self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]
|
| 274 |
+
|
| 275 |
+
@property
|
| 276 |
+
def n_frames(self):
|
| 277 |
+
return self.info[FRAMES]
|
| 278 |
+
|
| 279 |
+
@property
|
| 280 |
+
def is_animated(self):
|
| 281 |
+
return self.info[FRAMES] > 1
|
| 282 |
+
|
| 283 |
+
def seek(self, frame):
|
| 284 |
+
if not self._seek_check(frame):
|
| 285 |
+
return
|
| 286 |
+
|
| 287 |
+
self.frame = frame
|
| 288 |
+
|
| 289 |
+
if self.mode == "1":
|
| 290 |
+
bits = 1
|
| 291 |
+
else:
|
| 292 |
+
bits = 8 * len(self.mode)
|
| 293 |
+
|
| 294 |
+
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
|
| 295 |
+
offs = self.__offset + frame * size
|
| 296 |
+
|
| 297 |
+
self.fp = self.__fp
|
| 298 |
+
|
| 299 |
+
self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]
|
| 300 |
+
|
| 301 |
+
def tell(self):
|
| 302 |
+
return self.frame
|
| 303 |
+
|
| 304 |
+
def _close__fp(self):
|
| 305 |
+
try:
|
| 306 |
+
if self.__fp != self.fp:
|
| 307 |
+
self.__fp.close()
|
| 308 |
+
except AttributeError:
|
| 309 |
+
pass
|
| 310 |
+
finally:
|
| 311 |
+
self.__fp = None
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
#
|
| 315 |
+
# --------------------------------------------------------------------
|
| 316 |
+
# Save IM files
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
SAVE = {
|
| 320 |
+
# mode: (im type, raw mode)
|
| 321 |
+
"1": ("0 1", "1"),
|
| 322 |
+
"L": ("Greyscale", "L"),
|
| 323 |
+
"LA": ("LA", "LA;L"),
|
| 324 |
+
"P": ("Greyscale", "P"),
|
| 325 |
+
"PA": ("LA", "PA;L"),
|
| 326 |
+
"I": ("L 32S", "I;32S"),
|
| 327 |
+
"I;16": ("L 16", "I;16"),
|
| 328 |
+
"I;16L": ("L 16L", "I;16L"),
|
| 329 |
+
"I;16B": ("L 16B", "I;16B"),
|
| 330 |
+
"F": ("L 32F", "F;32F"),
|
| 331 |
+
"RGB": ("RGB", "RGB;L"),
|
| 332 |
+
"RGBA": ("RGBA", "RGBA;L"),
|
| 333 |
+
"RGBX": ("RGBX", "RGBX;L"),
|
| 334 |
+
"CMYK": ("CMYK", "CMYK;L"),
|
| 335 |
+
"YCbCr": ("YCC", "YCbCr;L"),
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def _save(im, fp, filename):
|
| 340 |
+
|
| 341 |
+
try:
|
| 342 |
+
image_type, rawmode = SAVE[im.mode]
|
| 343 |
+
except KeyError as e:
|
| 344 |
+
raise ValueError(f"Cannot save {im.mode} images as IM") from e
|
| 345 |
+
|
| 346 |
+
frames = im.encoderinfo.get("frames", 1)
|
| 347 |
+
|
| 348 |
+
fp.write(f"Image type: {image_type} image\r\n".encode("ascii"))
|
| 349 |
+
if filename:
|
| 350 |
+
# Each line must be 100 characters or less,
|
| 351 |
+
# or: SyntaxError("not an IM file")
|
| 352 |
+
# 8 characters are used for "Name: " and "\r\n"
|
| 353 |
+
# Keep just the filename, ditch the potentially overlong path
|
| 354 |
+
name, ext = os.path.splitext(os.path.basename(filename))
|
| 355 |
+
name = "".join([name[: 92 - len(ext)], ext])
|
| 356 |
+
|
| 357 |
+
fp.write(f"Name: {name}\r\n".encode("ascii"))
|
| 358 |
+
fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii"))
|
| 359 |
+
fp.write(f"File size (no of images): {frames}\r\n".encode("ascii"))
|
| 360 |
+
if im.mode in ["P", "PA"]:
|
| 361 |
+
fp.write(b"Lut: 1\r\n")
|
| 362 |
+
fp.write(b"\000" * (511 - fp.tell()) + b"\032")
|
| 363 |
+
if im.mode in ["P", "PA"]:
|
| 364 |
+
fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes
|
| 365 |
+
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))])
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
#
|
| 369 |
+
# --------------------------------------------------------------------
|
| 370 |
+
# Registry
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
Image.register_open(ImImageFile.format, ImImageFile)
|
| 374 |
+
Image.register_save(ImImageFile.format, _save)
|
| 375 |
+
|
| 376 |
+
Image.register_extension(ImImageFile.format, ".im")
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageCms.py
ADDED
|
@@ -0,0 +1,1029 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# The Python Imaging Library.
|
| 2 |
+
# $Id$
|
| 3 |
+
|
| 4 |
+
# Optional color management support, based on Kevin Cazabon's PyCMS
|
| 5 |
+
# library.
|
| 6 |
+
|
| 7 |
+
# History:
|
| 8 |
+
|
| 9 |
+
# 2009-03-08 fl Added to PIL.
|
| 10 |
+
|
| 11 |
+
# Copyright (C) 2002-2003 Kevin Cazabon
|
| 12 |
+
# Copyright (c) 2009 by Fredrik Lundh
|
| 13 |
+
# Copyright (c) 2013 by Eric Soroos
|
| 14 |
+
|
| 15 |
+
# See the README file for information on usage and redistribution. See
|
| 16 |
+
# below for the original description.
|
| 17 |
+
|
| 18 |
+
import sys
|
| 19 |
+
import warnings
|
| 20 |
+
from enum import IntEnum
|
| 21 |
+
|
| 22 |
+
from PIL import Image
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
from PIL import _imagingcms
|
| 26 |
+
except ImportError as ex:
|
| 27 |
+
# Allow error import for doc purposes, but error out when accessing
|
| 28 |
+
# anything in core.
|
| 29 |
+
from ._util import deferred_error
|
| 30 |
+
|
| 31 |
+
_imagingcms = deferred_error(ex)
|
| 32 |
+
|
| 33 |
+
DESCRIPTION = """
|
| 34 |
+
pyCMS
|
| 35 |
+
|
| 36 |
+
a Python / PIL interface to the littleCMS ICC Color Management System
|
| 37 |
+
Copyright (C) 2002-2003 Kevin Cazabon
|
| 38 |
+
kevin@cazabon.com
|
| 39 |
+
https://www.cazabon.com
|
| 40 |
+
|
| 41 |
+
pyCMS home page: https://www.cazabon.com/pyCMS
|
| 42 |
+
littleCMS home page: https://www.littlecms.com
|
| 43 |
+
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
|
| 44 |
+
|
| 45 |
+
Originally released under LGPL. Graciously donated to PIL in
|
| 46 |
+
March 2009, for distribution under the standard PIL license
|
| 47 |
+
|
| 48 |
+
The pyCMS.py module provides a "clean" interface between Python/PIL and
|
| 49 |
+
pyCMSdll, taking care of some of the more complex handling of the direct
|
| 50 |
+
pyCMSdll functions, as well as error-checking and making sure that all
|
| 51 |
+
relevant data is kept together.
|
| 52 |
+
|
| 53 |
+
While it is possible to call pyCMSdll functions directly, it's not highly
|
| 54 |
+
recommended.
|
| 55 |
+
|
| 56 |
+
Version History:
|
| 57 |
+
|
| 58 |
+
1.0.0 pil Oct 2013 Port to LCMS 2.
|
| 59 |
+
|
| 60 |
+
0.1.0 pil mod March 10, 2009
|
| 61 |
+
|
| 62 |
+
Renamed display profile to proof profile. The proof
|
| 63 |
+
profile is the profile of the device that is being
|
| 64 |
+
simulated, not the profile of the device which is
|
| 65 |
+
actually used to display/print the final simulation
|
| 66 |
+
(that'd be the output profile) - also see LCMSAPI.txt
|
| 67 |
+
input colorspace -> using 'renderingIntent' -> proof
|
| 68 |
+
colorspace -> using 'proofRenderingIntent' -> output
|
| 69 |
+
colorspace
|
| 70 |
+
|
| 71 |
+
Added LCMS FLAGS support.
|
| 72 |
+
Added FLAGS["SOFTPROOFING"] as default flag for
|
| 73 |
+
buildProofTransform (otherwise the proof profile/intent
|
| 74 |
+
would be ignored).
|
| 75 |
+
|
| 76 |
+
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
|
| 77 |
+
|
| 78 |
+
0.0.2 alpha Jan 6, 2002
|
| 79 |
+
|
| 80 |
+
Added try/except statements around type() checks of
|
| 81 |
+
potential CObjects... Python won't let you use type()
|
| 82 |
+
on them, and raises a TypeError (stupid, if you ask
|
| 83 |
+
me!)
|
| 84 |
+
|
| 85 |
+
Added buildProofTransformFromOpenProfiles() function.
|
| 86 |
+
Additional fixes in DLL, see DLL code for details.
|
| 87 |
+
|
| 88 |
+
0.0.1 alpha first public release, Dec. 26, 2002
|
| 89 |
+
|
| 90 |
+
Known to-do list with current version (of Python interface, not pyCMSdll):
|
| 91 |
+
|
| 92 |
+
none
|
| 93 |
+
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
VERSION = "1.0.0 pil"
|
| 97 |
+
|
| 98 |
+
# --------------------------------------------------------------------.
|
| 99 |
+
|
| 100 |
+
core = _imagingcms
|
| 101 |
+
|
| 102 |
+
#
|
| 103 |
+
# intent/direction values
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class Intent(IntEnum):
|
| 107 |
+
PERCEPTUAL = 0
|
| 108 |
+
RELATIVE_COLORIMETRIC = 1
|
| 109 |
+
SATURATION = 2
|
| 110 |
+
ABSOLUTE_COLORIMETRIC = 3
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class Direction(IntEnum):
|
| 114 |
+
INPUT = 0
|
| 115 |
+
OUTPUT = 1
|
| 116 |
+
PROOF = 2
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def __getattr__(name):
|
| 120 |
+
deprecated = "deprecated and will be removed in Pillow 10 (2023-07-01). "
|
| 121 |
+
for enum, prefix in {Intent: "INTENT_", Direction: "DIRECTION_"}.items():
|
| 122 |
+
if name.startswith(prefix):
|
| 123 |
+
name = name[len(prefix) :]
|
| 124 |
+
if name in enum.__members__:
|
| 125 |
+
warnings.warn(
|
| 126 |
+
prefix
|
| 127 |
+
+ name
|
| 128 |
+
+ " is "
|
| 129 |
+
+ deprecated
|
| 130 |
+
+ "Use "
|
| 131 |
+
+ enum.__name__
|
| 132 |
+
+ "."
|
| 133 |
+
+ name
|
| 134 |
+
+ " instead.",
|
| 135 |
+
DeprecationWarning,
|
| 136 |
+
stacklevel=2,
|
| 137 |
+
)
|
| 138 |
+
return enum[name]
|
| 139 |
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
#
|
| 143 |
+
# flags
|
| 144 |
+
|
| 145 |
+
FLAGS = {
|
| 146 |
+
"MATRIXINPUT": 1,
|
| 147 |
+
"MATRIXOUTPUT": 2,
|
| 148 |
+
"MATRIXONLY": (1 | 2),
|
| 149 |
+
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
|
| 150 |
+
# Don't create prelinearization tables on precalculated transforms
|
| 151 |
+
# (internal use):
|
| 152 |
+
"NOPRELINEARIZATION": 16,
|
| 153 |
+
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
|
| 154 |
+
"NOTCACHE": 64, # Inhibit 1-pixel cache
|
| 155 |
+
"NOTPRECALC": 256,
|
| 156 |
+
"NULLTRANSFORM": 512, # Don't transform anyway
|
| 157 |
+
"HIGHRESPRECALC": 1024, # Use more memory to give better accuracy
|
| 158 |
+
"LOWRESPRECALC": 2048, # Use less memory to minimize resources
|
| 159 |
+
"WHITEBLACKCOMPENSATION": 8192,
|
| 160 |
+
"BLACKPOINTCOMPENSATION": 8192,
|
| 161 |
+
"GAMUTCHECK": 4096, # Out of Gamut alarm
|
| 162 |
+
"SOFTPROOFING": 16384, # Do softproofing
|
| 163 |
+
"PRESERVEBLACK": 32768, # Black preservation
|
| 164 |
+
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
|
| 165 |
+
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16, # Gridpoints
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
_MAX_FLAG = 0
|
| 169 |
+
for flag in FLAGS.values():
|
| 170 |
+
if isinstance(flag, int):
|
| 171 |
+
_MAX_FLAG = _MAX_FLAG | flag
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# --------------------------------------------------------------------.
|
| 175 |
+
# Experimental PIL-level API
|
| 176 |
+
# --------------------------------------------------------------------.
|
| 177 |
+
|
| 178 |
+
##
|
| 179 |
+
# Profile.
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class ImageCmsProfile:
|
| 183 |
+
def __init__(self, profile):
|
| 184 |
+
"""
|
| 185 |
+
:param profile: Either a string representing a filename,
|
| 186 |
+
a file like object containing a profile or a
|
| 187 |
+
low-level profile object
|
| 188 |
+
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
if isinstance(profile, str):
|
| 192 |
+
if sys.platform == "win32":
|
| 193 |
+
profile_bytes_path = profile.encode()
|
| 194 |
+
try:
|
| 195 |
+
profile_bytes_path.decode("ascii")
|
| 196 |
+
except UnicodeDecodeError:
|
| 197 |
+
with open(profile, "rb") as f:
|
| 198 |
+
self._set(core.profile_frombytes(f.read()))
|
| 199 |
+
return
|
| 200 |
+
self._set(core.profile_open(profile), profile)
|
| 201 |
+
elif hasattr(profile, "read"):
|
| 202 |
+
self._set(core.profile_frombytes(profile.read()))
|
| 203 |
+
elif isinstance(profile, _imagingcms.CmsProfile):
|
| 204 |
+
self._set(profile)
|
| 205 |
+
else:
|
| 206 |
+
raise TypeError("Invalid type for Profile")
|
| 207 |
+
|
| 208 |
+
def _set(self, profile, filename=None):
|
| 209 |
+
self.profile = profile
|
| 210 |
+
self.filename = filename
|
| 211 |
+
if profile:
|
| 212 |
+
self.product_name = None # profile.product_name
|
| 213 |
+
self.product_info = None # profile.product_info
|
| 214 |
+
else:
|
| 215 |
+
self.product_name = None
|
| 216 |
+
self.product_info = None
|
| 217 |
+
|
| 218 |
+
def tobytes(self):
|
| 219 |
+
"""
|
| 220 |
+
Returns the profile in a format suitable for embedding in
|
| 221 |
+
saved images.
|
| 222 |
+
|
| 223 |
+
:returns: a bytes object containing the ICC profile.
|
| 224 |
+
"""
|
| 225 |
+
|
| 226 |
+
return core.profile_tobytes(self.profile)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
class ImageCmsTransform(Image.ImagePointHandler):
|
| 230 |
+
|
| 231 |
+
"""
|
| 232 |
+
Transform. This can be used with the procedural API, or with the standard
|
| 233 |
+
:py:func:`~PIL.Image.Image.point` method.
|
| 234 |
+
|
| 235 |
+
Will return the output profile in the ``output.info['icc_profile']``.
|
| 236 |
+
"""
|
| 237 |
+
|
| 238 |
+
def __init__(
|
| 239 |
+
self,
|
| 240 |
+
input,
|
| 241 |
+
output,
|
| 242 |
+
input_mode,
|
| 243 |
+
output_mode,
|
| 244 |
+
intent=Intent.PERCEPTUAL,
|
| 245 |
+
proof=None,
|
| 246 |
+
proof_intent=Intent.ABSOLUTE_COLORIMETRIC,
|
| 247 |
+
flags=0,
|
| 248 |
+
):
|
| 249 |
+
if proof is None:
|
| 250 |
+
self.transform = core.buildTransform(
|
| 251 |
+
input.profile, output.profile, input_mode, output_mode, intent, flags
|
| 252 |
+
)
|
| 253 |
+
else:
|
| 254 |
+
self.transform = core.buildProofTransform(
|
| 255 |
+
input.profile,
|
| 256 |
+
output.profile,
|
| 257 |
+
proof.profile,
|
| 258 |
+
input_mode,
|
| 259 |
+
output_mode,
|
| 260 |
+
intent,
|
| 261 |
+
proof_intent,
|
| 262 |
+
flags,
|
| 263 |
+
)
|
| 264 |
+
# Note: inputMode and outputMode are for pyCMS compatibility only
|
| 265 |
+
self.input_mode = self.inputMode = input_mode
|
| 266 |
+
self.output_mode = self.outputMode = output_mode
|
| 267 |
+
|
| 268 |
+
self.output_profile = output
|
| 269 |
+
|
| 270 |
+
def point(self, im):
|
| 271 |
+
return self.apply(im)
|
| 272 |
+
|
| 273 |
+
def apply(self, im, imOut=None):
|
| 274 |
+
im.load()
|
| 275 |
+
if imOut is None:
|
| 276 |
+
imOut = Image.new(self.output_mode, im.size, None)
|
| 277 |
+
self.transform.apply(im.im.id, imOut.im.id)
|
| 278 |
+
imOut.info["icc_profile"] = self.output_profile.tobytes()
|
| 279 |
+
return imOut
|
| 280 |
+
|
| 281 |
+
def apply_in_place(self, im):
|
| 282 |
+
im.load()
|
| 283 |
+
if im.mode != self.output_mode:
|
| 284 |
+
raise ValueError("mode mismatch") # wrong output mode
|
| 285 |
+
self.transform.apply(im.im.id, im.im.id)
|
| 286 |
+
im.info["icc_profile"] = self.output_profile.tobytes()
|
| 287 |
+
return im
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def get_display_profile(handle=None):
|
| 291 |
+
"""
|
| 292 |
+
(experimental) Fetches the profile for the current display device.
|
| 293 |
+
|
| 294 |
+
:returns: ``None`` if the profile is not known.
|
| 295 |
+
"""
|
| 296 |
+
|
| 297 |
+
if sys.platform != "win32":
|
| 298 |
+
return None
|
| 299 |
+
|
| 300 |
+
from PIL import ImageWin
|
| 301 |
+
|
| 302 |
+
if isinstance(handle, ImageWin.HDC):
|
| 303 |
+
profile = core.get_display_profile_win32(handle, 1)
|
| 304 |
+
else:
|
| 305 |
+
profile = core.get_display_profile_win32(handle or 0)
|
| 306 |
+
if profile is None:
|
| 307 |
+
return None
|
| 308 |
+
return ImageCmsProfile(profile)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
# --------------------------------------------------------------------.
|
| 312 |
+
# pyCMS compatible layer
|
| 313 |
+
# --------------------------------------------------------------------.
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
class PyCMSError(Exception):
|
| 317 |
+
|
| 318 |
+
"""(pyCMS) Exception class.
|
| 319 |
+
This is used for all errors in the pyCMS API."""
|
| 320 |
+
|
| 321 |
+
pass
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def profileToProfile(
|
| 325 |
+
im,
|
| 326 |
+
inputProfile,
|
| 327 |
+
outputProfile,
|
| 328 |
+
renderingIntent=Intent.PERCEPTUAL,
|
| 329 |
+
outputMode=None,
|
| 330 |
+
inPlace=False,
|
| 331 |
+
flags=0,
|
| 332 |
+
):
|
| 333 |
+
"""
|
| 334 |
+
(pyCMS) Applies an ICC transformation to a given image, mapping from
|
| 335 |
+
``inputProfile`` to ``outputProfile``.
|
| 336 |
+
|
| 337 |
+
If the input or output profiles specified are not valid filenames, a
|
| 338 |
+
:exc:`PyCMSError` will be raised. If ``inPlace`` is ``True`` and
|
| 339 |
+
``outputMode != im.mode``, a :exc:`PyCMSError` will be raised.
|
| 340 |
+
If an error occurs during application of the profiles,
|
| 341 |
+
a :exc:`PyCMSError` will be raised.
|
| 342 |
+
If ``outputMode`` is not a mode supported by the ``outputProfile`` (or by pyCMS),
|
| 343 |
+
a :exc:`PyCMSError` will be raised.
|
| 344 |
+
|
| 345 |
+
This function applies an ICC transformation to im from ``inputProfile``'s
|
| 346 |
+
color space to ``outputProfile``'s color space using the specified rendering
|
| 347 |
+
intent to decide how to handle out-of-gamut colors.
|
| 348 |
+
|
| 349 |
+
``outputMode`` can be used to specify that a color mode conversion is to
|
| 350 |
+
be done using these profiles, but the specified profiles must be able
|
| 351 |
+
to handle that mode. I.e., if converting im from RGB to CMYK using
|
| 352 |
+
profiles, the input profile must handle RGB data, and the output
|
| 353 |
+
profile must handle CMYK data.
|
| 354 |
+
|
| 355 |
+
:param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...)
|
| 356 |
+
or Image.open(...), etc.)
|
| 357 |
+
:param inputProfile: String, as a valid filename path to the ICC input
|
| 358 |
+
profile you wish to use for this image, or a profile object
|
| 359 |
+
:param outputProfile: String, as a valid filename path to the ICC output
|
| 360 |
+
profile you wish to use for this image, or a profile object
|
| 361 |
+
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
| 362 |
+
wish to use for the transform
|
| 363 |
+
|
| 364 |
+
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
| 365 |
+
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
| 366 |
+
ImageCms.Intent.SATURATION = 2
|
| 367 |
+
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
| 368 |
+
|
| 369 |
+
see the pyCMS documentation for details on rendering intents and what
|
| 370 |
+
they do.
|
| 371 |
+
:param outputMode: A valid PIL mode for the output image (i.e. "RGB",
|
| 372 |
+
"CMYK", etc.). Note: if rendering the image "inPlace", outputMode
|
| 373 |
+
MUST be the same mode as the input, or omitted completely. If
|
| 374 |
+
omitted, the outputMode will be the same as the mode of the input
|
| 375 |
+
image (im.mode)
|
| 376 |
+
:param inPlace: Boolean. If ``True``, the original image is modified in-place,
|
| 377 |
+
and ``None`` is returned. If ``False`` (default), a new
|
| 378 |
+
:py:class:`~PIL.Image.Image` object is returned with the transform applied.
|
| 379 |
+
:param flags: Integer (0-...) specifying additional flags
|
| 380 |
+
:returns: Either None or a new :py:class:`~PIL.Image.Image` object, depending on
|
| 381 |
+
the value of ``inPlace``
|
| 382 |
+
:exception PyCMSError:
|
| 383 |
+
"""
|
| 384 |
+
|
| 385 |
+
if outputMode is None:
|
| 386 |
+
outputMode = im.mode
|
| 387 |
+
|
| 388 |
+
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
|
| 389 |
+
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
|
| 390 |
+
|
| 391 |
+
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
|
| 392 |
+
raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG)
|
| 393 |
+
|
| 394 |
+
try:
|
| 395 |
+
if not isinstance(inputProfile, ImageCmsProfile):
|
| 396 |
+
inputProfile = ImageCmsProfile(inputProfile)
|
| 397 |
+
if not isinstance(outputProfile, ImageCmsProfile):
|
| 398 |
+
outputProfile = ImageCmsProfile(outputProfile)
|
| 399 |
+
transform = ImageCmsTransform(
|
| 400 |
+
inputProfile,
|
| 401 |
+
outputProfile,
|
| 402 |
+
im.mode,
|
| 403 |
+
outputMode,
|
| 404 |
+
renderingIntent,
|
| 405 |
+
flags=flags,
|
| 406 |
+
)
|
| 407 |
+
if inPlace:
|
| 408 |
+
transform.apply_in_place(im)
|
| 409 |
+
imOut = None
|
| 410 |
+
else:
|
| 411 |
+
imOut = transform.apply(im)
|
| 412 |
+
except (OSError, TypeError, ValueError) as v:
|
| 413 |
+
raise PyCMSError(v) from v
|
| 414 |
+
|
| 415 |
+
return imOut
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def getOpenProfile(profileFilename):
|
| 419 |
+
"""
|
| 420 |
+
(pyCMS) Opens an ICC profile file.
|
| 421 |
+
|
| 422 |
+
The PyCMSProfile object can be passed back into pyCMS for use in creating
|
| 423 |
+
transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
|
| 424 |
+
|
| 425 |
+
If ``profileFilename`` is not a valid filename for an ICC profile,
|
| 426 |
+
a :exc:`PyCMSError` will be raised.
|
| 427 |
+
|
| 428 |
+
:param profileFilename: String, as a valid filename path to the ICC profile
|
| 429 |
+
you wish to open, or a file-like object.
|
| 430 |
+
:returns: A CmsProfile class object.
|
| 431 |
+
:exception PyCMSError:
|
| 432 |
+
"""
|
| 433 |
+
|
| 434 |
+
try:
|
| 435 |
+
return ImageCmsProfile(profileFilename)
|
| 436 |
+
except (OSError, TypeError, ValueError) as v:
|
| 437 |
+
raise PyCMSError(v) from v
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def buildTransform(
|
| 441 |
+
inputProfile,
|
| 442 |
+
outputProfile,
|
| 443 |
+
inMode,
|
| 444 |
+
outMode,
|
| 445 |
+
renderingIntent=Intent.PERCEPTUAL,
|
| 446 |
+
flags=0,
|
| 447 |
+
):
|
| 448 |
+
"""
|
| 449 |
+
(pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
|
| 450 |
+
``outputProfile``. Use applyTransform to apply the transform to a given
|
| 451 |
+
image.
|
| 452 |
+
|
| 453 |
+
If the input or output profiles specified are not valid filenames, a
|
| 454 |
+
:exc:`PyCMSError` will be raised. If an error occurs during creation
|
| 455 |
+
of the transform, a :exc:`PyCMSError` will be raised.
|
| 456 |
+
|
| 457 |
+
If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile``
|
| 458 |
+
(or by pyCMS), a :exc:`PyCMSError` will be raised.
|
| 459 |
+
|
| 460 |
+
This function builds and returns an ICC transform from the ``inputProfile``
|
| 461 |
+
to the ``outputProfile`` using the ``renderingIntent`` to determine what to do
|
| 462 |
+
with out-of-gamut colors. It will ONLY work for converting images that
|
| 463 |
+
are in ``inMode`` to images that are in ``outMode`` color format (PIL mode,
|
| 464 |
+
i.e. "RGB", "RGBA", "CMYK", etc.).
|
| 465 |
+
|
| 466 |
+
Building the transform is a fair part of the overhead in
|
| 467 |
+
ImageCms.profileToProfile(), so if you're planning on converting multiple
|
| 468 |
+
images using the same input/output settings, this can save you time.
|
| 469 |
+
Once you have a transform object, it can be used with
|
| 470 |
+
ImageCms.applyProfile() to convert images without the need to re-compute
|
| 471 |
+
the lookup table for the transform.
|
| 472 |
+
|
| 473 |
+
The reason pyCMS returns a class object rather than a handle directly
|
| 474 |
+
to the transform is that it needs to keep track of the PIL input/output
|
| 475 |
+
modes that the transform is meant for. These attributes are stored in
|
| 476 |
+
the ``inMode`` and ``outMode`` attributes of the object (which can be
|
| 477 |
+
manually overridden if you really want to, but I don't know of any
|
| 478 |
+
time that would be of use, or would even work).
|
| 479 |
+
|
| 480 |
+
:param inputProfile: String, as a valid filename path to the ICC input
|
| 481 |
+
profile you wish to use for this transform, or a profile object
|
| 482 |
+
:param outputProfile: String, as a valid filename path to the ICC output
|
| 483 |
+
profile you wish to use for this transform, or a profile object
|
| 484 |
+
:param inMode: String, as a valid PIL mode that the appropriate profile
|
| 485 |
+
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
| 486 |
+
:param outMode: String, as a valid PIL mode that the appropriate profile
|
| 487 |
+
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
| 488 |
+
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
| 489 |
+
wish to use for the transform
|
| 490 |
+
|
| 491 |
+
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
|
| 492 |
+
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
|
| 493 |
+
ImageCms.Intent.SATURATION = 2
|
| 494 |
+
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
|
| 495 |
+
|
| 496 |
+
see the pyCMS documentation for details on rendering intents and what
|
| 497 |
+
they do.
|
| 498 |
+
:param flags: Integer (0-...) specifying additional flags
|
| 499 |
+
:returns: A CmsTransform class object.
|
| 500 |
+
:exception PyCMSError:
|
| 501 |
+
"""
|
| 502 |
+
|
| 503 |
+
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
|
| 504 |
+
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
|
| 505 |
+
|
| 506 |
+
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
|
| 507 |
+
raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG)
|
| 508 |
+
|
| 509 |
+
try:
|
| 510 |
+
if not isinstance(inputProfile, ImageCmsProfile):
|
| 511 |
+
inputProfile = ImageCmsProfile(inputProfile)
|
| 512 |
+
if not isinstance(outputProfile, ImageCmsProfile):
|
| 513 |
+
outputProfile = ImageCmsProfile(outputProfile)
|
| 514 |
+
return ImageCmsTransform(
|
| 515 |
+
inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags
|
| 516 |
+
)
|
| 517 |
+
except (OSError, TypeError, ValueError) as v:
|
| 518 |
+
raise PyCMSError(v) from v
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def buildProofTransform(
    inputProfile,
    outputProfile,
    proofProfile,
    inMode,
    outMode,
    renderingIntent=Intent.PERCEPTUAL,
    proofRenderingIntent=Intent.ABSOLUTE_COLORIMETRIC,
    flags=FLAGS["SOFTPROOFING"],
):
    """
    (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
    ``outputProfile``, but tries to simulate the result that would be
    obtained on the ``proofProfile`` device.

    If the input, output, or proof profiles specified are not valid
    filenames, a :exc:`PyCMSError` will be raised.

    If an error occurs during creation of the transform,
    a :exc:`PyCMSError` will be raised.

    If ``inMode`` or ``outMode`` are not a mode supported by the
    ``outputProfile`` (or by pyCMS), a :exc:`PyCMSError` will be raised.

    This function builds and returns an ICC transform from the
    ``inputProfile`` to the ``outputProfile``, but tries to simulate the
    result that would be obtained on the ``proofProfile`` device using
    ``renderingIntent`` and ``proofRenderingIntent`` to determine what to do
    with out-of-gamut colors.  This is known as "soft-proofing".  It will
    ONLY work for converting images that are in ``inMode`` to images that
    are in ``outMode`` color format (PIL mode, i.e. "RGB", "RGBA", "CMYK",
    etc.).

    Usage of the resulting transform object is exactly the same as with
    ImageCms.buildTransform().

    Proof profiling is generally used when using an output device to get a
    good idea of what the final printed/displayed image would look like on
    the ``proofProfile`` device when it's quicker and easier to use the
    output device for judging color.  Generally, this means that the output
    device is a monitor, or a dye-sub printer (etc.), and the simulated
    device is something more expensive, complicated, or time consuming
    (making it difficult to make a real print for color judgement purposes).

    Soft-proofing basically functions by adjusting the colors on the output
    device to match the colors of the device being simulated.  However, when
    the simulated device has a much wider gamut than the output device, you
    may obtain marginal results.

    :param inputProfile: String, as a valid filename path to the ICC input
        profile you wish to use for this transform, or a profile object
    :param outputProfile: String, as a valid filename path to the ICC output
        (monitor, usually) profile you wish to use for this transform, or a
        profile object
    :param proofProfile: String, as a valid filename path to the ICC proof
        profile you wish to use for this transform, or a profile object
    :param inMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param outMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param renderingIntent: Integer (0-3) specifying the rendering intent
        you wish to use for the input->proof (simulated) transform

            ImageCms.Intent.PERCEPTUAL            = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION            = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

        see the pyCMS documentation for details on rendering intents and
        what they do.
    :param proofRenderingIntent: Integer (0-3) specifying the rendering
        intent you wish to use for the proof->output transform, with the
        same values as ``renderingIntent`` above.
    :param flags: Integer (0-...) specifying additional flags
    :returns: A CmsTransform class object.
    :exception PyCMSError:
    """

    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        raise PyCMSError("renderingIntent must be an integer between 0 and 3")

    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        # BUG FIX: the message must be %-formatted; "str" + int raised a
        # TypeError instead of reporting the flag limit.
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)

    try:
        # Accept either open profile objects or filenames for all three
        # profiles, wrapping filenames on the fly.
        if not isinstance(inputProfile, ImageCmsProfile):
            inputProfile = ImageCmsProfile(inputProfile)
        if not isinstance(outputProfile, ImageCmsProfile):
            outputProfile = ImageCmsProfile(outputProfile)
        if not isinstance(proofProfile, ImageCmsProfile):
            proofProfile = ImageCmsProfile(proofProfile)
        return ImageCmsTransform(
            inputProfile,
            outputProfile,
            inMode,
            outMode,
            renderingIntent,
            proofProfile,
            proofRenderingIntent,
            flags,
        )
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
# Legacy pyCMS aliases: the modern functions accept open profile objects as
# well as filenames, so the same implementations serve both APIs.
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def applyTransform(im, transform, inPlace=False):
    """
    (pyCMS) Applies a transform to a given image.

    The transform is a prebuilt object from ImageCms.buildTransform() or
    ImageCms.buildTransformFromOpenProfiles(); reusing it across many images
    saves considerable calculation time when doing the same conversion
    repeatedly.

    If ``im.mode != transform.inMode``, a :exc:`PyCMSError` is raised.

    If ``inPlace`` is ``True`` and ``transform.inMode != transform.outMode``,
    a :exc:`PyCMSError` is raised.

    If ``im.mode``, ``transform.inMode`` or ``transform.outMode`` is not
    supported by pyCMSdll or the profiles you used for the transform, a
    :exc:`PyCMSError` is raised.

    If an error occurs while the transform is being applied,
    a :exc:`PyCMSError` is raised.

    Setting ``inPlace`` to ``True`` converts the pixels of ``im`` in place
    and returns ``None``; this is only possible when the transform's input
    and output modes match, because the pixel buffer cannot change layout.
    The default is to leave ``im`` untouched and return a new
    :py:class:`~PIL.Image.Image` of the same dimensions in mode
    ``transform.outMode``.

    :param im: An :py:class:`~PIL.Image.Image` object whose mode matches the
        ``inMode`` supported by the transform.
    :param transform: A valid CmsTransform class object
    :param inPlace: Bool.  If ``True``, ``im`` is modified in place and
        ``None`` is returned; if ``False`` (the default), a new image with
        the transform applied is returned.
    :returns: Either ``None`` or a new :py:class:`~PIL.Image.Image`,
        depending on ``inPlace``.  The profile will be returned in the
        image's ``info['icc_profile']``.
    :exception PyCMSError:
    """

    try:
        # Guard-clause form: the in-place path short-circuits with None.
        if inPlace:
            transform.apply_in_place(im)
            return None
        return transform.apply(im)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def createProfile(colorSpace, colorTemp=-1):
    """
    (pyCMS) Creates a profile.

    Use this function to create common profiles on-the-fly instead of
    having to supply a profile on disk and knowing the path to it.  It
    returns a normal CmsProfile object that can be passed to
    ImageCms.buildTransformFromOpenProfiles() to create a transform to
    apply to images.

    If colorSpace not in ``["LAB", "XYZ", "sRGB"]``,
    a :exc:`PyCMSError` is raised.

    If using LAB and ``colorTemp`` is not a positive integer,
    a :exc:`PyCMSError` is raised.

    If an error occurs while creating the profile,
    a :exc:`PyCMSError` is raised.

    :param colorSpace: String, the color space of the profile you wish to
        create.  Currently only "LAB", "XYZ", and "sRGB" are supported.
    :param colorTemp: Positive integer for the white point for the profile,
        in degrees Kelvin (i.e. 5000, 6500, 9600, etc.).  The default is for
        D50 illuminant if omitted (5000k).  colorTemp is ONLY applied to LAB
        profiles, and is ignored for XYZ and sRGB.
    :returns: A CmsProfile class object
    :exception PyCMSError:
    """

    if colorSpace not in ("LAB", "XYZ", "sRGB"):
        msg = f"Color space not supported for on-the-fly profile creation ({colorSpace})"
        raise PyCMSError(msg)

    if colorSpace == "LAB":
        # Only LAB uses the white point; validate it up front so the caller
        # gets a clear message rather than a low-level failure.
        try:
            colorTemp = float(colorTemp)
        except (TypeError, ValueError) as e:
            msg = f'Color temperature must be numeric, "{colorTemp}" not valid'
            raise PyCMSError(msg) from e

    try:
        return core.createProfile(colorSpace, colorTemp)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def getProfileName(profile):
    """
    (pyCMS) Gets the internal product name for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.  If an error occurs while trying to
    obtain the name tag, a :exc:`PyCMSError` is raised.

    Use this function to obtain the INTERNAL name of the profile (stored in
    an ICC tag in the profile itself), usually the one used when the profile
    was originally created.  Sometimes this tag also contains additional
    information supplied by the creator.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal name of the profile as stored
        in an ICC tag.
    :exception PyCMSError:
    """

    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Computed in Python rather than C.  Historical pyCMS 1.x behavior:
        # "model - manufacturer" when both exist and the model is short,
        # otherwise just the model, otherwise the description.  The trailing
        # newline preserves pyCMS compatibility.
        name = profile.profile.model
        maker = profile.profile.manufacturer

        if not (name or maker):
            return (profile.profile.profile_description or "") + "\n"
        if maker and len(name) <= 30:
            return f"{name} - {maker}\n"
        return name + "\n"

    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
def getProfileInfo(profile):
    """
    (pyCMS) Gets the internal product information for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the info tag,
    a :exc:`PyCMSError` is raised.

    Use this function to obtain the information stored in the profile's
    info tag.  This often contains details about the profile, and how it
    was created, as supplied by the creator.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """

    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Assembled in Python, not C; the white point bits weren't working
        # well, so they are skipped.  The historical layout was
        # description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint,
        # with a trailing blank section kept for pyCMS compatibility.
        sections = [
            tag
            for tag in (profile.profile.profile_description, profile.profile.copyright)
            if tag
        ]
        return "\r\n\r\n".join(sections) + "\r\n\r\n"

    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
def getProfileCopyright(profile):
    """
    (pyCMS) Gets the copyright for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the copyright tag,
    a :exc:`PyCMSError` is raised.

    Use this function to obtain the information stored in the profile's
    copyright tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        text = profile.profile.copyright or ""
        # Trailing newline preserves pyCMS compatibility.
        return text + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
def getProfileManufacturer(profile):
    """
    (pyCMS) Gets the manufacturer for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the manufacturer tag, a
    :exc:`PyCMSError` is raised.

    Use this function to obtain the information stored in the profile's
    manufacturer tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        text = profile.profile.manufacturer or ""
        # Trailing newline preserves pyCMS compatibility.
        return text + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
def getProfileModel(profile):
    """
    (pyCMS) Gets the model for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the model tag,
    a :exc:`PyCMSError` is raised.

    Use this function to obtain the information stored in the profile's
    model tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """

    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        text = profile.profile.model or ""
        # Trailing newline preserves pyCMS compatibility.
        return text + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def getProfileDescription(profile):
    """
    (pyCMS) Gets the description for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the description tag,
    a :exc:`PyCMSError` is raised.

    Use this function to obtain the information stored in the profile's
    description tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """

    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        text = profile.profile.profile_description or ""
        # Trailing newline preserves pyCMS compatibility.
        return text + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
def getDefaultIntent(profile):
    """
    (pyCMS) Gets the default intent name for the given profile.

    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
    a :exc:`PyCMSError` is raised.

    If an error occurs while trying to obtain the default intent, a
    :exc:`PyCMSError` is raised.

    Use this function to determine the default (and usually best optimized)
    rendering intent for this profile.  Most profiles support multiple
    rendering intents, but are intended mostly for one type of conversion.
    If you wish to use a different intent than returned, use
    ImageCms.isIntentSupported() to verify it will work first.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: Integer 0-3 specifying the default rendering intent for this
        profile.

            ImageCms.Intent.PERCEPTUAL            = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION            = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

        see the pyCMS documentation for details on rendering intents and
        what they do.
    :exception PyCMSError:
    """

    try:
        cms_profile = (
            profile if isinstance(profile, ImageCmsProfile) else ImageCmsProfile(profile)
        )
        return cms_profile.profile.rendering_intent
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 971 |
+
|
| 972 |
+
|
| 973 |
+
def isIntentSupported(profile, intent, direction):
    """
    (pyCMS) Checks if a given intent is supported.

    Use this function to verify that you can use your desired ``intent``
    with ``profile``, and that ``profile`` can be used for the
    input/output/proof profile as you desire.

    Some profiles are created specifically for one "direction", can cannot
    be used for others.  Some profiles can only be used for certain
    rendering intents, so it's best to either verify this before trying to
    create a transform with them (using this function), or catch the
    potential :exc:`PyCMSError` that will occur if they don't support the
    modes you select.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :param intent: Integer (0-3) specifying the rendering intent you wish to
        use with this profile

            ImageCms.Intent.PERCEPTUAL            = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION            = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

        see the pyCMS documentation for details on rendering intents and
        what they do.
    :param direction: Integer specifying if the profile is to be used for
        input, output, or proof

            INPUT  = 0 (or use ImageCms.Direction.INPUT)
            OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
            PROOF  = 2 (or use ImageCms.Direction.PROOF)

    :returns: 1 if the intent/direction are supported, -1 if they are not.
    :exception PyCMSError:
    """

    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # FIXME: I get different results for the same data w. different
        # compilers.  Bug in LittleCMS or in the binding?
        supported = profile.profile.is_intent_supported(intent, direction)
        return 1 if supported else -1
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
|
| 1022 |
+
|
| 1023 |
+
|
| 1024 |
+
def versions():
    """
    (pyCMS) Fetches versions.

    Returns a 4-tuple: the pyCMS VERSION string, the LittleCMS library
    version, the running Python version, and the Pillow version.
    """

    return (
        VERSION,
        core.littlecms_version,
        sys.version.split()[0],
        Image.__version__,
    )
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageFilter.py
ADDED
|
@@ -0,0 +1,538 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# standard filters
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 1995-11-27 fl Created
|
| 9 |
+
# 2002-06-08 fl Added rank and mode filters
|
| 10 |
+
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
|
| 11 |
+
#
|
| 12 |
+
# Copyright (c) 1997-2003 by Secret Labs AB.
|
| 13 |
+
# Copyright (c) 1995-2002 by Fredrik Lundh.
|
| 14 |
+
#
|
| 15 |
+
# See the README file for information on usage and redistribution.
|
| 16 |
+
#
|
| 17 |
+
import functools
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Filter:
    # Root of the filter class hierarchy; carries no behavior of its own.
    pass
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class MultibandFilter(Filter):
    # Marker base for filters applicable to multiband (e.g. RGB) images.
    pass
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class BuiltinFilter(MultibandFilter):
    # Adapter for filters implemented by the C core: a subclass supplies
    # ``filterargs`` and the image's own ``filter`` primitive does the work.
    def filter(self, image):
        # Palette images hold indices rather than colors, so filtering them
        # is rejected outright.
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        return image.filter(*self.filterargs)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class Kernel(BuiltinFilter):
    """
    Create a convolution kernel.  The current version only supports 3x3 and
    5x5 integer and floating point kernels, and kernels can only be applied
    to "L" and "RGB" images.

    :param size: Kernel size, given as (width, height).  In the current
                 version, this must be (3,3) or (5,5).
    :param kernel: A sequence containing kernel weights.
    :param scale: Scale factor.  If given, the result for each pixel is
                  divided by this value.  The default is the sum of the
                  kernel weights.
    :param offset: Offset.  If given, this value is added to the result,
                   after it has been divided by the scale factor.
    """

    name = "Kernel"

    def __init__(self, size, kernel, scale=None, offset=0):
        if scale is None:
            # Default normalization: divide by the total kernel weight.
            scale = functools.reduce(lambda total, weight: total + weight, kernel)
        if size[0] * size[1] != len(kernel):
            raise ValueError("not enough coefficients in kernel")
        self.filterargs = size, scale, offset, kernel
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class RankFilter(Filter):
    """
    Create a rank filter.  All pixels in a window of the given size are
    sorted and the ``rank``'th value is returned.

    :param size: The kernel size, in pixels.
    :param rank: Which sorted value to pick.  Use 0 for a min filter,
        ``size * size / 2`` for a median filter, ``size * size - 1`` for a
        max filter, etc.
    """

    name = "Rank"

    def __init__(self, size, rank):
        self.size = size
        self.rank = rank

    def filter(self, image):
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        # Expand the borders so the window is fully defined at the edges.
        padded = image.expand(self.size // 2, self.size // 2)
        return padded.rankfilter(self.size, self.rank)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class MedianFilter(RankFilter):
    """
    Create a median filter: picks the median pixel value in a window with
    the given size.

    :param size: The kernel size, in pixels.
    """

    name = "Median"

    def __init__(self, size=3):
        self.size = size
        # The middle rank of the sorted window is the median.
        self.rank = size * size // 2
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class MinFilter(RankFilter):
    """
    Create a min filter: picks the lowest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """

    name = "Min"

    def __init__(self, size=3):
        self.size = size
        # Rank 0 selects the smallest value in the sorted window.
        self.rank = 0
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class MaxFilter(RankFilter):
    """
    Create a max filter: picks the largest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """

    name = "Max"

    def __init__(self, size=3):
        self.size = size
        # The last rank of the sorted window is the maximum.
        self.rank = size * size - 1
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class ModeFilter(Filter):
    """
    Create a mode filter: picks the most frequent pixel value in a box with
    the given size.  Pixel values that occur only once or twice are ignored;
    if no pixel value occurs more than twice, the original pixel value is
    preserved.

    :param size: The kernel size, in pixels.
    """

    name = "Mode"

    def __init__(self, size=3):
        self.size = size

    def filter(self, image):
        # Delegates entirely to the C-level primitive on the image.
        return image.modefilter(self.size)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class GaussianBlur(MultibandFilter):
    """Blurs the image with a sequence of extended box filters, which
    approximates a Gaussian kernel. For details on accuracy see
    <https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf>

    :param radius: Standard deviation of the Gaussian kernel.
    """

    name = "GaussianBlur"

    def __init__(self, radius=2):
        self.radius = radius

    def filter(self, image):
        # Delegates to the C-level approximation on the core image object.
        return image.gaussian_blur(self.radius)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class BoxBlur(MultibandFilter):
    """Blurs the image by setting each pixel to the average value of the pixels
    in a square box extending radius pixels in each direction.
    Supports float radius of arbitrary size. Uses an optimized implementation
    which runs in linear time relative to the size of the image
    for any radius value.

    :param radius: Size of the box in one direction. Radius 0 does not blur,
                   returns an identical image. Radius 1 takes 1 pixel
                   in each direction, i.e. 9 pixels in total.
    """

    name = "BoxBlur"

    # NOTE: unlike the other blur filters, radius has no default and must be
    # supplied by the caller.
    def __init__(self, radius):
        self.radius = radius

    def filter(self, image):
        return image.box_blur(self.radius)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class UnsharpMask(MultibandFilter):
    """Unsharp mask filter.

    See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
    the parameters.

    :param radius: Blur Radius
    :param percent: Unsharp strength, in percent
    :param threshold: Threshold controls the minimum brightness change that
                      will be sharpened

    .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking

    """  # noqa: E501

    name = "UnsharpMask"

    def __init__(self, radius=2, percent=150, threshold=3):
        self.radius = radius
        self.percent = percent
        self.threshold = threshold

    def filter(self, image):
        # All three knobs are forwarded unchanged to the core implementation.
        return image.unsharp_mask(self.radius, self.percent, self.threshold)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class BLUR(BuiltinFilter):
    # 5x5 kernel averaging the window border (weights sum to 16, matching
    # the scale of 16, so overall brightness is preserved).
    name = "Blur"
    # fmt: off
    filterargs = (5, 5), 16, 0, (
        1, 1, 1, 1, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 1, 1, 1, 1,
    )
    # fmt: on
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
class CONTOUR(BuiltinFilter):
    # 3x3 Laplacian-style edge kernel (weights sum to 0); the offset of 255
    # shifts the zero response to white.
    name = "Contour"
    # fmt: off
    filterargs = (3, 3), 1, 255, (
        -1, -1, -1,
        -1,  8, -1,
        -1, -1, -1,
    )
    # fmt: on
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
class DETAIL(BuiltinFilter):
    # 3x3 sharpening kernel on the 4-neighbourhood; weights sum to 6,
    # matching the scale of 6.
    name = "Detail"
    # fmt: off
    filterargs = (3, 3), 6, 0, (
        0, -1,  0,
        -1, 10, -1,
        0, -1,  0,
    )
    # fmt: on
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class EDGE_ENHANCE(BuiltinFilter):
    # 3x3 edge-enhance kernel over the full 8-neighbourhood; weights sum
    # to 2, matching the scale of 2.
    name = "Edge-enhance"
    # fmt: off
    filterargs = (3, 3), 2, 0, (
        -1, -1, -1,
        -1, 10, -1,
        -1, -1, -1,
    )
    # fmt: on
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
class EDGE_ENHANCE_MORE(BuiltinFilter):
    # Stronger variant of EDGE_ENHANCE: weights sum to 1 with a scale of 1,
    # so the edge component is not attenuated.
    name = "Edge-enhance More"
    # fmt: off
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1,  9, -1,
        -1, -1, -1,
    )
    # fmt: on
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class EMBOSS(BuiltinFilter):
    # Directional difference kernel (weights sum to 0); the offset of 128
    # centres the response on mid-gray.
    name = "Emboss"
    # fmt: off
    filterargs = (3, 3), 1, 128, (
        -1, 0, 0,
        0,  1, 0,
        0,  0, 0,
    )
    # fmt: on
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class FIND_EDGES(BuiltinFilter):
    # Same Laplacian-style kernel as CONTOUR but with offset 0, so flat
    # regions map to black and edges stay bright.
    name = "Find Edges"
    # fmt: off
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1,  8, -1,
        -1, -1, -1,
    )
    # fmt: on
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class SHARPEN(BuiltinFilter):
    # 3x3 sharpening kernel; weights sum to 16, matching the scale of 16.
    name = "Sharpen"
    # fmt: off
    filterargs = (3, 3), 16, 0, (
        -2, -2, -2,
        -2, 32, -2,
        -2, -2, -2,
    )
    # fmt: on
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class SMOOTH(BuiltinFilter):
    # Centre-weighted 3x3 averaging kernel; weights sum to 13, matching the
    # scale of 13.
    name = "Smooth"
    # fmt: off
    filterargs = (3, 3), 13, 0, (
        1, 1, 1,
        1, 5, 1,
        1, 1, 1,
    )
    # fmt: on
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
class SMOOTH_MORE(BuiltinFilter):
    # Larger, centre-weighted 5x5 averaging kernel; weights sum to 100,
    # matching the scale of 100.
    name = "Smooth More"
    # fmt: off
    filterargs = (5, 5), 100, 0, (
        1, 1,  1, 1, 1,
        1, 5,  5, 5, 1,
        1, 5, 44, 5, 1,
        1, 5,  5, 5, 1,
        1, 1,  1, 1, 1,
    )
    # fmt: on
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
class Color3DLUT(MultibandFilter):
    """Three-dimensional color lookup table.

    Transforms 3-channel pixels using the values of the channels as coordinates
    in the 3D lookup table and interpolating the nearest elements.

    This method allows you to apply almost any color transformation
    in constant time by using pre-calculated decimated tables.

    .. versionadded:: 5.2.0

    :param size: Size of the table. One int or tuple of (int, int, int).
                 Minimal size in any dimension is 2, maximum is 65.
    :param table: Flat lookup table. A list of ``channels * size**3``
                  float elements or a list of ``size**3`` channels-sized
                  tuples with floats. Channels are changed first,
                  then first dimension, then second, then third.
                  Value 0.0 corresponds lowest value of output, 1.0 highest.
    :param channels: Number of channels in the table. Could be 3 or 4.
                     Default is 3.
    :param target_mode: A mode for the result image. Should have not less
                        than ``channels`` channels. Default is ``None``,
                        which means that mode wouldn't be changed.
    """

    name = "Color 3D LUT"

    def __init__(self, size, table, channels=3, target_mode=None, **kwargs):
        if channels not in (3, 4):
            raise ValueError("Only 3 or 4 output channels are supported")
        self.size = size = self._check_size(size)
        self.channels = channels
        self.mode = target_mode

        # Hidden flag `_copy_table=False` skips the defensive copy when the
        # caller built the table specifically for this constructor.
        copy_table = kwargs.get("_copy_table", True)
        n_entries = size[0] * size[1] * size[2]
        shape_mismatch = False

        numpy = None
        if hasattr(table, "shape"):
            try:
                import numpy
            except ImportError:  # pragma: no cover
                pass

        if numpy and isinstance(table, numpy.ndarray):
            if copy_table:
                table = table.copy()

            # Accept a flat array, an (entries, channels) array, or the full
            # 4-D layout; anything else is reported with the check below.
            accepted_shapes = [
                (n_entries * channels,),
                (n_entries, channels),
                (size[2], size[1], size[0], channels),
            ]
            if table.shape in accepted_shapes:
                table = table.reshape(n_entries * channels)
            else:
                shape_mismatch = True

        else:
            if copy_table:
                table = list(table)

            # Flatten a sequence of per-entry tuples into one flat list.
            if table and isinstance(table[0], (list, tuple)):
                nested, table = table, []
                for pixel in nested:
                    if len(pixel) != channels:
                        raise ValueError(
                            "The elements of the table should "
                            f"have a length of {channels}."
                        )
                    table.extend(pixel)

        if shape_mismatch or len(table) != n_entries * channels:
            raise ValueError(
                "The table should have either channels * size**3 float items "
                "or size**3 items of channels-sized tuples with floats. "
                f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. "
                f"Actual length: {len(table)}"
            )
        self.table = table

    @staticmethod
    def _check_size(size):
        """Normalize ``size`` to a list of three ints in [2, 65]."""
        try:
            _, _, _ = size
        except ValueError as e:
            # A sequence with the wrong number of elements.
            raise ValueError(
                "Size should be either an integer or a tuple of three integers."
            ) from e
        except TypeError:
            # A bare scalar: use it for all three dimensions.
            size = (size, size, size)
        size = [int(x) for x in size]
        for dim in size:
            if not 2 <= dim <= 65:
                raise ValueError("Size should be in [2, 65] range.")
        return size

    @classmethod
    def generate(cls, size, callback, channels=3, target_mode=None):
        """Generates new LUT using provided callback.

        :param size: Size of the table. Passed to the constructor.
        :param callback: Function with three parameters which correspond
                         three color channels. Will be called ``size**3``
                         times with values from 0.0 to 1.0 and should return
                         a tuple with ``channels`` elements.
        :param channels: The number of channels which should return callback.
        :param target_mode: Passed to the constructor of the resulting
                            lookup table.
        """
        size1D, size2D, size3D = cls._check_size(size)
        if channels not in (3, 4):
            raise ValueError("Only 3 or 4 output channels are supported")

        table = [0] * (size1D * size2D * size3D * channels)
        offset = 0
        # Channels change fastest, then the first (r) dimension, then g, then b.
        for b in range(size3D):
            for g in range(size2D):
                for r in range(size1D):
                    table[offset : offset + channels] = callback(
                        r / (size1D - 1), g / (size2D - 1), b / (size3D - 1)
                    )
                    offset += channels

        return cls(
            (size1D, size2D, size3D),
            table,
            channels=channels,
            target_mode=target_mode,
            _copy_table=False,
        )

    def transform(self, callback, with_normals=False, channels=None, target_mode=None):
        """Transforms the table values using provided callback and returns
        a new LUT with altered values.

        :param callback: A function which takes old lookup table values
                         and returns a new set of values. The number
                         of arguments which function should take is
                         ``self.channels`` or ``3 + self.channels``
                         if ``with_normals`` flag is set.
                         Should return a tuple of ``self.channels`` or
                         ``channels`` elements if it is set.
        :param with_normals: If true, ``callback`` will be called with
                             coordinates in the color cube as the first
                             three arguments. Otherwise, ``callback``
                             will be called only with actual color values.
        :param channels: The number of channels in the resulting lookup table.
        :param target_mode: Passed to the constructor of the resulting
                            lookup table.
        """
        if channels not in (None, 3, 4):
            raise ValueError("Only 3 or 4 output channels are supported")
        ch_in = self.channels
        ch_out = channels or ch_in
        size1D, size2D, size3D = self.size

        table = [0] * (size1D * size2D * size3D * ch_out)
        src = 0
        dst = 0
        for b in range(size3D):
            for g in range(size2D):
                for r in range(size1D):
                    values = self.table[src : src + ch_in]
                    if with_normals:
                        # Prepend the normalized cube coordinates.
                        values = callback(
                            r / (size1D - 1),
                            g / (size2D - 1),
                            b / (size3D - 1),
                            *values,
                        )
                    else:
                        values = callback(*values)
                    table[dst : dst + ch_out] = values
                    src += ch_in
                    dst += ch_out

        return type(self)(
            self.size,
            table,
            channels=ch_out,
            target_mode=target_mode or self.mode,
            _copy_table=False,
        )

    def __repr__(self):
        parts = [
            f"{self.__class__.__name__} from {self.table.__class__.__name__}",
            "size={:d}x{:d}x{:d}".format(*self.size),
            f"channels={self.channels:d}",
        ]
        if self.mode:
            parts.append(f"target_mode={self.mode}")
        return "<{}>".format(" ".join(parts))

    def filter(self, image):
        from . import Image

        # Always interpolated bilinearly by the core implementation.
        return image.color_lut_3d(
            self.mode or image.mode,
            Image.Resampling.BILINEAR,
            self.channels,
            self.size[0],
            self.size[1],
            self.size[2],
            self.table,
        )
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageMorph.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A binary morphology add-on for the Python Imaging Library
|
| 2 |
+
#
|
| 3 |
+
# History:
|
| 4 |
+
# 2014-06-04 Initial version.
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
|
| 7 |
+
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
from . import Image, _imagingmorph
|
| 11 |
+
|
| 12 |
+
LUT_SIZE = 1 << 9
|
| 13 |
+
|
| 14 |
+
# fmt: off
|
| 15 |
+
ROTATION_MATRIX = [
|
| 16 |
+
6, 3, 0,
|
| 17 |
+
7, 4, 1,
|
| 18 |
+
8, 5, 2,
|
| 19 |
+
]
|
| 20 |
+
MIRROR_MATRIX = [
|
| 21 |
+
2, 1, 0,
|
| 22 |
+
5, 4, 3,
|
| 23 |
+
8, 7, 6,
|
| 24 |
+
]
|
| 25 |
+
# fmt: on
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class LutBuilder:
    """A class for building a MorphLut from a descriptive language

    The input patterns is a list of a strings sequences like these::

        4:(...
           .1.
           111)->1

    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:

    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off

    The result of the operation is described after "->" string.

    The default is to return the current pixel value, which is
    returned if no other match is found.

    Operations:

    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring

    Example::

        lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
        lut = lb.build_lut()

    """

    def __init__(self, patterns=None, op_name=None):
        self.patterns = patterns if patterns is not None else []
        self.lut = None
        if op_name is not None:
            # Named, pre-defined pattern sets for common operators.
            known_patterns = {
                "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"],
                "dilation4": ["4:(... .0. .1.)->1"],
                "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"],
                "erosion4": ["4:(... .1. .0.)->0"],
                "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"],
                "edge": [
                    "1:(... ... ...)->0",
                    "4:(.0. .1. ...)->1",
                    "4:(01. .1. ...)->1",
                ],
            }
            if op_name not in known_patterns:
                raise Exception(f"Unknown pattern {op_name}!")

            self.patterns = known_patterns[op_name]

    def add_patterns(self, patterns):
        self.patterns += patterns

    def build_default_lut(self):
        # Identity LUT: the output is the centre pixel (bit 4) of the 3x3
        # neighbourhood encoded in the 9-bit table index.
        centre_bit = 1 << 4
        self.lut = bytearray(1 if i & centre_bit else 0 for i in range(LUT_SIZE))

    def get_lut(self):
        return self.lut

    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert len(permutation) == 9
        return "".join(pattern[p] for p in permutation)

    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]

        # rotations
        if "4" in options:
            res = patterns[-1][1]
            for _ in range(4):
                rotated = self._string_permute(patterns[-1][0], ROTATION_MATRIX)
                patterns.append((rotated, res))
        # mirror
        if "M" in options:
            for pattern, res in list(patterns):
                patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res))

        # negate
        if "N" in options:
            for pattern, res in list(patterns):
                # Swap 0 and 1 via a temporary placeholder character.
                inverted = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1")
                patterns.append((inverted, 1 - int(res)))

        return patterns

    def build_lut(self):
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []

        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", ""))
            if not m:
                raise Exception(f'Syntax error in pattern "{p}"')
            options = m.group(1)
            pattern = m.group(2).replace(" ", "").replace("\n", "")
            result = int(m.group(3))

            patterns += self._pattern_permute(pattern, options, result)

        # Pre-compile every pattern as a regex: '.' and 'X' both mean
        # "don't care", i.e. match either bit value.
        compiled = [
            (re.compile(pat.replace(".", "X").replace("X", "[01]")), res)
            for pat, res in patterns
        ]

        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern, LSB first.
            bitpattern = format(i, "09b")[::-1]

            for p, r in compiled:
                if p.match(bitpattern):
                    self.lut[i] = 1 if r else 0

        return self.lut
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class MorphOp:
    """A class for binary morphological operators"""

    def __init__(self, lut=None, op_name=None, patterns=None):
        """Create a binary morphological operator"""
        self.lut = lut
        # A named operator or an explicit pattern list overrides a raw lut.
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()

    def apply(self, image):
        """Run a single morphological operation on an image

        Returns a tuple of the number of changed pixels and the
        morphed image"""
        if self.lut is None:
            raise Exception("No operator loaded")
        if image.mode != "L":
            raise ValueError("Image mode must be L")

        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage

    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels. See :ref:`coordinate-system`."""
        if self.lut is None:
            raise Exception("No operator loaded")
        if image.mode != "L":
            raise ValueError("Image mode must be L")

        return _imagingmorph.match(bytes(self.lut), image.im.id)

    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels. See :ref:`coordinate-system`."""
        if image.mode != "L":
            raise ValueError("Image mode must be L")

        return _imagingmorph.get_on_pixels(image.im.id)

    def load_lut(self, filename):
        """Load an operator from an mrl file"""
        with open(filename, "rb") as f:
            self.lut = bytearray(f.read())

        if len(self.lut) != LUT_SIZE:
            # Reset so a bad file never leaves a truncated operator behind.
            self.lut = None
            raise Exception("Wrong size operator file!")

    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception("No operator loaded")
        with open(filename, "wb") as f:
            f.write(self.lut)

    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageShow.py
ADDED
|
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# im.show() drivers
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 2008-04-06 fl Created
|
| 9 |
+
#
|
| 10 |
+
# Copyright (c) Secret Labs AB 2008.
|
| 11 |
+
#
|
| 12 |
+
# See the README file for information on usage and redistribution.
|
| 13 |
+
#
|
| 14 |
+
import os
|
| 15 |
+
import shutil
|
| 16 |
+
import subprocess
|
| 17 |
+
import sys
|
| 18 |
+
import warnings
|
| 19 |
+
from shlex import quote
|
| 20 |
+
|
| 21 |
+
from PIL import Image
|
| 22 |
+
|
| 23 |
+
_viewers = []
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def register(viewer, order=1):
    """
    The :py:func:`register` function is used to register additional viewers::

        from PIL import ImageShow
        ImageShow.register(MyViewer())  # MyViewer will be used as a last resort
        ImageShow.register(MySecondViewer(), 0)  # MySecondViewer will be prioritised
        ImageShow.register(ImageShow.XVViewer(), 0)  # XVViewer will be prioritised

    :param viewer: The viewer to be registered.
    :param order:
        Zero or a negative integer to prepend this viewer to the list,
        a positive integer to append it.
    """
    # Accept a Viewer subclass as well as an instance.
    try:
        if issubclass(viewer, Viewer):
            viewer = viewer()
    except TypeError:
        pass  # raised if viewer wasn't a class
    # Positive order appends (inserting at len == append), otherwise prepend.
    position = len(_viewers) if order > 0 else 0
    _viewers.insert(position, viewer)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def show(image, title=None, **options):
    r"""
    Display a given image.

    :param image: An image object.
    :param title: Optional title. Not all viewers can display the title.
    :param \**options: Additional viewer options.
    :returns: ``True`` if a suitable viewer was found, ``False`` otherwise.
    """
    # Stops at the first registered viewer that reports success.
    return any(viewer.show(image, title=title, **options) for viewer in _viewers)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class Viewer:
    """Base class for viewers."""

    # main api

    def show(self, image, **options):
        """
        The main function for displaying an image.
        Converts the given image to the target format and displays it.
        """
        # "1" and "RGBA" (plus "I;16"/"LA" for PNG targets) are passed
        # through unchanged; everything else is converted to its base mode.
        keep_as_is = image.mode in ("1", "RGBA") or (
            self.format == "PNG" and image.mode in ("I;16", "LA")
        )
        if not keep_as_is:
            base = Image.getmodebase(image.mode)
            if image.mode != base:
                image = image.convert(base)

        return self.show_image(image, **options)

    # hook methods

    format = None
    """The format to convert the image into."""
    options = {}
    """Additional options used to convert the image."""

    def get_format(self, image):
        """Return format name, or ``None`` to save as PGM/PPM."""
        return self.format

    def get_command(self, file, **options):
        """
        Returns the command used to display the file.
        Not implemented in the base class.
        """
        raise NotImplementedError

    def save_image(self, image):
        """Save to temporary file and return filename."""
        return image._dump(format=self.get_format(image), **self.options)

    def show_image(self, image, **options):
        """Display the given image."""
        return self.show_file(self.save_image(image), **options)

    def show_file(self, path=None, **options):
        """
        Display given file.

        Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
        and will be removed in Pillow 10.0.0 (2023-07-01). ``path`` should be used
        instead.
        """
        if path is None:
            if "file" not in options:
                raise TypeError("Missing required argument: 'path'")
            warnings.warn(
                "The 'file' argument is deprecated and will be removed in Pillow "
                "10 (2023-07-01). Use 'path' instead.",
                DeprecationWarning,
            )
            path = options.pop("file")
        # NOTE(review): the command is run through a shell; callers must not
        # pass untrusted paths here.
        os.system(self.get_command(path, **options))
        return 1
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
# --------------------------------------------------------------------
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class WindowsViewer(Viewer):
    """The default viewer on Windows is the default system application for PNG files."""

    format = "PNG"
    options = {"compress_level": 1}

    def get_command(self, file, **options):
        # Open the file and wait; the ping acts as a short portable sleep so
        # the viewer has the file before it is deleted.
        parts = [
            f'start "Pillow" /WAIT "{file}"',
            "&& ping -n 2 127.0.0.1 >NUL",
            f'&& del /f "{file}"',
        ]
        return " ".join(parts)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
if sys.platform == "win32":
|
| 153 |
+
register(WindowsViewer)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class MacViewer(Viewer):
|
| 157 |
+
"""The default viewer on macOS using ``Preview.app``."""
|
| 158 |
+
|
| 159 |
+
format = "PNG"
|
| 160 |
+
options = {"compress_level": 1}
|
| 161 |
+
|
| 162 |
+
def get_command(self, file, **options):
|
| 163 |
+
# on darwin open returns immediately resulting in the temp
|
| 164 |
+
# file removal while app is opening
|
| 165 |
+
command = "open -a Preview.app"
|
| 166 |
+
command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&"
|
| 167 |
+
return command
|
| 168 |
+
|
| 169 |
+
def show_file(self, path=None, **options):
|
| 170 |
+
"""
|
| 171 |
+
Display given file.
|
| 172 |
+
|
| 173 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 174 |
+
and will be removed in Pillow 10.0.0 (2023-07-01). ``path`` should be used
|
| 175 |
+
instead.
|
| 176 |
+
"""
|
| 177 |
+
if path is None:
|
| 178 |
+
if "file" in options:
|
| 179 |
+
warnings.warn(
|
| 180 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 181 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 182 |
+
DeprecationWarning,
|
| 183 |
+
)
|
| 184 |
+
path = options.pop("file")
|
| 185 |
+
else:
|
| 186 |
+
raise TypeError("Missing required argument: 'path'")
|
| 187 |
+
subprocess.call(["open", "-a", "Preview.app", path])
|
| 188 |
+
subprocess.Popen(
|
| 189 |
+
[
|
| 190 |
+
sys.executable,
|
| 191 |
+
"-c",
|
| 192 |
+
"import os, sys, time; time.sleep(20); os.remove(sys.argv[1])",
|
| 193 |
+
path,
|
| 194 |
+
]
|
| 195 |
+
)
|
| 196 |
+
return 1
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
if sys.platform == "darwin":
|
| 200 |
+
register(MacViewer)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class UnixViewer(Viewer):
|
| 204 |
+
format = "PNG"
|
| 205 |
+
options = {"compress_level": 1}
|
| 206 |
+
|
| 207 |
+
def get_command(self, file, **options):
|
| 208 |
+
command = self.get_command_ex(file, **options)[0]
|
| 209 |
+
return f"({command} {quote(file)}"
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class XDGViewer(UnixViewer):
|
| 213 |
+
"""
|
| 214 |
+
The freedesktop.org ``xdg-open`` command.
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
def get_command_ex(self, file, **options):
|
| 218 |
+
command = executable = "xdg-open"
|
| 219 |
+
return command, executable
|
| 220 |
+
|
| 221 |
+
def show_file(self, path=None, **options):
|
| 222 |
+
"""
|
| 223 |
+
Display given file.
|
| 224 |
+
|
| 225 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 226 |
+
and will be removed in Pillow 10.0.0 (2023-07-01). ``path`` should be used
|
| 227 |
+
instead.
|
| 228 |
+
"""
|
| 229 |
+
if path is None:
|
| 230 |
+
if "file" in options:
|
| 231 |
+
warnings.warn(
|
| 232 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 233 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 234 |
+
DeprecationWarning,
|
| 235 |
+
)
|
| 236 |
+
path = options.pop("file")
|
| 237 |
+
else:
|
| 238 |
+
raise TypeError("Missing required argument: 'path'")
|
| 239 |
+
subprocess.Popen(["xdg-open", path])
|
| 240 |
+
return 1
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class DisplayViewer(UnixViewer):
|
| 244 |
+
"""
|
| 245 |
+
The ImageMagick ``display`` command.
|
| 246 |
+
This viewer supports the ``title`` parameter.
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
def get_command_ex(self, file, title=None, **options):
|
| 250 |
+
command = executable = "display"
|
| 251 |
+
if title:
|
| 252 |
+
command += f" -title {quote(title)}"
|
| 253 |
+
return command, executable
|
| 254 |
+
|
| 255 |
+
def show_file(self, path=None, **options):
|
| 256 |
+
"""
|
| 257 |
+
Display given file.
|
| 258 |
+
|
| 259 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 260 |
+
and ``path`` should be used instead.
|
| 261 |
+
"""
|
| 262 |
+
if path is None:
|
| 263 |
+
if "file" in options:
|
| 264 |
+
warnings.warn(
|
| 265 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 266 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 267 |
+
DeprecationWarning,
|
| 268 |
+
)
|
| 269 |
+
path = options.pop("file")
|
| 270 |
+
else:
|
| 271 |
+
raise TypeError("Missing required argument: 'path'")
|
| 272 |
+
args = ["display"]
|
| 273 |
+
title = options.get("title")
|
| 274 |
+
if title:
|
| 275 |
+
args += ["-title", title]
|
| 276 |
+
args.append(path)
|
| 277 |
+
|
| 278 |
+
subprocess.Popen(args)
|
| 279 |
+
return 1
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class GmDisplayViewer(UnixViewer):
|
| 283 |
+
"""The GraphicsMagick ``gm display`` command."""
|
| 284 |
+
|
| 285 |
+
def get_command_ex(self, file, **options):
|
| 286 |
+
executable = "gm"
|
| 287 |
+
command = "gm display"
|
| 288 |
+
return command, executable
|
| 289 |
+
|
| 290 |
+
def show_file(self, path=None, **options):
|
| 291 |
+
"""
|
| 292 |
+
Display given file.
|
| 293 |
+
|
| 294 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 295 |
+
and ``path`` should be used instead.
|
| 296 |
+
"""
|
| 297 |
+
if path is None:
|
| 298 |
+
if "file" in options:
|
| 299 |
+
warnings.warn(
|
| 300 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 301 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 302 |
+
DeprecationWarning,
|
| 303 |
+
)
|
| 304 |
+
path = options.pop("file")
|
| 305 |
+
else:
|
| 306 |
+
raise TypeError("Missing required argument: 'path'")
|
| 307 |
+
subprocess.Popen(["gm", "display", path])
|
| 308 |
+
return 1
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
class EogViewer(UnixViewer):
|
| 312 |
+
"""The GNOME Image Viewer ``eog`` command."""
|
| 313 |
+
|
| 314 |
+
def get_command_ex(self, file, **options):
|
| 315 |
+
executable = "eog"
|
| 316 |
+
command = "eog -n"
|
| 317 |
+
return command, executable
|
| 318 |
+
|
| 319 |
+
def show_file(self, path=None, **options):
|
| 320 |
+
"""
|
| 321 |
+
Display given file.
|
| 322 |
+
|
| 323 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 324 |
+
and ``path`` should be used instead.
|
| 325 |
+
"""
|
| 326 |
+
if path is None:
|
| 327 |
+
if "file" in options:
|
| 328 |
+
warnings.warn(
|
| 329 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 330 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 331 |
+
DeprecationWarning,
|
| 332 |
+
)
|
| 333 |
+
path = options.pop("file")
|
| 334 |
+
else:
|
| 335 |
+
raise TypeError("Missing required argument: 'path'")
|
| 336 |
+
subprocess.Popen(["eog", "-n", path])
|
| 337 |
+
return 1
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
class XVViewer(UnixViewer):
|
| 341 |
+
"""
|
| 342 |
+
The X Viewer ``xv`` command.
|
| 343 |
+
This viewer supports the ``title`` parameter.
|
| 344 |
+
"""
|
| 345 |
+
|
| 346 |
+
def get_command_ex(self, file, title=None, **options):
|
| 347 |
+
# note: xv is pretty outdated. most modern systems have
|
| 348 |
+
# imagemagick's display command instead.
|
| 349 |
+
command = executable = "xv"
|
| 350 |
+
if title:
|
| 351 |
+
command += f" -name {quote(title)}"
|
| 352 |
+
return command, executable
|
| 353 |
+
|
| 354 |
+
def show_file(self, path=None, **options):
|
| 355 |
+
"""
|
| 356 |
+
Display given file.
|
| 357 |
+
|
| 358 |
+
Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,
|
| 359 |
+
and ``path`` should be used instead.
|
| 360 |
+
"""
|
| 361 |
+
if path is None:
|
| 362 |
+
if "file" in options:
|
| 363 |
+
warnings.warn(
|
| 364 |
+
"The 'file' argument is deprecated and will be removed in Pillow "
|
| 365 |
+
"10 (2023-07-01). Use 'path' instead.",
|
| 366 |
+
DeprecationWarning,
|
| 367 |
+
)
|
| 368 |
+
path = options.pop("file")
|
| 369 |
+
else:
|
| 370 |
+
raise TypeError("Missing required argument: 'path'")
|
| 371 |
+
args = ["xv"]
|
| 372 |
+
title = options.get("title")
|
| 373 |
+
if title:
|
| 374 |
+
args += ["-name", title]
|
| 375 |
+
args.append(path)
|
| 376 |
+
|
| 377 |
+
subprocess.Popen(args)
|
| 378 |
+
return 1
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
if sys.platform not in ("win32", "darwin"): # unixoids
|
| 382 |
+
if shutil.which("xdg-open"):
|
| 383 |
+
register(XDGViewer)
|
| 384 |
+
if shutil.which("display"):
|
| 385 |
+
register(DisplayViewer)
|
| 386 |
+
if shutil.which("gm"):
|
| 387 |
+
register(GmDisplayViewer)
|
| 388 |
+
if shutil.which("eog"):
|
| 389 |
+
register(EogViewer)
|
| 390 |
+
if shutil.which("xv"):
|
| 391 |
+
register(XVViewer)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class IPythonViewer(Viewer):
|
| 395 |
+
"""The viewer for IPython frontends."""
|
| 396 |
+
|
| 397 |
+
def show_image(self, image, **options):
|
| 398 |
+
ipython_display(image)
|
| 399 |
+
return 1
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
try:
|
| 403 |
+
from IPython.display import display as ipython_display
|
| 404 |
+
except ImportError:
|
| 405 |
+
pass
|
| 406 |
+
else:
|
| 407 |
+
register(IPythonViewer)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
if __name__ == "__main__":
|
| 411 |
+
|
| 412 |
+
if len(sys.argv) < 2:
|
| 413 |
+
print("Syntax: python3 ImageShow.py imagefile [title]")
|
| 414 |
+
sys.exit()
|
| 415 |
+
|
| 416 |
+
with Image.open(sys.argv[1]) as im:
|
| 417 |
+
print(show(im, *sys.argv[2:]))
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageTk.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# a Tk display interface
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 96-04-08 fl Created
|
| 9 |
+
# 96-09-06 fl Added getimage method
|
| 10 |
+
# 96-11-01 fl Rewritten, removed image attribute and crop method
|
| 11 |
+
# 97-05-09 fl Use PyImagingPaste method instead of image type
|
| 12 |
+
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
|
| 13 |
+
# 97-05-17 fl Support the "pilbitmap" booster patch
|
| 14 |
+
# 97-06-05 fl Added file= and data= argument to image constructors
|
| 15 |
+
# 98-03-09 fl Added width and height methods to Image classes
|
| 16 |
+
# 98-07-02 fl Use default mode for "P" images without palette attribute
|
| 17 |
+
# 98-07-02 fl Explicitly destroy Tkinter image objects
|
| 18 |
+
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
|
| 19 |
+
# 99-07-26 fl Automatically hook into Tkinter (if possible)
|
| 20 |
+
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
|
| 21 |
+
#
|
| 22 |
+
# Copyright (c) 1997-1999 by Secret Labs AB
|
| 23 |
+
# Copyright (c) 1996-1997 by Fredrik Lundh
|
| 24 |
+
#
|
| 25 |
+
# See the README file for information on usage and redistribution.
|
| 26 |
+
#
|
| 27 |
+
|
| 28 |
+
import tkinter
|
| 29 |
+
from io import BytesIO
|
| 30 |
+
|
| 31 |
+
from . import Image
|
| 32 |
+
|
| 33 |
+
# --------------------------------------------------------------------
|
| 34 |
+
# Check for Tkinter interface hooks
|
| 35 |
+
|
| 36 |
+
_pilbitmap_ok = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _pilbitmap_check():
|
| 40 |
+
global _pilbitmap_ok
|
| 41 |
+
if _pilbitmap_ok is None:
|
| 42 |
+
try:
|
| 43 |
+
im = Image.new("1", (1, 1))
|
| 44 |
+
tkinter.BitmapImage(data=f"PIL:{im.im.id}")
|
| 45 |
+
_pilbitmap_ok = 1
|
| 46 |
+
except tkinter.TclError:
|
| 47 |
+
_pilbitmap_ok = 0
|
| 48 |
+
return _pilbitmap_ok
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _get_image_from_kw(kw):
|
| 52 |
+
source = None
|
| 53 |
+
if "file" in kw:
|
| 54 |
+
source = kw.pop("file")
|
| 55 |
+
elif "data" in kw:
|
| 56 |
+
source = BytesIO(kw.pop("data"))
|
| 57 |
+
if source:
|
| 58 |
+
return Image.open(source)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _pyimagingtkcall(command, photo, id):
|
| 62 |
+
tk = photo.tk
|
| 63 |
+
try:
|
| 64 |
+
tk.call(command, photo, id)
|
| 65 |
+
except tkinter.TclError:
|
| 66 |
+
# activate Tkinter hook
|
| 67 |
+
# may raise an error if it cannot attach to Tkinter
|
| 68 |
+
from . import _imagingtk
|
| 69 |
+
|
| 70 |
+
try:
|
| 71 |
+
if hasattr(tk, "interp"):
|
| 72 |
+
# Required for PyPy, which always has CFFI installed
|
| 73 |
+
from cffi import FFI
|
| 74 |
+
|
| 75 |
+
ffi = FFI()
|
| 76 |
+
|
| 77 |
+
# PyPy is using an FFI CDATA element
|
| 78 |
+
# (Pdb) self.tk.interp
|
| 79 |
+
# <cdata 'Tcl_Interp *' 0x3061b50>
|
| 80 |
+
_imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
|
| 81 |
+
else:
|
| 82 |
+
_imagingtk.tkinit(tk.interpaddr(), 1)
|
| 83 |
+
except AttributeError:
|
| 84 |
+
_imagingtk.tkinit(id(tk), 0)
|
| 85 |
+
tk.call(command, photo, id)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# --------------------------------------------------------------------
|
| 89 |
+
# PhotoImage
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class PhotoImage:
|
| 93 |
+
"""
|
| 94 |
+
A Tkinter-compatible photo image. This can be used
|
| 95 |
+
everywhere Tkinter expects an image object. If the image is an RGBA
|
| 96 |
+
image, pixels having alpha 0 are treated as transparent.
|
| 97 |
+
|
| 98 |
+
The constructor takes either a PIL image, or a mode and a size.
|
| 99 |
+
Alternatively, you can use the ``file`` or ``data`` options to initialize
|
| 100 |
+
the photo image object.
|
| 101 |
+
|
| 102 |
+
:param image: Either a PIL image, or a mode string. If a mode string is
|
| 103 |
+
used, a size must also be given.
|
| 104 |
+
:param size: If the first argument is a mode string, this defines the size
|
| 105 |
+
of the image.
|
| 106 |
+
:keyword file: A filename to load the image from (using
|
| 107 |
+
``Image.open(file)``).
|
| 108 |
+
:keyword data: An 8-bit string containing image data (as loaded from an
|
| 109 |
+
image file).
|
| 110 |
+
"""
|
| 111 |
+
|
| 112 |
+
def __init__(self, image=None, size=None, **kw):
|
| 113 |
+
|
| 114 |
+
# Tk compatibility: file or data
|
| 115 |
+
if image is None:
|
| 116 |
+
image = _get_image_from_kw(kw)
|
| 117 |
+
|
| 118 |
+
if hasattr(image, "mode") and hasattr(image, "size"):
|
| 119 |
+
# got an image instead of a mode
|
| 120 |
+
mode = image.mode
|
| 121 |
+
if mode == "P":
|
| 122 |
+
# palette mapped data
|
| 123 |
+
image.load()
|
| 124 |
+
try:
|
| 125 |
+
mode = image.palette.mode
|
| 126 |
+
except AttributeError:
|
| 127 |
+
mode = "RGB" # default
|
| 128 |
+
size = image.size
|
| 129 |
+
kw["width"], kw["height"] = size
|
| 130 |
+
else:
|
| 131 |
+
mode = image
|
| 132 |
+
image = None
|
| 133 |
+
|
| 134 |
+
if mode not in ["1", "L", "RGB", "RGBA"]:
|
| 135 |
+
mode = Image.getmodebase(mode)
|
| 136 |
+
|
| 137 |
+
self.__mode = mode
|
| 138 |
+
self.__size = size
|
| 139 |
+
self.__photo = tkinter.PhotoImage(**kw)
|
| 140 |
+
self.tk = self.__photo.tk
|
| 141 |
+
if image:
|
| 142 |
+
self.paste(image)
|
| 143 |
+
|
| 144 |
+
def __del__(self):
|
| 145 |
+
name = self.__photo.name
|
| 146 |
+
self.__photo.name = None
|
| 147 |
+
try:
|
| 148 |
+
self.__photo.tk.call("image", "delete", name)
|
| 149 |
+
except Exception:
|
| 150 |
+
pass # ignore internal errors
|
| 151 |
+
|
| 152 |
+
def __str__(self):
|
| 153 |
+
"""
|
| 154 |
+
Get the Tkinter photo image identifier. This method is automatically
|
| 155 |
+
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
|
| 156 |
+
method.
|
| 157 |
+
|
| 158 |
+
:return: A Tkinter photo image identifier (a string).
|
| 159 |
+
"""
|
| 160 |
+
return str(self.__photo)
|
| 161 |
+
|
| 162 |
+
def width(self):
|
| 163 |
+
"""
|
| 164 |
+
Get the width of the image.
|
| 165 |
+
|
| 166 |
+
:return: The width, in pixels.
|
| 167 |
+
"""
|
| 168 |
+
return self.__size[0]
|
| 169 |
+
|
| 170 |
+
def height(self):
|
| 171 |
+
"""
|
| 172 |
+
Get the height of the image.
|
| 173 |
+
|
| 174 |
+
:return: The height, in pixels.
|
| 175 |
+
"""
|
| 176 |
+
return self.__size[1]
|
| 177 |
+
|
| 178 |
+
def paste(self, im, box=None):
|
| 179 |
+
"""
|
| 180 |
+
Paste a PIL image into the photo image. Note that this can
|
| 181 |
+
be very slow if the photo image is displayed.
|
| 182 |
+
|
| 183 |
+
:param im: A PIL image. The size must match the target region. If the
|
| 184 |
+
mode does not match, the image is converted to the mode of
|
| 185 |
+
the bitmap image.
|
| 186 |
+
:param box: A 4-tuple defining the left, upper, right, and lower pixel
|
| 187 |
+
coordinate. See :ref:`coordinate-system`. If None is given
|
| 188 |
+
instead of a tuple, all of the image is assumed.
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
# convert to blittable
|
| 192 |
+
im.load()
|
| 193 |
+
image = im.im
|
| 194 |
+
if image.isblock() and im.mode == self.__mode:
|
| 195 |
+
block = image
|
| 196 |
+
else:
|
| 197 |
+
block = image.new_block(self.__mode, im.size)
|
| 198 |
+
image.convert2(block, image) # convert directly between buffers
|
| 199 |
+
|
| 200 |
+
_pyimagingtkcall("PyImagingPhoto", self.__photo, block.id)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
# --------------------------------------------------------------------
|
| 204 |
+
# BitmapImage
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
class BitmapImage:
|
| 208 |
+
"""
|
| 209 |
+
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
|
| 210 |
+
expects an image object.
|
| 211 |
+
|
| 212 |
+
The given image must have mode "1". Pixels having value 0 are treated as
|
| 213 |
+
transparent. Options, if any, are passed on to Tkinter. The most commonly
|
| 214 |
+
used option is ``foreground``, which is used to specify the color for the
|
| 215 |
+
non-transparent parts. See the Tkinter documentation for information on
|
| 216 |
+
how to specify colours.
|
| 217 |
+
|
| 218 |
+
:param image: A PIL image.
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
def __init__(self, image=None, **kw):
|
| 222 |
+
|
| 223 |
+
# Tk compatibility: file or data
|
| 224 |
+
if image is None:
|
| 225 |
+
image = _get_image_from_kw(kw)
|
| 226 |
+
|
| 227 |
+
self.__mode = image.mode
|
| 228 |
+
self.__size = image.size
|
| 229 |
+
|
| 230 |
+
if _pilbitmap_check():
|
| 231 |
+
# fast way (requires the pilbitmap booster patch)
|
| 232 |
+
image.load()
|
| 233 |
+
kw["data"] = f"PIL:{image.im.id}"
|
| 234 |
+
self.__im = image # must keep a reference
|
| 235 |
+
else:
|
| 236 |
+
# slow but safe way
|
| 237 |
+
kw["data"] = image.tobitmap()
|
| 238 |
+
self.__photo = tkinter.BitmapImage(**kw)
|
| 239 |
+
|
| 240 |
+
def __del__(self):
|
| 241 |
+
name = self.__photo.name
|
| 242 |
+
self.__photo.name = None
|
| 243 |
+
try:
|
| 244 |
+
self.__photo.tk.call("image", "delete", name)
|
| 245 |
+
except Exception:
|
| 246 |
+
pass # ignore internal errors
|
| 247 |
+
|
| 248 |
+
def width(self):
|
| 249 |
+
"""
|
| 250 |
+
Get the width of the image.
|
| 251 |
+
|
| 252 |
+
:return: The width, in pixels.
|
| 253 |
+
"""
|
| 254 |
+
return self.__size[0]
|
| 255 |
+
|
| 256 |
+
def height(self):
|
| 257 |
+
"""
|
| 258 |
+
Get the height of the image.
|
| 259 |
+
|
| 260 |
+
:return: The height, in pixels.
|
| 261 |
+
"""
|
| 262 |
+
return self.__size[1]
|
| 263 |
+
|
| 264 |
+
def __str__(self):
|
| 265 |
+
"""
|
| 266 |
+
Get the Tkinter bitmap image identifier. This method is automatically
|
| 267 |
+
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
|
| 268 |
+
method.
|
| 269 |
+
|
| 270 |
+
:return: A Tkinter bitmap image identifier (a string).
|
| 271 |
+
"""
|
| 272 |
+
return str(self.__photo)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def getimage(photo):
|
| 276 |
+
"""Copies the contents of a PhotoImage to a PIL image memory."""
|
| 277 |
+
im = Image.new("RGBA", (photo.width(), photo.height()))
|
| 278 |
+
block = im.im
|
| 279 |
+
|
| 280 |
+
_pyimagingtkcall("PyImagingPhotoGet", photo, block.id)
|
| 281 |
+
|
| 282 |
+
return im
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _show(image, title):
|
| 286 |
+
"""Helper for the Image.show method."""
|
| 287 |
+
|
| 288 |
+
class UI(tkinter.Label):
|
| 289 |
+
def __init__(self, master, im):
|
| 290 |
+
if im.mode == "1":
|
| 291 |
+
self.image = BitmapImage(im, foreground="white", master=master)
|
| 292 |
+
else:
|
| 293 |
+
self.image = PhotoImage(im, master=master)
|
| 294 |
+
super().__init__(master, image=self.image, bg="black", bd=0)
|
| 295 |
+
|
| 296 |
+
if not tkinter._default_root:
|
| 297 |
+
raise OSError("tkinter not initialized")
|
| 298 |
+
top = tkinter.Toplevel()
|
| 299 |
+
if title:
|
| 300 |
+
top.title(title)
|
| 301 |
+
UI(top, image).pack()
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/ImageTransform.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# transform wrappers
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 2002-04-08 fl Created
|
| 9 |
+
#
|
| 10 |
+
# Copyright (c) 2002 by Secret Labs AB
|
| 11 |
+
# Copyright (c) 2002 by Fredrik Lundh
|
| 12 |
+
#
|
| 13 |
+
# See the README file for information on usage and redistribution.
|
| 14 |
+
#
|
| 15 |
+
|
| 16 |
+
from . import Image
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Transform(Image.ImageTransformHandler):
|
| 20 |
+
def __init__(self, data):
|
| 21 |
+
self.data = data
|
| 22 |
+
|
| 23 |
+
def getdata(self):
|
| 24 |
+
return self.method, self.data
|
| 25 |
+
|
| 26 |
+
def transform(self, size, image, **options):
|
| 27 |
+
# can be overridden
|
| 28 |
+
method, data = self.getdata()
|
| 29 |
+
return image.transform(size, method, data, **options)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class AffineTransform(Transform):
|
| 33 |
+
"""
|
| 34 |
+
Define an affine image transform.
|
| 35 |
+
|
| 36 |
+
This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
|
| 37 |
+
two rows from an affine transform matrix. For each pixel (x, y) in the
|
| 38 |
+
output image, the new value is taken from a position (a x + b y + c,
|
| 39 |
+
d x + e y + f) in the input image, rounded to nearest pixel.
|
| 40 |
+
|
| 41 |
+
This function can be used to scale, translate, rotate, and shear the
|
| 42 |
+
original image.
|
| 43 |
+
|
| 44 |
+
See :py:meth:`~PIL.Image.Image.transform`
|
| 45 |
+
|
| 46 |
+
:param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
|
| 47 |
+
from an affine transform matrix.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
method = Image.Transform.AFFINE
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class ExtentTransform(Transform):
|
| 54 |
+
"""
|
| 55 |
+
Define a transform to extract a subregion from an image.
|
| 56 |
+
|
| 57 |
+
Maps a rectangle (defined by two corners) from the image to a rectangle of
|
| 58 |
+
the given size. The resulting image will contain data sampled from between
|
| 59 |
+
the corners, such that (x0, y0) in the input image will end up at (0,0) in
|
| 60 |
+
the output image, and (x1, y1) at size.
|
| 61 |
+
|
| 62 |
+
This method can be used to crop, stretch, shrink, or mirror an arbitrary
|
| 63 |
+
rectangle in the current image. It is slightly slower than crop, but about
|
| 64 |
+
as fast as a corresponding resize operation.
|
| 65 |
+
|
| 66 |
+
See :py:meth:`~PIL.Image.Image.transform`
|
| 67 |
+
|
| 68 |
+
:param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the
|
| 69 |
+
input image's coordinate system. See :ref:`coordinate-system`.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
method = Image.Transform.EXTENT
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class QuadTransform(Transform):
|
| 76 |
+
"""
|
| 77 |
+
Define a quad image transform.
|
| 78 |
+
|
| 79 |
+
Maps a quadrilateral (a region defined by four corners) from the image to a
|
| 80 |
+
rectangle of the given size.
|
| 81 |
+
|
| 82 |
+
See :py:meth:`~PIL.Image.Image.transform`
|
| 83 |
+
|
| 84 |
+
:param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the
|
| 85 |
+
upper left, lower left, lower right, and upper right corner of the
|
| 86 |
+
source quadrilateral.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
method = Image.Transform.QUAD
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class MeshTransform(Transform):
|
| 93 |
+
"""
|
| 94 |
+
Define a mesh image transform. A mesh transform consists of one or more
|
| 95 |
+
individual quad transforms.
|
| 96 |
+
|
| 97 |
+
See :py:meth:`~PIL.Image.Image.transform`
|
| 98 |
+
|
| 99 |
+
:param data: A list of (bbox, quad) tuples.
|
| 100 |
+
"""
|
| 101 |
+
|
| 102 |
+
method = Image.Transform.MESH
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/McIdasImagePlugin.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# Basic McIdas support for PIL
|
| 6 |
+
#
|
| 7 |
+
# History:
|
| 8 |
+
# 1997-05-05 fl Created (8-bit images only)
|
| 9 |
+
# 2009-03-08 fl Added 16/32-bit support.
|
| 10 |
+
#
|
| 11 |
+
# Thanks to Richard Jones and Craig Swank for specs and samples.
|
| 12 |
+
#
|
| 13 |
+
# Copyright (c) Secret Labs AB 1997.
|
| 14 |
+
# Copyright (c) Fredrik Lundh 1997.
|
| 15 |
+
#
|
| 16 |
+
# See the README file for information on usage and redistribution.
|
| 17 |
+
#
|
| 18 |
+
|
| 19 |
+
import struct
|
| 20 |
+
|
| 21 |
+
from . import Image, ImageFile
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _accept(s):
|
| 25 |
+
return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
##
|
| 29 |
+
# Image plugin for McIdas area images.
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class McIdasImageFile(ImageFile.ImageFile):
|
| 33 |
+
|
| 34 |
+
format = "MCIDAS"
|
| 35 |
+
format_description = "McIdas area file"
|
| 36 |
+
|
| 37 |
+
def _open(self):
|
| 38 |
+
|
| 39 |
+
# parse area file directory
|
| 40 |
+
s = self.fp.read(256)
|
| 41 |
+
if not _accept(s) or len(s) != 256:
|
| 42 |
+
raise SyntaxError("not an McIdas area file")
|
| 43 |
+
|
| 44 |
+
self.area_descriptor_raw = s
|
| 45 |
+
self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
|
| 46 |
+
|
| 47 |
+
# get mode
|
| 48 |
+
if w[11] == 1:
|
| 49 |
+
mode = rawmode = "L"
|
| 50 |
+
elif w[11] == 2:
|
| 51 |
+
# FIXME: add memory map support
|
| 52 |
+
mode = "I"
|
| 53 |
+
rawmode = "I;16B"
|
| 54 |
+
elif w[11] == 4:
|
| 55 |
+
# FIXME: add memory map support
|
| 56 |
+
mode = "I"
|
| 57 |
+
rawmode = "I;32B"
|
| 58 |
+
else:
|
| 59 |
+
raise SyntaxError("unsupported McIdas format")
|
| 60 |
+
|
| 61 |
+
self.mode = mode
|
| 62 |
+
self._size = w[10], w[9]
|
| 63 |
+
|
| 64 |
+
offset = w[34] + w[15]
|
| 65 |
+
stride = w[15] + w[10] * w[11] * w[14]
|
| 66 |
+
|
| 67 |
+
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# --------------------------------------------------------------------
|
| 71 |
+
# registry
|
| 72 |
+
|
| 73 |
+
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
|
| 74 |
+
|
| 75 |
+
# no default extension
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/PyAccess.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library
|
| 3 |
+
# Pillow fork
|
| 4 |
+
#
|
| 5 |
+
# Python implementation of the PixelAccess Object
|
| 6 |
+
#
|
| 7 |
+
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
|
| 8 |
+
# Copyright (c) 1995-2009 by Fredrik Lundh.
|
| 9 |
+
# Copyright (c) 2013 Eric Soroos
|
| 10 |
+
#
|
| 11 |
+
# See the README file for information on usage and redistribution
|
| 12 |
+
#
|
| 13 |
+
|
| 14 |
+
# Notes:
|
| 15 |
+
#
|
| 16 |
+
# * Implements the pixel access object following Access.
|
| 17 |
+
# * Does not implement the line functions, as they don't appear to be used
|
| 18 |
+
# * Taking only the tuple form, which is used from python.
|
| 19 |
+
# * Fill.c uses the integer form, but it's still going to use the old
|
| 20 |
+
# Access.c implementation.
|
| 21 |
+
#
|
| 22 |
+
|
| 23 |
+
import logging
|
| 24 |
+
import sys
|
| 25 |
+
|
| 26 |
+
try:
    from cffi import FFI

    # C struct layouts used to reinterpret raw image memory via ffi.cast.
    defs = """
    struct Pixel_RGBA {
        unsigned char r,g,b,a;
    };
    struct Pixel_I16 {
        unsigned char l,r;
    };
    """
    ffi = FFI()
    ffi.cdef(defs)
except ImportError as ex:
    # Allow error import for doc purposes, but error out when accessing
    # anything in core.
    from ._util import deferred_error

    FFI = ffi = deferred_error(ex)

logger = logging.getLogger(__name__)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class PyAccess:
    """Pure-Python pixel access object mirroring the C ``Access`` API.

    Wraps the raw pixel planes exposed by ``img.im.unsafe_ptrs`` with cffi
    pointer casts; mode-specific subclasses implement ``get_pixel`` and
    ``set_pixel``.
    """

    def __init__(self, img, readonly=False):
        # Raw pointers into the core image's pixel planes.
        vals = dict(img.im.unsafe_ptrs)
        self.readonly = readonly
        self.image8 = ffi.cast("unsigned char **", vals["image8"])
        self.image32 = ffi.cast("int **", vals["image32"])
        self.image = ffi.cast("unsigned char **", vals["image"])
        self.xsize, self.ysize = img.im.size
        self._img = img

        # Keep pointer to im object to prevent dereferencing.
        self._im = img.im
        if self._im.mode == "P":
            # Needed to translate RGB(A) tuples to palette indexes on put.
            self._palette = img.palette

        # Debugging is polluting test traces, only useful here
        # when hacking on PyAccess
        # logger.debug("%s", vals)
        self._post_init()

    def _post_init(self):
        # Subclass hook: cast the raw planes to a mode-specific pointer type.
        pass

    def __setitem__(self, xy, color):
        """
        Modifies the pixel at x,y. The color is given as a single
        numerical value for single band images, and a tuple for
        multi-band images

        :param xy: The pixel coordinate, given as (x, y). See
           :ref:`coordinate-system`.
        :param color: The pixel value.
        """
        if self.readonly:
            raise ValueError("Attempt to putpixel a read only image")
        (x, y) = xy
        # Negative coordinates index from the right/bottom edge.
        if x < 0:
            x = self.xsize + x
        if y < 0:
            y = self.ysize + y
        (x, y) = self.check_xy((x, y))

        if (
            self._im.mode == "P"
            and isinstance(color, (list, tuple))
            and len(color) in [3, 4]
        ):
            # RGB or RGBA value for a P image
            color = self._palette.getcolor(color, self._img)

        return self.set_pixel(x, y, color)

    def __getitem__(self, xy):
        """
        Returns the pixel at x,y. The pixel is returned as a single
        value for single band images or a tuple for multiple band
        images

        :param xy: The pixel coordinate, given as (x, y). See
          :ref:`coordinate-system`.
        :returns: a pixel value for single band images, a tuple of
          pixel values for multiband images.
        """
        (x, y) = xy
        # Negative coordinates index from the right/bottom edge.
        if x < 0:
            x = self.xsize + x
        if y < 0:
            y = self.ysize + y
        (x, y) = self.check_xy((x, y))
        return self.get_pixel(x, y)

    # Aliases matching the C access object's public method names.
    putpixel = __setitem__
    getpixel = __getitem__

    def check_xy(self, xy):
        # Validate that (x, y) lies inside the image bounds.
        (x, y) = xy
        if not (0 <= x < self.xsize and 0 <= y < self.ysize):
            raise ValueError("pixel location out of range")
        return xy
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class _PyAccess32_2(PyAccess):
    """PA, LA, stored in first and last bytes of a 32 bit word"""

    def _post_init(self, *args, **kwargs):
        # View each 32-bit word as an RGBA struct; only r and a are used.
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.a

    def set_pixel(self, x, y, color):
        # color is a (value, alpha) tuple; clamp each band to 8 bits
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.a = min(color[1], 255)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class _PyAccess32_3(PyAccess):
    """RGB and friends, stored in the first three bytes of a 32 bit word"""

    def _post_init(self, *args, **kwargs):
        # View each 32-bit word as an RGBA struct; the a byte is padding.
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.g, px.b

    def set_pixel(self, x, y, color):
        # color is an (r, g, b) tuple; clamp bands, force the pad byte opaque
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.g = min(color[1], 255)
        px.b = min(color[2], 255)
        px.a = 255
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class _PyAccess32_4(PyAccess):
    """RGBA etc, all 4 bytes of a 32 bit word"""

    def _post_init(self, *args, **kwargs):
        # View each 32-bit word as an RGBA struct, all four bytes live.
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.g, px.b, px.a

    def set_pixel(self, x, y, color):
        # color is an (r, g, b, a) tuple; clamp every band to 8 bits
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.g = min(color[1], 255)
        px.b = min(color[2], 255)
        px.a = min(color[3], 255)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class _PyAccess8(PyAccess):
    """1, L, P, 8 bit images stored as uint8"""

    def _post_init(self, *args, **kwargs):
        # The 8-bit plane needs no cast; use it directly.
        self.pixels = self.image8

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            clamped = min(color, 255)  # plain integer value
        except TypeError:
            clamped = min(color[0], 255)  # tuple -> use first band
        self.pixels[y][x] = clamped
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class _PyAccessI16_N(PyAccess):
    """I;16 access, native bitendian without conversion"""

    def _post_init(self, *args, **kwargs):
        # View the byte plane as native-endian 16-bit words.
        self.pixels = ffi.cast("unsigned short **", self.image)

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            clamped = min(color, 65535)  # plain integer value
        except TypeError:
            clamped = min(color[0], 65535)  # tuple -> use first band
        self.pixels[y][x] = clamped
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class _PyAccessI16_L(PyAccess):
    """I;16L access, with conversion"""

    def _post_init(self, *args, **kwargs):
        # View the byte plane as (l, r) byte pairs.
        self.pixels = ffi.cast("struct Pixel_I16 **", self.image)

    def get_pixel(self, x, y):
        # little-endian: l is the low byte, r the high byte
        px = self.pixels[y][x]
        return px.l + px.r * 256

    def set_pixel(self, x, y, color):
        px = self.pixels[y][x]
        try:
            color = min(color, 65535)
        except TypeError:
            color = min(color[0], 65535)

        # split into high/low bytes, stored little-endian
        px.r, px.l = divmod(color, 256)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class _PyAccessI16_B(PyAccess):
    """I;16B access, with conversion"""

    def _post_init(self, *args, **kwargs):
        # View the byte plane as (l, r) byte pairs (big-endian words).
        self.pixels = ffi.cast("struct Pixel_I16 **", self.image)

    def get_pixel(self, x, y):
        """Return the 16-bit value at (x, y), decoding big-endian bytes."""
        pixel = self.pixels[y][x]
        return pixel.l * 256 + pixel.r

    def set_pixel(self, x, y, color):
        """Store a 16-bit value at (x, y), clamped to 65535.

        ``color`` is either an integer or a tuple whose first element
        is used.
        """
        pixel = self.pixels[y][x]
        try:
            # integer
            color = min(color, 65535)
        except TypeError:
            # tuple; was ``except Exception``, which could mask unrelated
            # errors — narrowed to TypeError to match the sibling accessors
            color = min(color[0], 65535)

        pixel.l = color >> 8  # noqa: E741
        pixel.r = color & 0xFF
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class _PyAccessI32_N(PyAccess):
    """Signed Int32 access, native endian"""

    def _post_init(self, *args, **kwargs):
        # The 32-bit plane is already int**, usable as-is.
        self.pixels = self.image32

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        # Stored directly; no clamping is applied for 32-bit values.
        self.pixels[y][x] = color
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
class _PyAccessI32_Swap(PyAccess):
    """I;32L/B access, with byteswapping conversion"""

    def _post_init(self, *args, **kwargs):
        self.pixels = self.image32

    def reverse(self, i):
        # Byte-swap a 32-bit int by rewriting it through an unsigned char*
        # view of a freshly allocated int, then reading it back.
        orig = ffi.new("int *", i)
        chars = ffi.cast("unsigned char *", orig)
        chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0]
        return ffi.cast("int *", chars)[0]

    def get_pixel(self, x, y):
        # Swap on read: file byte order is opposite to the host's.
        return self.reverse(self.pixels[y][x])

    def set_pixel(self, x, y, color):
        # Swap on write, so memory stays in the file's byte order.
        self.pixels[y][x] = self.reverse(color)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class _PyAccessF(PyAccess):
    """32 bit float access"""

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast("float **", self.image32)

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        # The cffi float assignment itself raises TypeError for tuples,
        # so the try must wrap the assignment, not a pre-computation.
        try:
            # not a tuple
            self.pixels[y][x] = color
        except TypeError:
            # tuple
            self.pixels[y][x] = color[0]
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
# Mapping from image mode to the PyAccess subclass that handles it.
mode_map = {
    "1": _PyAccess8,
    "L": _PyAccess8,
    "P": _PyAccess8,
    "LA": _PyAccess32_2,
    "La": _PyAccess32_2,
    "PA": _PyAccess32_2,
    "RGB": _PyAccess32_3,
    "LAB": _PyAccess32_3,
    "HSV": _PyAccess32_3,
    "YCbCr": _PyAccess32_3,
    "RGBA": _PyAccess32_4,
    "RGBa": _PyAccess32_4,
    "RGBX": _PyAccess32_4,
    "CMYK": _PyAccess32_4,
    "F": _PyAccessF,
    "I": _PyAccessI32_N,
}

# Explicit-endian modes get native access when they match the host byte
# order, and converting/byteswapping access when they do not.
if sys.byteorder == "little":
    mode_map["I;16"] = _PyAccessI16_N
    mode_map["I;16L"] = _PyAccessI16_N
    mode_map["I;16B"] = _PyAccessI16_B

    mode_map["I;32L"] = _PyAccessI32_N
    mode_map["I;32B"] = _PyAccessI32_Swap
else:
    mode_map["I;16"] = _PyAccessI16_L
    mode_map["I;16L"] = _PyAccessI16_L
    mode_map["I;16B"] = _PyAccessI16_N

    mode_map["I;32L"] = _PyAccessI32_Swap
    mode_map["I;32B"] = _PyAccessI32_N
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def new(img, readonly=False):
    """Return a PyAccess instance for img's mode, or None if unsupported."""
    cls = mode_map.get(img.mode, None)
    if cls:
        return cls(img, readonly)
    logger.debug("PyAccess Not Implemented: %s", img.mode)
    return None
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/SgiImagePlugin.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library.
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# SGI image file handling
|
| 6 |
+
#
|
| 7 |
+
# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli.
|
| 8 |
+
# <ftp://ftp.sgi.com/graphics/SGIIMAGESPEC>
|
| 9 |
+
#
|
| 10 |
+
#
|
| 11 |
+
# History:
|
| 12 |
+
# 2017-22-07 mb Add RLE decompression
|
| 13 |
+
# 2016-16-10 mb Add save method without compression
|
| 14 |
+
# 1995-09-10 fl Created
|
| 15 |
+
#
|
| 16 |
+
# Copyright (c) 2016 by Mickael Bonfill.
|
| 17 |
+
# Copyright (c) 2008 by Karsten Hiddemann.
|
| 18 |
+
# Copyright (c) 1997 by Secret Labs AB.
|
| 19 |
+
# Copyright (c) 1995 by Fredrik Lundh.
|
| 20 |
+
#
|
| 21 |
+
# See the README file for information on usage and redistribution.
|
| 22 |
+
#
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import os
|
| 26 |
+
import struct
|
| 27 |
+
|
| 28 |
+
from . import Image, ImageFile
|
| 29 |
+
from ._binary import i16be as i16
|
| 30 |
+
from ._binary import o8
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _accept(prefix):
    """Check for the big-endian SGI magic number 474 (0x01DA)."""
    if len(prefix) < 2:
        return False
    return i16(prefix) == 474
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# (bytes-per-channel, dimension, zsize) -> rawmode for decoding.
MODES = {
    (1, 1, 1): "L",
    (1, 2, 1): "L",
    (2, 1, 1): "L;16B",
    (2, 2, 1): "L;16B",
    (1, 3, 3): "RGB",
    (2, 3, 3): "RGB;16B",
    (1, 3, 4): "RGBA",
    (2, 3, 4): "RGBA;16B",
}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
##
|
| 50 |
+
# Image plugin for SGI images.
|
| 51 |
+
class SgiImageFile(ImageFile.ImageFile):
    """Image plugin for SGI (.bw/.rgb/.rgba/.sgi) image files."""

    format = "SGI"
    format_description = "SGI Image File Format"

    def _open(self):
        """Parse the fixed 512-byte SGI header and set up decode tiles."""

        # HEAD
        headlen = 512
        s = self.fp.read(headlen)

        if not _accept(s):
            raise ValueError("Not an SGI image file")

        # compression : verbatim or RLE
        compression = s[2]

        # bpc : 1 or 2 bytes (8bits or 16bits)
        bpc = s[3]

        # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize)
        dimension = i16(s, 4)

        # xsize : width
        xsize = i16(s, 6)

        # ysize : height
        ysize = i16(s, 8)

        # zsize : channels count
        zsize = i16(s, 10)

        # layout key into the MODES table
        layout = bpc, dimension, zsize

        # determine mode from bits/zsize
        rawmode = ""
        try:
            rawmode = MODES[layout]
        except KeyError:
            pass

        if rawmode == "":
            raise ValueError("Unsupported SGI image mode")

        self._size = xsize, ysize
        # e.g. "RGB;16B" -> mode "RGB"
        self.mode = rawmode.split(";")[0]
        if self.mode == "RGB":
            self.custom_mimetype = "image/rgb"

        # orientation -1 : scanlines begins at the bottom-left corner
        orientation = -1

        # decoder info
        if compression == 0:
            # verbatim data: each channel is a contiguous plane
            pagesize = xsize * ysize * bpc
            if bpc == 2:
                # 16-bit data goes through the dedicated SGI16 decoder
                self.tile = [
                    ("SGI16", (0, 0) + self.size, headlen, (self.mode, 0, orientation))
                ]
            else:
                # 8-bit data: one raw tile per channel plane
                self.tile = []
                offset = headlen
                for layer in self.mode:
                    self.tile.append(
                        ("raw", (0, 0) + self.size, offset, (layer, 0, orientation))
                    )
                    offset += pagesize
        elif compression == 1:
            # run-length encoded scanlines
            self.tile = [
                ("sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc))
            ]
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _save(im, fp, filename):
    """Write *im* to *fp* as an uncompressed SGI file.

    Only "RGB", "RGBA" and "L" modes are supported; raises ValueError
    otherwise.
    """
    if im.mode != "RGB" and im.mode != "RGBA" and im.mode != "L":
        raise ValueError("Unsupported SGI image mode")

    # Get the keyword arguments
    info = im.encoderinfo

    # Byte-per-pixel precision, 1 = 8bits per pixel
    bpc = info.get("bpc", 1)

    if bpc not in (1, 2):
        raise ValueError("Unsupported number of bytes per pixel")

    # Flip the image, since the origin of SGI file is the bottom-left corner
    orientation = -1
    # Define the file as SGI File Format
    magicNumber = 474
    # Run-Length Encoding Compression - Unsupported at this time
    rle = 0

    # Number of dimensions (x,y,z)
    dim = 3
    # X Dimension = width / Y Dimension = height
    x, y = im.size
    if im.mode == "L" and y == 1:
        dim = 1
    elif im.mode == "L":
        dim = 2
    # Z Dimension: Number of channels
    z = len(im.mode)

    # For 1D/2D (greyscale) files zsize is defined to be 1.
    if dim == 1 or dim == 2:
        z = 1

    # assert we've got the right number of bands.
    if len(im.getbands()) != z:
        raise ValueError(
            f"incorrect number of bands in SGI write: {z} vs {len(im.getbands())}"
        )

    # Minimum Byte value
    pinmin = 0
    # Maximum Byte value (255 = 8bits per pixel)
    pinmax = 255
    # Image name (79 characters max, truncated below in write)
    imgName = os.path.splitext(os.path.basename(filename))[0]
    imgName = imgName.encode("ascii", "ignore")
    # Standard representation of pixel in the file
    colormap = 0
    # Write the 512-byte header; all multi-byte fields are big-endian.
    fp.write(struct.pack(">h", magicNumber))
    fp.write(o8(rle))
    fp.write(o8(bpc))
    fp.write(struct.pack(">H", dim))
    fp.write(struct.pack(">H", x))
    fp.write(struct.pack(">H", y))
    fp.write(struct.pack(">H", z))
    fp.write(struct.pack(">l", pinmin))
    fp.write(struct.pack(">l", pinmax))
    fp.write(struct.pack("4s", b""))  # dummy
    fp.write(struct.pack("79s", imgName))  # truncates to 79 chars
    fp.write(struct.pack("s", b""))  # force null byte after imgname
    fp.write(struct.pack(">l", colormap))
    fp.write(struct.pack("404s", b""))  # dummy to pad the header to 512 bytes

    rawmode = "L"
    if bpc == 2:
        rawmode = "L;16B"

    # Each channel is written as a full plane, bottom-up (orientation -1).
    for channel in im.split():
        fp.write(channel.tobytes("raw", rawmode, 0, orientation))

    if hasattr(fp, "flush"):
        fp.flush()
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class SGI16Decoder(ImageFile.PyDecoder):
    """Decoder for uncompressed 16-bit-per-channel SGI pixel data."""

    # We read straight from the file object, not from a pushed buffer.
    _pulls_fd = True

    def decode(self, buffer):
        rawmode, stride, orientation = self.args
        pagesize = self.state.xsize * self.state.ysize
        zsize = len(self.mode)
        # Skip past the fixed 512-byte header.
        self.fd.seek(512)

        # Each band is stored as a contiguous big-endian 16-bit plane.
        for band in range(zsize):
            channel = Image.new("L", (self.state.xsize, self.state.ysize))
            channel.frombytes(
                self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation
            )
            self.im.putband(channel.im, band)

        # -1, 0: all data consumed, no error.
        return -1, 0
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
#
|
| 220 |
+
# registry
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
# Register the decoder, plugin, save handler, MIME type and extensions.
Image.register_decoder("SGI16", SGI16Decoder)
Image.register_open(SgiImageFile.format, SgiImageFile, _accept)
Image.register_save(SgiImageFile.format, _save)
Image.register_mime(SgiImageFile.format, "image/sgi")

Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"])
|
| 229 |
+
|
| 230 |
+
# End of file
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/WebPImagePlugin.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from io import BytesIO

from . import Image, ImageFile

try:
    from . import _webp

    SUPPORTED = True
except ImportError:
    # The compiled _webp module is optional; without it the plugin can
    # still identify WebP files but cannot decode or encode them.
    SUPPORTED = False
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Modes the animated (WebPAnimEncoder) path accepts without conversion.
_VALID_WEBP_MODES = {"RGBX": True, "RGBA": True, "RGB": True}

# Modes the legacy single-frame encoder accepts without conversion.
_VALID_WEBP_LEGACY_MODES = {"RGB": True, "RGBA": True}

# Chunk fourcc identifier -> PIL mode the payload decodes to.
_VP8_MODES_BY_IDENTIFIER = {
    b"VP8 ": "RGB",
    b"VP8X": "RGBA",
    b"VP8L": "RGBA",  # lossless
}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _accept(prefix):
    """Identify a WebP header.

    Returns True for a recognized WebP file, an explanatory string when
    WebP support is not installed, and None for non-WebP data.
    """
    if prefix[:4] != b"RIFF":
        return
    if prefix[8:12] != b"WEBP":
        return
    if prefix[12:16] not in _VP8_MODES_BY_IDENTIFIER:
        return
    if SUPPORTED:
        return True
    return "image file could not be identified because WEBP support not installed"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class WebPImageFile(ImageFile.ImageFile):
    """Image plugin for WebP, including animated files when the animation
    API is available in the compiled _webp module."""

    format = "WEBP"
    format_description = "WebP image"
    # Logical frame most recently decoded into self.fp / self.tile.
    __loaded = 0
    # Frame requested via seek(); actual decoding is deferred to load().
    __logical_frame = 0

    def _open(self):
        if not _webp.HAVE_WEBPANIM:
            # Legacy mode
            data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(
                self.fp.read()
            )
            if icc_profile:
                self.info["icc_profile"] = icc_profile
            if exif:
                self.info["exif"] = exif
            self._size = width, height
            self.fp = BytesIO(data)
            self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
            self.n_frames = 1
            self.is_animated = False
            return

        # Use the newer AnimDecoder API to parse the (possibly) animated file,
        # and access muxed chunks like ICC/EXIF/XMP.
        self._decoder = _webp.WebPAnimDecoder(self.fp.read())

        # Get info from decoder
        width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info()
        self._size = width, height
        self.info["loop"] = loop_count
        # bgcolor is packed ARGB; unpack and expose as an RGBA tuple.
        bg_a, bg_r, bg_g, bg_b = (
            (bgcolor >> 24) & 0xFF,
            (bgcolor >> 16) & 0xFF,
            (bgcolor >> 8) & 0xFF,
            bgcolor & 0xFF,
        )
        self.info["background"] = (bg_r, bg_g, bg_b, bg_a)
        self.n_frames = frame_count
        self.is_animated = self.n_frames > 1
        self.mode = "RGB" if mode == "RGBX" else mode
        self.rawmode = mode
        self.tile = []

        # Attempt to read ICC / EXIF / XMP chunks from file
        icc_profile = self._decoder.get_chunk("ICCP")
        exif = self._decoder.get_chunk("EXIF")
        xmp = self._decoder.get_chunk("XMP ")
        if icc_profile:
            self.info["icc_profile"] = icc_profile
        if exif:
            self.info["exif"] = exif
        if xmp:
            self.info["xmp"] = xmp

        # Initialize seek state
        self._reset(reset=False)

    def _getexif(self):
        if "exif" not in self.info:
            return None
        return self.getexif()._get_merged_dict()

    def seek(self, frame):
        if not self._seek_check(frame):
            return

        # Set logical frame to requested position
        self.__logical_frame = frame

    def _reset(self, reset=True):
        # Rewind decoder bookkeeping; reset=False skips the underlying
        # decoder (used from _open, when it is already at frame 0).
        if reset:
            self._decoder.reset()
        self.__physical_frame = 0
        self.__loaded = -1
        self.__timestamp = 0

    def _get_next(self):
        # Get next frame
        ret = self._decoder.get_next()
        self.__physical_frame += 1

        # Check if an error occurred
        if ret is None:
            self._reset()  # Reset just to be safe
            self.seek(0)
            raise EOFError("failed to decode next frame in WebP file")

        # Compute duration
        data, timestamp = ret
        duration = timestamp - self.__timestamp
        self.__timestamp = timestamp

        # libwebp gives frame end, adjust to start of frame
        timestamp -= duration
        return data, timestamp, duration

    def _seek(self, frame):
        if self.__physical_frame == frame:
            return  # Nothing to do
        if frame < self.__physical_frame:
            self._reset()  # Rewind to beginning
        while self.__physical_frame < frame:
            self._get_next()  # Advance to the requested frame

    def load(self):
        if _webp.HAVE_WEBPANIM:
            if self.__loaded != self.__logical_frame:
                self._seek(self.__logical_frame)

                # We need to load the image data for this frame
                data, timestamp, duration = self._get_next()
                self.info["timestamp"] = timestamp
                self.info["duration"] = duration
                self.__loaded = self.__logical_frame

                # Set tile
                if self.fp and self._exclusive_fp:
                    self.fp.close()
                self.fp = BytesIO(data)
                self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)]

        return super().load()

    def tell(self):
        if not _webp.HAVE_WEBPANIM:
            return super().tell()

        return self.__logical_frame
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def _save_all(im, fp, filename):
    """Write *im* plus any ``append_images`` as an animated WebP.

    Falls back to the legacy single-frame ``_save`` when the total frame
    count is 1, which preserves non-alpha modes.
    """
    encoderinfo = im.encoderinfo.copy()
    append_images = list(encoderinfo.get("append_images", []))

    # If total frame count is 1, then save using the legacy API, which
    # will preserve non-alpha modes
    total = 0
    for ims in [im] + append_images:
        total += getattr(ims, "n_frames", 1)
    if total == 1:
        _save(im, fp, filename)
        return

    background = (0, 0, 0, 0)
    if "background" in encoderinfo:
        background = encoderinfo["background"]
    elif "background" in im.info:
        background = im.info["background"]
        if isinstance(background, int):
            # GifImagePlugin stores a global color table index in
            # info["background"]. So it must be converted to an RGBA value
            palette = im.getpalette()
            if palette:
                r, g, b = palette[background * 3 : (background + 1) * 3]
                background = (r, g, b, 255)
            else:
                background = (background, background, background, 255)

    duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
    loop = im.encoderinfo.get("loop", 0)
    minimize_size = im.encoderinfo.get("minimize_size", False)
    kmin = im.encoderinfo.get("kmin", None)
    kmax = im.encoderinfo.get("kmax", None)
    allow_mixed = im.encoderinfo.get("allow_mixed", False)
    verbose = False
    lossless = im.encoderinfo.get("lossless", False)
    quality = im.encoderinfo.get("quality", 80)
    method = im.encoderinfo.get("method", 0)
    icc_profile = im.encoderinfo.get("icc_profile") or ""
    exif = im.encoderinfo.get("exif", "")
    if isinstance(exif, Image.Exif):
        exif = exif.tobytes()
    xmp = im.encoderinfo.get("xmp", "")
    # Mixed lossy/lossless frames are incompatible with forced lossless.
    if allow_mixed:
        lossless = False

    # Sensible keyframe defaults are from gif2webp.c script
    if kmin is None:
        kmin = 9 if lossless else 3
    if kmax is None:
        kmax = 17 if lossless else 5

    # Validate background color
    if (
        not isinstance(background, (list, tuple))
        or len(background) != 4
        or not all(v >= 0 and v < 256 for v in background)
    ):
        raise OSError(
            "Background color is not an RGBA tuple clamped to (0-255): %s"
            % str(background)
        )

    # Convert to packed uint
    bg_r, bg_g, bg_b, bg_a = background
    background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0)

    # Setup the WebP animation encoder
    enc = _webp.WebPAnimEncoder(
        im.size[0],
        im.size[1],
        background,
        loop,
        minimize_size,
        kmin,
        kmax,
        allow_mixed,
        verbose,
    )

    # Add each frame
    frame_idx = 0
    timestamp = 0
    cur_idx = im.tell()
    try:
        for ims in [im] + append_images:
            # Get # of frames in this image
            nfr = getattr(ims, "n_frames", 1)

            for idx in range(nfr):
                ims.seek(idx)
                ims.load()

                # Make sure image mode is supported
                frame = ims
                rawmode = ims.mode
                if ims.mode not in _VALID_WEBP_MODES:
                    alpha = (
                        "A" in ims.mode
                        or "a" in ims.mode
                        or (ims.mode == "P" and "A" in ims.im.getpalettemode())
                    )
                    rawmode = "RGBA" if alpha else "RGB"
                    frame = ims.convert(rawmode)

                if rawmode == "RGB":
                    # For faster conversion, use RGBX
                    rawmode = "RGBX"

                # Append the frame to the animation encoder
                enc.add(
                    frame.tobytes("raw", rawmode),
                    timestamp,
                    frame.size[0],
                    frame.size[1],
                    rawmode,
                    lossless,
                    quality,
                    method,
                )

                # Update timestamp and frame index
                if isinstance(duration, (list, tuple)):
                    timestamp += duration[frame_idx]
                else:
                    timestamp += duration
                frame_idx += 1

    finally:
        # Restore the caller's frame position even on error.
        im.seek(cur_idx)

    # Force encoder to flush frames
    enc.add(None, timestamp, 0, 0, "", lossless, quality, 0)

    # Get the final output from the encoder
    data = enc.assemble(icc_profile, exif, xmp)
    if data is None:
        raise OSError("cannot write file as WebP (encoder returned None)")

    fp.write(data)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def _save(im, fp, filename):
    """Save a single still image as (legacy) WebP.

    Collects encoder options from ``im.encoderinfo``, coerces unsupported
    modes to RGB/RGBA, and hands the raw pixel data to the native encoder.

    :param im: Image to save.
    :param fp: File-like object opened for binary writing.
    :param filename: Target file name (unused; kept for the plugin API).
    :raises OSError: If the native encoder fails and returns ``None``.
    """
    options = im.encoderinfo
    lossless = options.get("lossless", False)
    quality = options.get("quality", 80)
    method = options.get("method", 4)
    icc_profile = options.get("icc_profile") or ""
    xmp = options.get("xmp", "")
    exif = options.get("exif", "")
    if isinstance(exif, Image.Exif):
        # Serialize an Exif container to raw bytes for the C encoder.
        exif = exif.tobytes()

    if im.mode not in _VALID_WEBP_LEGACY_MODES:
        # Any alpha information (alpha band, or palette transparency)
        # forces RGBA; everything else goes to plain RGB.
        wants_alpha = (
            "A" in im.mode
            or "a" in im.mode
            or (im.mode == "P" and "transparency" in im.info)
        )
        im = im.convert("RGBA" if wants_alpha else "RGB")

    data = _webp.WebPEncode(
        im.tobytes(),
        im.size[0],
        im.size[1],
        lossless,
        float(quality),
        im.mode,
        icc_profile,
        method,
        exif,
        xmp,
    )
    if data is None:
        raise OSError("cannot write file as WebP (encoder returned None)")

    fp.write(data)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
# Register the WebP plugin with the Image machinery.  Opening is always
# registered; saving is only registered when the native _webp module is
# available (SUPPORTED).
Image.register_open(WebPImageFile.format, WebPImageFile, _accept)
if SUPPORTED:
    Image.register_save(WebPImageFile.format, _save)
    if _webp.HAVE_WEBPANIM:
        # Multi-frame (animated) saving needs libwebp mux/demux support.
        Image.register_save_all(WebPImageFile.format, _save_all)
    Image.register_extension(WebPImageFile.format, ".webp")
    Image.register_mime(WebPImageFile.format, "image/webp")
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/WmfImagePlugin.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# The Python Imaging Library
|
| 3 |
+
# $Id$
|
| 4 |
+
#
|
| 5 |
+
# WMF stub codec
|
| 6 |
+
#
|
| 7 |
+
# history:
|
| 8 |
+
# 1996-12-14 fl Created
|
| 9 |
+
# 2004-02-22 fl Turned into a stub driver
|
| 10 |
+
# 2004-02-23 fl Added EMF support
|
| 11 |
+
#
|
| 12 |
+
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
|
| 13 |
+
# Copyright (c) Fredrik Lundh 1996.
|
| 14 |
+
#
|
| 15 |
+
# See the README file for information on usage and redistribution.
|
| 16 |
+
#
|
| 17 |
+
# WMF/EMF reference documentation:
|
| 18 |
+
# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf
|
| 19 |
+
# http://wvware.sourceforge.net/caolan/index.html
|
| 20 |
+
# http://wvware.sourceforge.net/caolan/ora-wmf.html
|
| 21 |
+
|
| 22 |
+
from . import Image, ImageFile
|
| 23 |
+
from ._binary import i16le as word
|
| 24 |
+
from ._binary import si16le as short
|
| 25 |
+
from ._binary import si32le as _long
|
| 26 |
+
|
| 27 |
+
_handler = None
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def register_handler(handler):
    """
    Install application-specific WMF image handler.

    :param handler: Handler object.  Expected to expose ``open(im)`` and
        ``load(im)``; a ``save(im, fp, filename)`` method is additionally
        required for writing (see ``_save`` below).
    """
    global _handler
    _handler = handler
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
if hasattr(Image.core, "drawwmf"):
    # install default handler (windows only)

    class WmfHandler:
        # Default WMF handler backed by the native Image.core.drawwmf
        # rasterizer (only present in builds compiled with WMF support).

        def open(self, im):
            # Rendering always produces RGB output; remember the bounding
            # box parsed from the header for the later draw call.
            im.mode = "RGB"
            self.bbox = im.info["wmf_bbox"]

        def load(self, im):
            im.fp.seek(0)  # rewind
            return Image.frombytes(
                "RGB",
                im.size,
                Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
                "raw",
                "BGR",
                (im.size[0] * 3 + 3) & -4,  # rows padded to 4-byte (DIB) stride
                -1,  # negative orientation: DIB rows are stored bottom-up
            )

    register_handler(WmfHandler())
|
| 61 |
+
|
| 62 |
+
#
|
| 63 |
+
# --------------------------------------------------------------------
|
| 64 |
+
# Read WMF file
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _accept(prefix):
|
| 68 |
+
return (
|
| 69 |
+
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00"
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
##
|
| 74 |
+
# Image plugin for Windows metafiles.
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class WmfStubImageFile(ImageFile.StubImageFile):
    """Stub image plugin for Windows metafiles (placeable WMF) and
    enhanced metafiles (EMF).

    Only the header is parsed here (size, bounding box, dpi); actual
    rasterization is delegated to the handler installed via
    :py:func:`register_handler`.
    """

    format = "WMF"
    format_description = "Windows Metafile"

    def _open(self):
        # Units-per-inch of a placeable WMF; stays None for EMF input.
        self._inch = None

        # check placable header
        s = self.fp.read(80)

        if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":

            # placeable windows metafile

            # get units per inch
            self._inch = word(s, 14)

            # get bounding box (signed 16-bit values, in metafile units)
            x0 = short(s, 6)
            y0 = short(s, 8)
            x1 = short(s, 10)
            y1 = short(s, 12)

            # normalize size to 72 dots per inch
            self.info["dpi"] = 72
            size = (
                (x1 - x0) * self.info["dpi"] // self._inch,
                (y1 - y0) * self.info["dpi"] // self._inch,
            )

            self.info["wmf_bbox"] = x0, y0, x1, y1

            # sanity check (standard metafile header)
            if s[22:26] != b"\x01\x00\t\x00":
                raise SyntaxError("Unsupported WMF file format")

        elif s[:4] == b"\x01\x00\x00\x00" and s[40:44] == b" EMF":
            # enhanced metafile

            # get bounding box (signed 32-bit values, in device pixels)
            x0 = _long(s, 8)
            y0 = _long(s, 12)
            x1 = _long(s, 16)
            y1 = _long(s, 20)

            # get frame (in 0.01 millimeter units)
            frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)

            size = x1 - x0, y1 - y0

            # calculate dots per inch from bbox and frame
            # (2540 hundredths of a millimeter per inch)
            # BUG FIX: the horizontal extent is (x1 - x0); the original
            # code used (x1 - y0), mixing the axes.
            xdpi = 2540.0 * (x1 - x0) / (frame[2] - frame[0])
            ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1])

            self.info["wmf_bbox"] = x0, y0, x1, y1

            if xdpi == ydpi:
                self.info["dpi"] = xdpi
            else:
                self.info["dpi"] = xdpi, ydpi

        else:
            raise SyntaxError("Unsupported file format")

        self.mode = "RGB"
        self._size = size

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        """Return the installed stub handler (may be None)."""
        return _handler

    def load(self, dpi=None):
        """Load pixel data, optionally re-targeting a placeable WMF to *dpi*.

        :param dpi: Optional dots-per-inch override; only honoured for
            placeable WMF input (where ``self._inch`` is known).
        """
        if dpi is not None and self._inch is not None:
            self.info["dpi"] = dpi
            x0, y0, x1, y1 = self.info["wmf_bbox"]
            # Re-derive the pixel size from metafile units at the
            # requested resolution before handing off to the handler.
            self._size = (
                (x1 - x0) * self.info["dpi"] // self._inch,
                (y1 - y0) * self.info["dpi"] // self._inch,
            )
        return super().load()
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _save(im, fp, filename):
    """Delegate saving to the registered WMF handler, if it can write.

    :raises OSError: If no handler is installed, or the installed handler
        has no ``save`` method.
    """
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        raise OSError("WMF save handler not installed")
    handler.save(im, fp, filename)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
#
# --------------------------------------------------------------------
# Registry stuff


# Opening is always available (stub parsing only); saving works only if an
# application has installed a handler with a save() method (see _save).
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)

Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"])
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/__init__.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pillow (Fork of the Python Imaging Library)
|
| 2 |
+
|
| 3 |
+
Pillow is the friendly PIL fork by Alex Clark and Contributors.
|
| 4 |
+
https://github.com/python-pillow/Pillow/
|
| 5 |
+
|
| 6 |
+
Pillow is forked from PIL 1.1.7.
|
| 7 |
+
|
| 8 |
+
PIL is the Python Imaging Library by Fredrik Lundh and Contributors.
|
| 9 |
+
Copyright (c) 1999 by Secret Labs AB.
|
| 10 |
+
|
| 11 |
+
Use PIL.__version__ for this Pillow version.
|
| 12 |
+
|
| 13 |
+
;-)
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from . import _version
|
| 17 |
+
|
| 18 |
+
# VERSION was removed in Pillow 6.0.0.
# PILLOW_VERSION was removed in Pillow 9.0.0.
# Use __version__ instead.
__version__ = _version.__version__
# Drop the helper module reference so only __version__ stays public here.
del _version
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Names of the format plugin modules shipped with this package.
# NOTE(review): presumably consumed by PIL.Image.init() to import every
# plugin on demand — confirm against Image.py.
_plugins = [
    "BlpImagePlugin",
    "BmpImagePlugin",
    "BufrStubImagePlugin",
    "CurImagePlugin",
    "DcxImagePlugin",
    "DdsImagePlugin",
    "EpsImagePlugin",
    "FitsImagePlugin",
    "FitsStubImagePlugin",
    "FliImagePlugin",
    "FpxImagePlugin",
    "FtexImagePlugin",
    "GbrImagePlugin",
    "GifImagePlugin",
    "GribStubImagePlugin",
    "Hdf5StubImagePlugin",
    "IcnsImagePlugin",
    "IcoImagePlugin",
    "ImImagePlugin",
    "ImtImagePlugin",
    "IptcImagePlugin",
    "JpegImagePlugin",
    "Jpeg2KImagePlugin",
    "McIdasImagePlugin",
    "MicImagePlugin",
    "MpegImagePlugin",
    "MpoImagePlugin",
    "MspImagePlugin",
    "PalmImagePlugin",
    "PcdImagePlugin",
    "PcxImagePlugin",
    "PdfImagePlugin",
    "PixarImagePlugin",
    "PngImagePlugin",
    "PpmImagePlugin",
    "PsdImagePlugin",
    "SgiImagePlugin",
    "SpiderImagePlugin",
    "SunImagePlugin",
    "TgaImagePlugin",
    "TiffImagePlugin",
    "WebPImagePlugin",
    "WmfImagePlugin",
    "XbmImagePlugin",
    "XpmImagePlugin",
    "XVThumbImagePlugin",
]
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class UnidentifiedImageError(OSError):
    """
    Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified.
    """
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/_imagingft.cpython-38-x86_64-linux-gnu.so
ADDED
|
Binary file (68.7 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/PIL/_imagingmorph.cpython-38-x86_64-linux-gnu.so
ADDED
|
Binary file (14.7 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_http_writer.cpython-38-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:29833177b75fee1a4825d7a843e70c63752c877889ce015d73c03cdaa8ef4b63
|
| 3 |
+
size 396664
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/api.cpython-38.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/cd.cpython-38.pyc
ADDED
|
Binary file (8.87 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/constant.cpython-38.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/legacy.cpython-38.pyc
ADDED
|
Binary file (3 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/md.cpython-38.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/models.cpython-38.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__pycache__/version.cpython-38.pyc
ADDED
|
Binary file (255 Bytes). View file
|
|
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cli/__init__.py
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cli/normalizer.py
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import sys
|
| 3 |
+
from json import dumps
|
| 4 |
+
from os.path import abspath
|
| 5 |
+
from platform import python_version
|
| 6 |
+
from typing import List
|
| 7 |
+
|
| 8 |
+
from charset_normalizer import from_fp
|
| 9 |
+
from charset_normalizer.models import CliDetectionResult
|
| 10 |
+
from charset_normalizer.version import __version__
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def query_yes_no(question: str, default: str = "yes") -> bool:
    """Ask a yes/no question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".

    Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    # Prompt suffix encodes which answer <Enter> selects.
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}

    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]

    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if choice == "" and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def cli_detect(argv: List[str] = None) -> int:
    """
    CLI assistant using ARGV and ArgumentParser

    :param argv: Argument list to parse; ``None`` means ``sys.argv[1:]``.
    :return: 0 if everything is fine, anything else equal trouble
    """
    parser = argparse.ArgumentParser(
        description="The Real First Universal Charset Detector. "
        "Discover originating encoding used on text file. "
        "Normalize text to unicode."
    )

    parser.add_argument(
        "files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=False,
        dest="verbose",
        help="Display complementary information about file if any. "
        "Stdout will contain logs about the detection process.",
    )
    parser.add_argument(
        "-a",
        "--with-alternative",
        action="store_true",
        default=False,
        dest="alternatives",
        help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
    )
    parser.add_argument(
        "-n",
        "--normalize",
        action="store_true",
        default=False,
        dest="normalize",
        help="Permit to normalize input file. If not set, program does not write anything.",
    )
    parser.add_argument(
        "-m",
        "--minimal",
        action="store_true",
        default=False,
        dest="minimal",
        help="Only output the charset detected to STDOUT. Disabling JSON output.",
    )
    parser.add_argument(
        "-r",
        "--replace",
        action="store_true",
        default=False,
        dest="replace",
        help="Replace file when trying to normalize it instead of creating a new one.",
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        dest="force",
        help="Replace file without asking if you are sure, use this flag with caution.",
    )
    parser.add_argument(
        "-t",
        "--threshold",
        action="store",
        default=0.1,
        type=float,
        dest="threshold",
        help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="Charset-Normalizer {} - Python {}".format(
            __version__, python_version()
        ),
        help="Show version information and exit.",
    )

    args = parser.parse_args(argv)

    # Reject flag combinations that make no sense before touching any file.
    if args.replace is True and args.normalize is False:
        print("Use --replace in addition of --normalize only.", file=sys.stderr)
        return 1

    if args.force is True and args.replace is False:
        print("Use --force in addition of --replace only.", file=sys.stderr)
        return 1

    if args.threshold < 0.0 or args.threshold > 1.0:
        print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
        return 1

    # Accumulates one CliDetectionResult per file (plus alternatives).
    x_ = []

    for my_file in args.files:

        matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)

        best_guess = matches.best()

        if best_guess is None:
            # Detection failed: report on stderr and record a placeholder
            # result so JSON output stays aligned with the input files.
            print(
                'Unable to identify originating encoding for "{}". {}'.format(
                    my_file.name,
                    "Maybe try increasing maximum amount of chaos."
                    if args.threshold < 1.0
                    else "",
                ),
                file=sys.stderr,
            )
            x_.append(
                CliDetectionResult(
                    abspath(my_file.name),
                    None,
                    [],
                    [],
                    "Unknown",
                    [],
                    False,
                    1.0,
                    0.0,
                    None,
                    True,
                )
            )
        else:
            x_.append(
                CliDetectionResult(
                    abspath(my_file.name),
                    best_guess.encoding,
                    best_guess.encoding_aliases,
                    [
                        cp
                        for cp in best_guess.could_be_from_charset
                        if cp != best_guess.encoding
                    ],
                    best_guess.language,
                    best_guess.alphabets,
                    best_guess.bom,
                    best_guess.percent_chaos,
                    best_guess.percent_coherence,
                    None,
                    True,
                )
            )

            # Optionally record the non-best candidates too (marked with a
            # trailing False so they can be told apart from the best guess).
            if len(matches) > 1 and args.alternatives:
                for el in matches:
                    if el != best_guess:
                        x_.append(
                            CliDetectionResult(
                                abspath(my_file.name),
                                el.encoding,
                                el.encoding_aliases,
                                [
                                    cp
                                    for cp in el.could_be_from_charset
                                    if cp != el.encoding
                                ],
                                el.language,
                                el.alphabets,
                                el.bom,
                                el.percent_chaos,
                                el.percent_coherence,
                                None,
                                False,
                            )
                        )

            if args.normalize is True:

                # Already Unicode: nothing to rewrite.
                if best_guess.encoding.startswith("utf") is True:
                    print(
                        '"{}" file does not need to be normalized, as it already came from unicode.'.format(
                            my_file.name
                        ),
                        file=sys.stderr,
                    )
                    if my_file.closed is False:
                        my_file.close()
                    continue

                o_ = my_file.name.split(".")  # type: List[str]

                if args.replace is False:
                    # Inject the detected encoding before the extension,
                    # e.g. "file.txt" -> "file.cp1252.txt".
                    o_.insert(-1, best_guess.encoding)
                    if my_file.closed is False:
                        my_file.close()
                elif (
                    args.force is False
                    and query_yes_no(
                        'Are you sure to normalize "{}" by replacing it ?'.format(
                            my_file.name
                        ),
                        "no",
                    )
                    is False
                ):
                    # User declined the in-place replacement.
                    if my_file.closed is False:
                        my_file.close()
                    continue

                try:
                    # NOTE(review): always updates x_[0] even when several
                    # files are processed — looks suspect for multi-file
                    # runs; confirm against upstream.
                    x_[0].unicode_path = abspath("./{}".format(".".join(o_)))

                    with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
                        fp.write(str(best_guess))
                except IOError as e:
                    print(str(e), file=sys.stderr)
                    if my_file.closed is False:
                        my_file.close()
                    return 2

        if my_file.closed is False:
            my_file.close()

    if args.minimal is False:
        # Full JSON report: a single object for one result, a list otherwise.
        print(
            dumps(
                [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
                ensure_ascii=True,
                indent=4,
            )
        )
    else:
        # Minimal mode: one line per input file with the detected charset(s).
        for my_file in args.files:
            print(
                ", ".join(
                    [
                        el.encoding or "undefined"
                        for el in x_
                        if el.path == abspath(my_file.name)
                    ]
                )
            )

    return 0
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
if __name__ == "__main__":
    # Allow running this module directly as a script.
    cli_detect()
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/LICENSE.txt
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2001, 2002 Enthought, Inc.
|
| 2 |
+
All rights reserved.
|
| 3 |
+
|
| 4 |
+
Copyright (c) 2003-2017 SciPy Developers.
|
| 5 |
+
All rights reserved.
|
| 6 |
+
|
| 7 |
+
Copyright (c) 2018 Gijs van Tulder.
|
| 8 |
+
All rights reserved.
|
| 9 |
+
|
| 10 |
+
Redistribution and use in source and binary forms, with or without
|
| 11 |
+
modification, are permitted provided that the following conditions are met:
|
| 12 |
+
|
| 13 |
+
a. Redistributions of source code must retain the above copyright notice,
|
| 14 |
+
this list of conditions and the following disclaimer.
|
| 15 |
+
b. Redistributions in binary form must reproduce the above copyright
|
| 16 |
+
notice, this list of conditions and the following disclaimer in the
|
| 17 |
+
documentation and/or other materials provided with the distribution.
|
| 18 |
+
c. Neither the name of Enthought nor the names of the SciPy Developers
|
| 19 |
+
may be used to endorse or promote products derived from this software
|
| 20 |
+
without specific prior written permission.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 24 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 25 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 26 |
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
|
| 27 |
+
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
| 28 |
+
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 29 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 30 |
+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 31 |
+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 32 |
+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
| 33 |
+
THE POSSIBILITY OF SUCH DAMAGE.
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: elasticdeform
|
| 3 |
+
Version: 0.5.0
|
| 4 |
+
Summary: Elastic deformations for N-D images.
|
| 5 |
+
Home-page: https://github.com/gvtulder/elasticdeform
|
| 6 |
+
Author: Gijs van Tulder
|
| 7 |
+
Author-email: gvtulder@gmail.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Classifier: Programming Language :: Python :: 2
|
| 10 |
+
Classifier: Programming Language :: Python :: 3
|
| 11 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 12 |
+
Description-Content-Type: text/markdown
|
| 13 |
+
License-File: LICENSE.txt
|
| 14 |
+
Requires-Dist: numpy
|
| 15 |
+
Requires-Dist: scipy
|
| 16 |
+
|
| 17 |
+
Elastic deformations for N-dimensional images (Python, SciPy, NumPy, TensorFlow, PyTorch)
|
| 18 |
+
=========================================================================================
|
| 19 |
+
|
| 20 |
+
[](https://elasticdeform.readthedocs.io/en/latest/?badge=latest)
|
| 21 |
+
[](https://github.com/gvtulder/elasticdeform/actions/workflows/test.yml)
|
| 22 |
+
[](https://github.com/gvtulder/elasticdeform/actions/workflows/wheels.yml)
|
| 23 |
+
[](https://zenodo.org/badge/latestdoi/145003699)
|
| 24 |
+
|
| 25 |
+
This library implements elastic grid-based deformations for N-dimensional images.
|
| 26 |
+
|
| 27 |
+
The elastic deformation approach is described in
|
| 28 |
+
* Ronneberger, Fischer, and Brox, "U-Net: Convolutional Networks for Biomedical
|
| 29 |
+
Image Segmentation" (<https://arxiv.org/abs/1505.04597>)
|
| 30 |
+
* Çiçek et al., "3D U-Net: Learning Dense Volumetric
|
| 31 |
+
Segmentation from Sparse Annotation" (<https://arxiv.org/abs/1606.06650>)
|
| 32 |
+
|
| 33 |
+
The procedure generates a coarse displacement grid with a random displacement
|
| 34 |
+
for each grid point. This grid is then interpolated to compute a displacement for
|
| 35 |
+
each pixel in the input image. The input image is then deformed using the
|
| 36 |
+
displacement vectors and a spline interpolation.
|
| 37 |
+
|
| 38 |
+
In addition to the normal, forward deformation, this package also provides a
|
| 39 |
+
function that can backpropagate the gradient through the deformation. This makes
|
| 40 |
+
it possible to use the deformation as a layer in a convolutional neural network.
|
| 41 |
+
For convenience, TensorFlow and PyTorch wrappers are provided in `elasticdeform.tf`
|
| 42 |
+
and `elasticdeform.torch`.
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
Installation
|
| 46 |
+
------------
|
| 47 |
+
|
| 48 |
+
```
|
| 49 |
+
pip install elasticdeform
|
| 50 |
+
or
|
| 51 |
+
pip install git+https://github.com/gvtulder/elasticdeform
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
This library requires Python 3 and NumPy development headers.
|
| 55 |
+
|
| 56 |
+
On Windows, try to install the precompiled binaries directly using `pip install elasticdeform`.
|
| 57 |
+
If that does not work, [these precompiled packages](https://www.lfd.uci.edu/~gohlke/pythonlibs/#elasticdeform) might be an alternative option.
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
Examples
|
| 61 |
+
--------
|
| 62 |
+
|
| 63 |
+
This basic example deforms an image with a random 3 x 3 deformation grid:
|
| 64 |
+
```python
|
| 65 |
+
import numpy, imageio, elasticdeform
|
| 66 |
+
X = numpy.zeros((200, 300))
|
| 67 |
+
X[::10, ::10] = 1
|
| 68 |
+
|
| 69 |
+
# apply deformation with a random 3 x 3 grid
|
| 70 |
+
X_deformed = elasticdeform.deform_random_grid(X, sigma=25, points=3)
|
| 71 |
+
|
| 72 |
+
imageio.imsave('test_X.png', X)
|
| 73 |
+
imageio.imsave('test_X_deformed.png', X_deformed)
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
### Multiple inputs
|
| 77 |
+
|
| 78 |
+
If you have multiple images, e.g., an image and a segmentation image, you can
|
| 79 |
+
deform both simultaneously by providing a list of inputs. You can specify
|
| 80 |
+
a different spline order for each input.
|
| 81 |
+
```python
|
| 82 |
+
# apply deformation to inputs X and Y
|
| 83 |
+
[X_deformed, Y_deformed] = elasticdeform.deform_random_grid([X, Y])
|
| 84 |
+
|
| 85 |
+
# apply deformation to inputs X and Y,
|
| 86 |
+
# with a different interpolation for each input
|
| 87 |
+
[X_deformed, Y_deformed] = elasticdeform.deform_random_grid([X, Y], order=[3, 0])
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
### Multi-channel images
|
| 91 |
+
|
| 92 |
+
By default, a deformation will be applied to every dimension of the input. If you
|
| 93 |
+
have multi-channel images, you can use the `axis` parameter to specify which axes
|
| 94 |
+
should be deformed. The same deformation will be applied for each channel.
|
| 95 |
+
|
| 96 |
+
For example, to deform an RGB image across the first two dimensions, run:
|
| 97 |
+
```python
|
| 98 |
+
X_deformed = elasticdeform.deform_random_grid(X, axis=(0, 1))
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
When deforming multiple inputs, you can provide a tuple of axes for each input:
|
| 102 |
+
```python
|
| 103 |
+
X = numpy.random.rand(3, 200, 300)
|
| 104 |
+
Y = numpy.random.rand(200, 300)
|
| 105 |
+
[X_deformed, Y_deformed] = elasticdeform.deform_random_grid([X, Y], axis=[(1, 2), (0, 1)])
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
### Cropping
|
| 109 |
+
|
| 110 |
+
If you intend to crop a small subpatch from the deformed image, you can provide
|
| 111 |
+
the crop dimensions to the deform function. It will then compute only the cropped
|
| 112 |
+
output pixels, while still computing the deformation grid based on the full image
|
| 113 |
+
dimensions. This saves computation time.
|
| 114 |
+
```python
|
| 115 |
+
X = numpy.random.rand(200, 300)
|
| 116 |
+
|
| 117 |
+
# define a crop region
|
| 118 |
+
crop = (slice(50, 150), slice(0, 100))
|
| 119 |
+
|
| 120 |
+
# generate a deformation grid
|
| 121 |
+
displacement = numpy.random.randn(2, 3, 3) * 25
|
| 122 |
+
|
| 123 |
+
# deform full image
|
| 124 |
+
X_deformed = elasticdeform.deform_grid(X, displacement)
|
| 125 |
+
# compute only the cropped region
|
| 126 |
+
X_deformed_crop = elasticdeform.deform_grid(X, displacement, crop=crop)
|
| 127 |
+
|
| 128 |
+
# the deformation is the same
|
| 129 |
+
numpy.testing.assert_equal(X_deformed[crop], X_deformed_crop)
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
### Rotate and zoom
|
| 134 |
+
|
| 135 |
+
The deformation functions accept `rotate` and `zoom` parameters, which allows you
|
| 136 |
+
to combine the elastic deformation with rotation and scaling. This can be useful
|
| 137 |
+
as a data augmentation step. The rotation and zoom are applied to the output
|
| 138 |
+
coordinates, using the center pixel of the output patch as the origin.
|
| 139 |
+
```python
|
| 140 |
+
# apply deformation with a random 3 x 3 grid,
|
| 141 |
+
# rotate by 30 degrees and rescale with a factor 1.5
|
| 142 |
+
X_deformed = elasticdeform.deform_random_grid(X, sigma=25, points=3,
|
| 143 |
+
rotate=30, zoom=1.5)
|
| 144 |
+
```
|
| 145 |
+
Note that the output shape remains the same. The mapping of the input to the
|
| 146 |
+
output is rotated within the given output frame.
|
| 147 |
+
|
| 148 |
+
Rotate and zoom can be combined with the `crop` argument. In that case, the
|
| 149 |
+
scaling and rotation is performed relative to the center of the cropped output.
|
| 150 |
+
|
| 151 |
+
For more advanced transformations, it is also possible to provide an affine
|
| 152 |
+
transformation matrix directly.
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
### Gradient
|
| 156 |
+
|
| 157 |
+
The `deform_grid_gradient` function can be used to backpropagate the gradient of
|
| 158 |
+
the output with respect to the input. Call `deform_grid_gradient` with the
|
| 159 |
+
parameters that were used for the forward step.
|
| 160 |
+
```python
|
| 161 |
+
X = numpy.random.rand(200, 300)
|
| 162 |
+
|
| 163 |
+
# generate a deformation grid
|
| 164 |
+
displacement = numpy.random.randn(2, 3, 3) * 25
|
| 165 |
+
|
| 166 |
+
# perform forward deformation
|
| 167 |
+
X_deformed = elasticdeform.deform_grid(X, displacement)
|
| 168 |
+
|
| 169 |
+
# obtain the gradient w.r.t. X_deformed (e.g., with backpropagation)
|
| 170 |
+
dX_deformed = numpy.random.randn(*X_deformed.shape)
|
| 171 |
+
|
| 172 |
+
# compute the gradient w.r.t. X
|
| 173 |
+
dX = elasticdeform.deform_grid_gradient(dX_deformed, displacement)
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
Note: The gradient function will assume that the input has the same size as the
|
| 177 |
+
output. If you used the `crop` parameter in the forward phase, it is necessary to
|
| 178 |
+
provide the gradient function with the original, uncropped input shape in the
|
| 179 |
+
`X_shape` parameter.
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
### TensorFlow wrapper
|
| 183 |
+
|
| 184 |
+
The `elasticdeform.tf` module provides a wrapper for `deform_grid` in TensorFlow.
|
| 185 |
+
The function uses TensorFlow Tensors as input and output, but otherwise uses
|
| 186 |
+
the same parameters.
|
| 187 |
+
```python
|
| 188 |
+
import numpy
|
| 189 |
+
import elasticdeform.tf as etf
|
| 190 |
+
|
| 191 |
+
displacement_val = numpy.random.randn(2, 3, 3) * 5
|
| 192 |
+
X_val = numpy.random.rand(200, 300)
|
| 193 |
+
dY_val = numpy.random.rand(200, 300)
|
| 194 |
+
|
| 195 |
+
# construct TensorFlow input and top gradient
|
| 196 |
+
displacement = tf.Variable(displacement_val)
|
| 197 |
+
X = tf.Variable(X_val)
|
| 198 |
+
dY = tf.Variable(dY_val)
|
| 199 |
+
|
| 200 |
+
# the deform_grid function is similar to the plain Python equivalent,
|
| 201 |
+
# but it accepts and returns TensorFlow Tensors
|
| 202 |
+
X_deformed = etf.deform_grid(X, displacement, order=3)
|
| 203 |
+
|
| 204 |
+
# the gradient w.r.t. X can be computed in the normal TensorFlow manner
|
| 205 |
+
[dX] = tf.gradients(X_deformed, X, dY)
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
### PyTorch wrapper
|
| 210 |
+
|
| 211 |
+
The `elasticdeform.torch` module provides a wrapper for `deform_grid` in PyTorch.
|
| 212 |
+
The function uses PyTorch Tensors as input and output, but otherwise uses
|
| 213 |
+
the same parameters.
|
| 214 |
+
```python
|
| 215 |
+
import numpy
|
| 216 |
+
import elasticdeform.torch as etorch
|
| 217 |
+
|
| 218 |
+
displacement_val = numpy.random.randn(2, 3, 3) * 5
|
| 219 |
+
X_val = numpy.random.rand(200, 300)
|
| 220 |
+
dY_val = numpy.random.rand(200, 300)
|
| 221 |
+
|
| 222 |
+
# construct PyTorch input and top gradient
|
| 223 |
+
displacement = torch.tensor(displacement_val)
|
| 224 |
+
X = torch.tensor(X_val, requires_grad=True)
|
| 225 |
+
dY = torch.tensor(dY_val)
|
| 226 |
+
|
| 227 |
+
# the deform_grid function is similar to the plain Python equivalent,
|
| 228 |
+
# but it accepts and returns PyTorch Tensors
|
| 229 |
+
X_deformed = etorch.deform_grid(X, displacement, order=3)
|
| 230 |
+
|
| 231 |
+
# the gradient w.r.t. X can be computed in the normal PyTorch manner
|
| 232 |
+
X_deformed.backward(dY)
|
| 233 |
+
print(X.grad)
|
| 234 |
+
```
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
License information
|
| 238 |
+
-------------------
|
| 239 |
+
|
| 240 |
+
This library was written by [Gijs van Tulder](https://vantulder.net/) at the
|
| 241 |
+
[Biomedical Imaging Group Rotterdam](https://www.bigr.nl/),
|
| 242 |
+
Erasmus MC, Rotterdam, the Netherlands.
|
| 243 |
+
|
| 244 |
+
It is inspired by a similar, Python-based implementation by
|
| 245 |
+
[Florian Calvet](https://github.com/fcalvet/image_tools).
|
| 246 |
+
This C-based implementation gives the same results, but is faster and has
|
| 247 |
+
a gradient implementation.
|
| 248 |
+
|
| 249 |
+
This C implementation includes a modified version of the `NI_GeometricTransform`
|
| 250 |
+
from [SciPy's ndimage library](https://github.com/scipy/scipy/blob/28636fbc3f16d562eab7b823546276111f6da98a/scipy/ndimage/src/ni_interpolation.c#L242).
|
| 251 |
+
|
| 252 |
+
This code is made available under the BSD license. See ``LICENSE.txt`` for details.
|
| 253 |
+
|
| 254 |
+
If you want to cite this library, please see [the DOI provided by Zenodo](https://zenodo.org/badge/latestdoi/145003699).
|
| 255 |
+
|
| 256 |
+
* <https://github.com/gvtulder/elasticdeform>
|
| 257 |
+
* <https://elasticdeform.readthedocs.io/>
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
elasticdeform-0.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
elasticdeform-0.5.0.dist-info/LICENSE.txt,sha256=7H_YOPM9NItKUzAOjYBnzK5RAotUfqQ_ThdrXbiJH3M,1637
|
| 3 |
+
elasticdeform-0.5.0.dist-info/METADATA,sha256=eZy982kQWVqy6q-huxzyU9wxqLMALxtRkscAKHTkyr0,9567
|
| 4 |
+
elasticdeform-0.5.0.dist-info/RECORD,,
|
| 5 |
+
elasticdeform-0.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
elasticdeform-0.5.0.dist-info/WHEEL,sha256=-ijGDuALlPxm3HbhKntps0QzHsi-DPlXqgerYTTJkFE,148
|
| 7 |
+
elasticdeform-0.5.0.dist-info/top_level.txt,sha256=ZM7bPik9OVDSTGfhzPEg-PncgbSY4xea5S79ZyexpgM,14
|
| 8 |
+
elasticdeform/__init__.py,sha256=zgz0CT5K208v_FoSuMirfLSMUFG4akp4pfWlsTkhiRo,79
|
| 9 |
+
elasticdeform/__pycache__/__init__.cpython-38.pyc,,
|
| 10 |
+
elasticdeform/__pycache__/deform_grid.cpython-38.pyc,,
|
| 11 |
+
elasticdeform/__pycache__/tf.cpython-38.pyc,,
|
| 12 |
+
elasticdeform/__pycache__/torch.cpython-38.pyc,,
|
| 13 |
+
elasticdeform/_deform_grid.cpython-38-x86_64-linux-gnu.so,sha256=zmaCHsClHG-T0bbvonOKXRnig2qtIqBMnsKj5iyYiOY,219000
|
| 14 |
+
elasticdeform/deform_grid.py,sha256=LXKntsIcAtWCCOTFcTzZH0h2r5tdiUxVvbx44iZE5Es,17986
|
| 15 |
+
elasticdeform/tf.py,sha256=oqAL248OQPbJ7OveTzj8s7XYLlNbY1F7VtAF2pPhZj4,2707
|
| 16 |
+
elasticdeform/torch.py,sha256=dyJJ5hQY71WLGjsNz8IeOJ5JuZyMzz6Q9_t9e9_pWGc,2178
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/REQUESTED
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.37.1)
|
| 3 |
+
Root-Is-Purelib: false
|
| 4 |
+
Tag: cp38-cp38-manylinux_2_17_x86_64
|
| 5 |
+
Tag: cp38-cp38-manylinux2014_x86_64
|
| 6 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/elasticdeform-0.5.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
elasticdeform
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/__init__.py
ADDED
|
@@ -0,0 +1,737 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ftfy: fixes text for you
|
| 3 |
+
|
| 4 |
+
This is a module for making text less broken. See the `fix_text` function
|
| 5 |
+
for more information.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import unicodedata
|
| 9 |
+
import warnings
|
| 10 |
+
from typing import List, NamedTuple, Optional, Tuple, Union, no_type_check
|
| 11 |
+
|
| 12 |
+
from ftfy import bad_codecs
|
| 13 |
+
from ftfy import chardata, fixes
|
| 14 |
+
from ftfy.badness import is_bad
|
| 15 |
+
from ftfy.formatting import display_ljust
|
| 16 |
+
|
| 17 |
+
__version__ = "6.1.1"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Though this function does nothing, it lets linters know that we're using
|
| 21 |
+
# ftfy.bad_codecs. See the docstring in `bad_codecs/__init__.py` for more.
|
| 22 |
+
bad_codecs.ok()
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ExplainedText(NamedTuple):
    """
    The result type of the ftfy functions that report an "explanation" of
    the fixes they applied, such as :func:`fix_and_explain()`.

    When the 'explain' option is turned off, those functions still return
    this type, but `explanation` is None.
    """

    text: str
    explanation: Optional[List[Tuple[str, str]]]
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TextFixerConfig(NamedTuple):
    r"""
    Configuration options for ftfy, stored as a namedtuple with defaults.

    Instantiate it by passing the values you want to change from their
    defaults as keyword arguments. For example, to keep every default but
    disable 'unescape_html'::

        TextFixerConfig(unescape_html=False)

    The options and their default values:

    - `unescape_html`: "auto"

      Whether to replace HTML entities such as &amp; with the character
      they represent. "auto" enables this by default, but disables it when
      a literal < character appears, which suggests the input is actual
      HTML whose entities should be preserved. The value can also be True
      (always apply this fixer) or False (never apply it).

    - `remove_terminal_escapes`: True

      Remove "ANSI" terminal escape codes, such as those that change the
      color of text in a terminal window.

    - `fix_encoding`: True

      Detect mojibake and attempt to repair it by re-decoding the text in
      a different encoding standard.

      The next four options tune how `fix_encoding` works, and have no
      effect when `fix_encoding` is False:

    - `restore_byte_a0`: True

      Allow a literal space (U+20) to be interpreted as a non-breaking
      space (U+A0) when that would make it part of a fixable mojibake
      string.

      Spaces are very common characters, so this can lead to false
      positives; it is only applied when there is strong evidence of
      mojibake. Disabling `restore_byte_a0` is safer from false positives,
      but creates false negatives.

    - `replace_lossy_sequences`: True

      Detect mojibake that has been partially replaced by the characters
      '�' or '?'. If the mojibake could be decoded otherwise, replace the
      detected sequence with '�'.

    - `decode_inconsistent_utf8`: True

      When a sequence distinctly looks like UTF-8 mojibake but there is no
      consistent way to reinterpret the whole string in a new encoding,
      replace the mojibake with the appropriate UTF-8 characters anyway.

      This helps to decode strings that were concatenated from different
      encodings.

    - `fix_c1_controls`: True

      Replace C1 control characters (the useless characters U+80 - U+9B
      that come from Latin-1) with their Windows-1252 equivalents, like
      HTML5 does, even if the whole string doesn't decode as Latin-1.

    - `fix_latin_ligatures`: True

      Replace common Latin-alphabet ligatures, such as ``fi``, with the
      letters they're made of.

    - `fix_character_width`: True

      Replace fullwidth Latin characters and halfwidth Katakana with
      their more standard widths.

    - `uncurl_quotes`: True

      Replace curly quotes with straight quotes.

    - `fix_line_breaks`: True

      Replace various forms of line breaks with the standard Unix line
      break, ``\n``.

    - `fix_surrogates`: True

      Replace sequences of UTF-16 surrogate codepoints with the character
      they were meant to encode. This fixes text decoded with the obsolete
      UCS-2 standard, letting it support high-numbered codepoints such as
      emoji.

    - `remove_control_chars`: True

      Remove certain control characters that have no displayed effect on
      text.

    - `normalization`: "NFC"

      Which kind of Unicode normalization to apply. NFC, the usual choice,
      combines letters with their following combining characters into
      single combined characters.

      "NFKC" applies additional compatibility conversions, such as turning
      the 'micro sign' into the visually identical Greek lowercase mu, but
      some of those conversions change meaning, e.g. "10³" becomes "103".

      Set `normalization` to None to apply no normalization.

    - `max_decode_length`: 1_000_000

      The maximum size of "segment" that ftfy will try to fix all at once.

    - `explain`: True

      Whether to build 'explanations', lists describing what ftfy changed.
      When False, explanations are None and the code that would build them
      is skipped, possibly saving time.

      Functions that accept a TextFixerConfig but don't return an
      explanation automatically set `explain` to False.
    """
    unescape_html: Union[str, bool] = "auto"
    remove_terminal_escapes: bool = True
    fix_encoding: bool = True
    restore_byte_a0: bool = True
    replace_lossy_sequences: bool = True
    decode_inconsistent_utf8: bool = True
    fix_c1_controls: bool = True
    fix_latin_ligatures: bool = True
    fix_character_width: bool = True
    uncurl_quotes: bool = True
    fix_line_breaks: bool = True
    fix_surrogates: bool = True
    remove_control_chars: bool = True
    normalization: Optional[str] = "NFC"
    max_decode_length: int = 1000000
    explain: bool = True
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _config_from_kwargs(config: TextFixerConfig, kwargs: dict) -> TextFixerConfig:
    """
    Turn keyword arguments given to one of ftfy's top-level functions into
    an updated TextFixerConfig.
    """
    # `fix_entities` is the pre-6.0 name of the `unescape_html` option;
    # translate it (with a deprecation warning) before applying.
    if "fix_entities" in kwargs:
        warnings.warn(
            "`fix_entities` has been renamed to `unescape_html`", DeprecationWarning
        )
        kwargs = dict(kwargs)  # don't mutate the caller's dict
        kwargs["unescape_html"] = kwargs.pop("fix_entities")
    return config._replace(**kwargs)
| 190 |
+
|
| 191 |
+
|
| 192 |
+
# Maps each boolean config-option name to the fixer function it enables.
FIXERS = {
    name: getattr(fixes, name)
    for name in [
        "unescape_html",
        "remove_terminal_escapes",
        "restore_byte_a0",
        "replace_lossy_sequences",
        "decode_inconsistent_utf8",
        "fix_c1_controls",
        "fix_latin_ligatures",
        "fix_character_width",
        "uncurl_quotes",
        "fix_line_breaks",
        "fix_surrogates",
        "remove_control_chars",
    ]
}
| 206 |
+
|
| 207 |
+
|
| 208 |
+
BYTES_ERROR_TEXT = """Hey wait, this isn't Unicode.
|
| 209 |
+
|
| 210 |
+
ftfy is designed to fix problems with text. Treating bytes like they're
|
| 211 |
+
interchangeable with Unicode text is usually something that introduces
|
| 212 |
+
problems with text.
|
| 213 |
+
|
| 214 |
+
You should first decode these bytes from the encoding you think they're in.
|
| 215 |
+
If you're not sure what encoding they're in:
|
| 216 |
+
|
| 217 |
+
- First, try to find out. 'utf-8' is a good assumption.
|
| 218 |
+
- If the encoding is simply unknowable, try running your bytes through
|
| 219 |
+
ftfy.guess_bytes. As the name implies, this may not always be accurate.
|
| 220 |
+
|
| 221 |
+
For more information on the distinction between bytes and text, read the
|
| 222 |
+
Python Unicode HOWTO:
|
| 223 |
+
|
| 224 |
+
http://docs.python.org/3/howto/unicode.html
|
| 225 |
+
"""
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def _try_fix(
    fixer_name: str, text: str, config: TextFixerConfig, steps: Optional[list]
) -> str:
    """
    Helper shared by several 'fixer' steps: apply the named fixer only if
    its config option is enabled, and record it in `steps` when it
    actually changed the text.
    """
    if not getattr(config, fixer_name):
        return text

    fixed = FIXERS[fixer_name](text)
    if steps is not None and fixed != text:
        steps.append(("apply", fixer_name))
    return fixed
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def fix_text(text: str, config: Optional[TextFixerConfig] = None, **kwargs) -> str:
    r"""
    Fix inconsistencies and glitches in Unicode text, such as mojibake
    (text that was decoded in the wrong encoding).

    Some examples::

        >>> fix_text('✔ No problems')
        '✔ No problems'

        >>> print(fix_text("¯\\_(ã\x83\x84)_/¯"))
        ¯\_(ツ)_/¯

        >>> fix_text('Broken text… it’s flubberific!')
        "Broken text... it's flubberific!"

        >>> fix_text('LOUD NOISES')
        'LOUD NOISES'

    Configuration is a :class:`TextFixerConfig` object; see that class's
    docstring or the full documentation at ftfy.readthedocs.org for the
    available options. For convenience and backward compatibility, the
    same options can be given as keyword arguments instead. These two
    calls both skip the "uncurl_quotes" step::

        fix_text(text, TextFixerConfig(uncurl_quotes=False))
        fix_text(text, uncurl_quotes=False)

    The text is fixed in independent segments -- usually lines of text, or
    chunks of at most `config.max_decode_length` codepoints when there
    aren't enough line breaks. Bounding the segment length avoids
    unbounded slowdowns.

    Because different segments may get different fixes, this function does
    not return an 'explanation'. Use :func:`fix_and_explain()` to fix a
    string as a single segment and learn what was fixed.
    """

    if config is None:
        config = TextFixerConfig(explain=False)
    config = _config_from_kwargs(config, kwargs)
    if isinstance(text, bytes):
        raise UnicodeError(BYTES_ERROR_TEXT)

    fixed_parts = []
    start = 0
    length = len(text)
    while start < length:
        # End the segment just after the next newline, or at the end of
        # the text, but never more than max_decode_length codepoints away.
        end = text.find("\n", start) + 1
        if end == 0:
            end = length
        end = min(end, start + config.max_decode_length)

        segment = text[start:end]
        if config.unescape_html == "auto" and "<" in segment:
            # A literal '<' suggests real HTML; stop unescaping entities
            # for this and all later segments.
            config = config._replace(unescape_html=False)
        fixed_segment, _ = fix_and_explain(segment, config)
        fixed_parts.append(fixed_segment)
        start = end
    return "".join(fixed_parts)
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def fix_and_explain(
    text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
    """
    Fix text as a single segment, returning the fixed text together with
    an explanation of what was fixed.

    The explanation is a list of steps that can be replayed with
    :func:`apply_plan`; it is None when config.explain is False.
    """
    if isinstance(text, bytes):
        raise UnicodeError(BYTES_ERROR_TEXT)
    if config is None:
        config = TextFixerConfig()
    config = _config_from_kwargs(config, kwargs)

    if config.unescape_html == "auto" and "<" in text:
        # A literal '<' suggests real HTML: leave its entities alone.
        config = config._replace(unescape_html=False)

    # `steps` collects the explanation; None means "don't explain".
    steps: Optional[List[Tuple[str, str]]] = [] if config.explain else None

    # Repeat the full fixing pass until it reaches a fixed point.
    while True:
        previous = text

        text = _try_fix("unescape_html", text, config, steps)

        if config.fix_encoding:
            if steps is None:
                text = fix_encoding(text)
            else:
                text, encoding_steps = fix_encoding_and_explain(text, config)
                if encoding_steps is not None:
                    steps.extend(encoding_steps)

        for fixer_name in (
            "fix_c1_controls",
            "fix_latin_ligatures",
            "fix_character_width",
            "uncurl_quotes",
            "fix_line_breaks",
            "fix_surrogates",
            "remove_terminal_escapes",
            "remove_control_chars",
        ):
            text = _try_fix(fixer_name, text, config, steps)

        if config.normalization is not None:
            normalized = unicodedata.normalize(config.normalization, text)
            if steps is not None and normalized != text:
                steps.append(("normalize", config.normalization))
            text = normalized

        if text == previous:
            return ExplainedText(text, steps)
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def fix_encoding_and_explain(
    text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
    """
    Apply the steps of ftfy that detect mojibake and fix it. Returns the fixed
    text and a list explaining what was fixed.

    This includes fixing text by encoding and decoding it in different encodings,
    as well as the subordinate fixes `restore_byte_a0`, `replace_lossy_sequences`,
    `decode_inconsistent_utf8`, and `fix_c1_controls`.

    Examples::

        >>> fix_encoding_and_explain("só")
        ExplainedText(text='só', explanation=[('encode', 'latin-1'), ('decode', 'utf-8')])

        >>> result = fix_encoding_and_explain("voilà le travail")
        >>> result.text
        'voilà le travail'
        >>> result.explanation
        [('encode', 'latin-1'), ('transcode', 'restore_byte_a0'), ('decode', 'utf-8')]

    """
    if config is None:
        config = TextFixerConfig()
    if isinstance(text, bytes):
        # Bytes are not accepted; the caller must decode first.
        raise UnicodeError(BYTES_ERROR_TEXT)
    config = _config_from_kwargs(config, kwargs)

    if not config.fix_encoding:
        # A weird trivial case: we're asked to fix the encoding, but skip
        # fixing the encoding
        return ExplainedText(text, [])

    accumulated_plan: List[Tuple[str, str]] = []
    while True:
        # Repeat single fixing steps until a step leaves the text unchanged,
        # which means the heuristic has converged.
        previous = text
        text, step_plan = _fix_encoding_one_step_and_explain(text, config)
        if step_plan is not None:
            accumulated_plan.extend(step_plan)
        if text == previous:
            return ExplainedText(text, accumulated_plan)
| 422 |
+
|
| 423 |
+
def _fix_encoding_one_step_and_explain(
    text: str, config: TextFixerConfig
) -> ExplainedText:
    """
    Perform one step of fixing the encoding of text.

    Returns an ExplainedText pairing the (possibly unchanged) text with the
    list of (operation, argument) steps that produced it. An empty step list
    means this step found nothing to fix.
    """
    if config is None:
        config = TextFixerConfig()

    if len(text) == 0:
        # Nothing to do for empty text.
        return ExplainedText(text, [])

    # The first plan is to return ASCII text unchanged, as well as text
    # that doesn't look like it contains mojibake
    if chardata.possible_encoding(text, "ascii") or not is_bad(text):
        return ExplainedText(text, [])

    # As we go through the next step, remember the possible encodings
    # that we encounter but don't successfully fix yet. We may need them
    # later.
    possible_1byte_encodings: List[str] = []

    # Suppose the text was supposed to be UTF-8, but it was decoded using
    # a single-byte encoding instead. When these cases can be fixed, they
    # are usually the correct thing to do, so try them next.
    for encoding in chardata.CHARMAP_ENCODINGS:
        if chardata.possible_encoding(text, encoding):
            possible_1byte_encodings.append(encoding)
            encoded_bytes = text.encode(encoding)
            encode_step = ("encode", encoding)
            transcode_steps = []

            # Now, find out if it's UTF-8 (or close enough). Otherwise,
            # remember the encoding for later.
            try:
                decoding = "utf-8"
                # Check encoded_bytes for sequences that would be UTF-8,
                # except they have b' ' where b'\xa0' would belong.
                if config.restore_byte_a0 and chardata.ALTERED_UTF8_RE.search(
                    encoded_bytes
                ):
                    replaced_bytes = fixes.restore_byte_a0(encoded_bytes)
                    if replaced_bytes != encoded_bytes:
                        transcode_steps.append(("transcode", "restore_byte_a0"))
                        encoded_bytes = replaced_bytes

                # Replace sequences where information has been lost
                if config.replace_lossy_sequences and encoding.startswith("sloppy"):
                    replaced_bytes = fixes.replace_lossy_sequences(encoded_bytes)
                    if replaced_bytes != encoded_bytes:
                        transcode_steps.append(("transcode", "replace_lossy_sequences"))
                        encoded_bytes = replaced_bytes

                # Bytes 0xed and 0xc0 signal CESU-8 / Java-style modified
                # UTF-8, which the 'utf-8-variants' codec handles.
                if 0xED in encoded_bytes or 0xC0 in encoded_bytes:
                    decoding = "utf-8-variants"

                decode_step = ("decode", decoding)
                steps = [encode_step] + transcode_steps + [decode_step]
                fixed = encoded_bytes.decode(decoding)
                return ExplainedText(fixed, steps)

            except UnicodeDecodeError:
                # Not (close enough to) UTF-8 through this encoding; try the
                # next candidate encoding.
                pass

    # Look for a-hat-euro sequences that remain, and fix them in isolation.
    if config.decode_inconsistent_utf8 and chardata.UTF8_DETECTOR_RE.search(text):
        steps = [("apply", "decode_inconsistent_utf8")]
        fixed = fixes.decode_inconsistent_utf8(text)
        if fixed != text:
            return ExplainedText(fixed, steps)

    # The next most likely case is that this is Latin-1 that was intended to
    # be read as Windows-1252, because those two encodings in particular are
    # easily confused.
    if "latin-1" in possible_1byte_encodings:
        if "windows-1252" in possible_1byte_encodings:
            # This text is in the intersection of Latin-1 and
            # Windows-1252, so it's probably legit.
            return ExplainedText(text, [])
        else:
            # Otherwise, it means we have characters that are in Latin-1 but
            # not in Windows-1252. Those are C1 control characters. Nobody
            # wants those. Assume they were meant to be Windows-1252.
            try:
                fixed = text.encode("latin-1").decode("windows-1252")
                if fixed != text:
                    steps = [("encode", "latin-1"), ("decode", "windows-1252")]
                    return ExplainedText(fixed, steps)
            except UnicodeDecodeError:
                pass

    # Fix individual characters of Latin-1 with a less satisfying explanation
    if config.fix_c1_controls and chardata.C1_CONTROL_RE.search(text):
        steps = [("transcode", "fix_c1_controls")]
        fixed = fixes.fix_c1_controls(text)
        return ExplainedText(fixed, steps)

    # The cases that remain are mixups between two different single-byte
    # encodings, and not the common case of Latin-1 vs. Windows-1252.
    #
    # With the new heuristic in 6.0, it's possible that we're closer to solving
    # these in some cases. It would require a lot of testing and tuning, though.
    # For now, we leave the text unchanged in these cases.
    return ExplainedText(text, [])
|
| 528 |
+
|
| 529 |
+
def fix_encoding(
    text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> str:
    """
    Apply just the encoding-fixing steps of ftfy to this text. Returns the
    fixed text, discarding the explanation.

    >>> fix_encoding("ó")
    'ó'
    >>> fix_encoding("&ATILDE;&SUP3;")
    '&ATILDE;&SUP3;'
    """
    # Fix: `config: TextFixerConfig = None` was an implicit-Optional
    # annotation (disallowed by PEP 484); spell it Optional[...] as the
    # sibling fix_encoding_and_explain already does.
    if config is None:
        # No explanation is returned from this function, so don't spend
        # time building one.
        config = TextFixerConfig(explain=False)
    config = _config_from_kwargs(config, kwargs)
    fixed, _explanation = fix_encoding_and_explain(text, config)
    return fixed
| 545 |
+
|
| 546 |
+
# Some alternate names for the main functions
|
| 547 |
+
ftfy = fix_text
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def fix_text_segment(
    text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> str:
    """
    Fix text as a single segment, with a consistent sequence of steps that
    are applied to fix the text. Discard the explanation.
    """
    # Fix: `config: TextFixerConfig = None` was an implicit-Optional
    # annotation (disallowed by PEP 484); spell it Optional[...] for
    # consistency with fix_encoding_and_explain.
    if config is None:
        # Explanations are discarded here, so skip building them.
        config = TextFixerConfig(explain=False)
    config = _config_from_kwargs(config, kwargs)
    fixed, _explanation = fix_and_explain(text, config)
    return fixed
| 561 |
+
|
| 562 |
+
def fix_file(input_file, encoding=None, config=None, **kwargs):
    """
    Fix text that is found in a file.

    If the file is being read as Unicode text, use that. If it's being read as
    bytes, then we hope an encoding was supplied. If not, unfortunately, we
    have to guess what encoding it is. We'll try a few common encodings, but we
    make no promises. See the `guess_bytes` function for how this is done.

    The output is a stream of fixed lines of text.
    """
    if config is None:
        config = TextFixerConfig()
    config = _config_from_kwargs(config, kwargs)

    for raw_line in input_file:
        if isinstance(raw_line, bytes):
            if encoding is None:
                # Guess once; the detected encoding is reused for the
                # remaining lines of the file.
                raw_line, encoding = guess_bytes(raw_line)
            else:
                raw_line = raw_line.decode(encoding)
        if config.unescape_html == "auto" and "<" in raw_line:
            # The line looks like HTML markup, so don't unescape entities.
            config = config._replace(unescape_html=False)

        fixed_line, _explanation = fix_and_explain(raw_line, config)
        yield fixed_line
|
| 589 |
+
|
| 590 |
+
def guess_bytes(bstring):
    """
    NOTE: Using `guess_bytes` is not the recommended way of using ftfy. ftfy
    is not designed to be an encoding detector.

    In the unfortunate situation that you have some bytes in an unknown
    encoding, ftfy can guess a reasonable strategy for decoding them, by trying
    a few common encodings that can be distinguished from each other.

    Unlike the rest of ftfy, this may not be accurate, and it may *create*
    Unicode problems instead of solving them!

    The encodings we try here are:

    - UTF-16 with a byte order mark, because a UTF-16 byte order mark looks
      like nothing else
    - UTF-8, because it's the global standard, which has been used by a
      majority of the Web since 2008
    - "utf-8-variants", or buggy implementations of UTF-8
    - MacRoman, because Microsoft Office thinks it's still a thing, and it
      can be distinguished by its line breaks. (If there are no line breaks in
      the string, though, you're out of luck.)
    - "sloppy-windows-1252", the Latin-1-like encoding that is the most common
      single-byte encoding.
    """
    if isinstance(bstring, str):
        raise UnicodeError(
            "This string was already decoded as Unicode. You should pass "
            "bytes to guess_bytes, not Unicode."
        )

    # A UTF-16 byte order mark (either endianness) is unmistakable.
    if bstring.startswith((b"\xfe\xff", b"\xff\xfe")):
        return bstring.decode("utf-16"), "utf-16"

    distinct_bytes = set(bstring)
    try:
        if distinct_bytes.isdisjoint({0xED, 0xC0}):
            return bstring.decode("utf-8"), "utf-8"
        # Byte 0xed can be used to encode a range of codepoints that
        # are UTF-16 surrogates. UTF-8 does not use UTF-16 surrogates,
        # so when we see 0xed, it's very likely we're being asked to
        # decode CESU-8, the variant that encodes UTF-16 surrogates
        # instead of the original characters themselves.
        #
        # This will occasionally trigger on standard UTF-8, as there
        # are some Korean characters that also use byte 0xed, but that's
        # not harmful because standard UTF-8 characters will decode the
        # same way in our 'utf-8-variants' codec.
        #
        # Byte 0xc0 is impossible because, numerically, it would only
        # encode characters lower than U+0040. Those already have
        # single-byte representations, and UTF-8 requires using the
        # shortest possible representation. However, Java hides the null
        # codepoint, U+0000, in a non-standard longer representation -- it
        # encodes it as 0xc0 0x80 instead of 0x00, guaranteeing that 0x00
        # will never appear in the encoded bytes.
        #
        # The 'utf-8-variants' decoder can handle both of these cases, as
        # well as standard UTF-8, at the cost of a bit of speed.
        return bstring.decode("utf-8-variants"), "utf-8-variants"
    except UnicodeDecodeError:
        pass

    if 0x0D in distinct_bytes and 0x0A not in distinct_bytes:
        # Files that contain CR and not LF are likely to be MacRoman.
        return bstring.decode("macroman"), "macroman"

    # Fall back on the most common single-byte encoding.
    return bstring.decode("sloppy-windows-1252"), "sloppy-windows-1252"
| 660 |
+
|
| 661 |
+
@no_type_check
|
| 662 |
+
def apply_plan(text: str, plan: List[Tuple[str, str]]):
|
| 663 |
+
"""
|
| 664 |
+
Apply a plan for fixing the encoding of text.
|
| 665 |
+
|
| 666 |
+
The plan is a list of tuples of the form (operation, arg).
|
| 667 |
+
|
| 668 |
+
`operation` is one of:
|
| 669 |
+
|
| 670 |
+
- `'encode'`: convert a string to bytes, using `arg` as the encoding
|
| 671 |
+
- `'decode'`: convert bytes to a string, using `arg` as the encoding
|
| 672 |
+
- `'transcode'`: convert bytes to bytes, using the function named `arg`
|
| 673 |
+
- `'apply'`: convert a string to a string, using the function named `arg`
|
| 674 |
+
|
| 675 |
+
The functions that can be applied by 'transcode' and 'apply' are
|
| 676 |
+
specifically those that appear in the dictionary named `FIXERS`. They
|
| 677 |
+
can also can be imported from the `ftfy.fixes` module.
|
| 678 |
+
|
| 679 |
+
Example::
|
| 680 |
+
|
| 681 |
+
>>> mojibake = "schön"
|
| 682 |
+
>>> text, plan = fix_and_explain(mojibake)
|
| 683 |
+
>>> apply_plan(mojibake, plan)
|
| 684 |
+
'schön'
|
| 685 |
+
"""
|
| 686 |
+
obj = text
|
| 687 |
+
for operation, encoding in plan:
|
| 688 |
+
if operation == "encode":
|
| 689 |
+
obj = obj.encode(encoding)
|
| 690 |
+
elif operation == "decode":
|
| 691 |
+
obj = obj.decode(encoding)
|
| 692 |
+
elif operation in ("transcode", "apply"):
|
| 693 |
+
if encoding in FIXERS:
|
| 694 |
+
obj = FIXERS[encoding](obj)
|
| 695 |
+
else:
|
| 696 |
+
raise ValueError("Unknown function to apply: %s" % encoding)
|
| 697 |
+
else:
|
| 698 |
+
raise ValueError("Unknown plan step: %s" % operation)
|
| 699 |
+
|
| 700 |
+
return obj
|
| 701 |
+
|
| 702 |
+
|
| 703 |
+
def explain_unicode(text: str):
    """
    A utility method that's useful for debugging mysterious Unicode.

    It breaks down a string, showing you for each codepoint its number in
    hexadecimal, its glyph, its category in the Unicode standard, and its name
    in the Unicode standard.

    >>> explain_unicode('(╯°□°)╯︵ ┻━┻')
    U+0028  (       [Ps] LEFT PARENTHESIS
    U+256F  ╯       [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
    U+00B0  °       [So] DEGREE SIGN
    U+25A1  □       [So] WHITE SQUARE
    U+00B0  °       [So] DEGREE SIGN
    U+0029  )       [Pe] RIGHT PARENTHESIS
    U+256F  ╯       [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
    U+FE35  ︵      [Ps] PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS
    U+0020          [Zs] SPACE
    U+253B  ┻       [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
    U+2501  ━       [So] BOX DRAWINGS HEAVY HORIZONTAL
    U+253B  ┻       [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
    """
    for character in text:
        # Unprintable codepoints are shown as backslash escapes so the
        # output stays on one line per character.
        if character.isprintable():
            shown = character
        else:
            shown = character.encode("unicode-escape").decode("ascii")
        print(
            "U+{code:04X} {display} [{category}] {name}".format(
                display=display_ljust(shown, 7),
                code=ord(character),
                category=unicodedata.category(character),
                name=unicodedata.name(character, "<unknown>"),
            )
        )
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/badness.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
`ftfy.badness` contains a heuristic that detects likely mojibake.
|
| 3 |
+
|
| 4 |
+
This heuristic signals to ftfy which segments of text need to be fixed, and
|
| 5 |
+
also indicates when the text can stop being fixed.
|
| 6 |
+
|
| 7 |
+
The design of this heuristic is that we categorize the approximately 400
|
| 8 |
+
Unicode characters that occur in UTF-8 mojibake, specifically the characters
|
| 9 |
+
that come from mixing up UTF-8 with the other encodings we support. We
|
| 10 |
+
identify sequences and contexts of these characters that are much more likely
|
| 11 |
+
to be mojibake than intended strings, such as lowercase accented letters
|
| 12 |
+
followed immediately by currency symbols.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import warnings
|
| 16 |
+
import re
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# There are only 403 characters that occur in known UTF-8 mojibake, and we can
|
| 20 |
+
# characterize them:
|
| 21 |
+
|
| 22 |
+
MOJIBAKE_CATEGORIES = {
|
| 23 |
+
# Characters that appear in many different contexts. Sequences that contain
|
| 24 |
+
# them are not inherently mojibake
|
| 25 |
+
"common": (
|
| 26 |
+
"\N{NO-BREAK SPACE}"
|
| 27 |
+
"\N{SOFT HYPHEN}"
|
| 28 |
+
"\N{MIDDLE DOT}"
|
| 29 |
+
"\N{ACUTE ACCENT}"
|
| 30 |
+
"\N{EN DASH}"
|
| 31 |
+
"\N{EM DASH}"
|
| 32 |
+
"\N{HORIZONTAL BAR}"
|
| 33 |
+
"\N{HORIZONTAL ELLIPSIS}"
|
| 34 |
+
"\N{RIGHT SINGLE QUOTATION MARK}"
|
| 35 |
+
),
|
| 36 |
+
# the C1 control character range, which have no uses outside of mojibake anymore
|
| 37 |
+
"c1": "\x80-\x9f",
|
| 38 |
+
# Characters that are nearly 100% used in mojibake
|
| 39 |
+
"bad": (
|
| 40 |
+
"\N{BROKEN BAR}"
|
| 41 |
+
"\N{CURRENCY SIGN}"
|
| 42 |
+
"\N{DIAERESIS}"
|
| 43 |
+
"\N{NOT SIGN}"
|
| 44 |
+
"\N{MACRON}"
|
| 45 |
+
"\N{PILCROW SIGN}"
|
| 46 |
+
"\N{SECTION SIGN}"
|
| 47 |
+
"\N{CEDILLA}"
|
| 48 |
+
"\N{LATIN SMALL LETTER F WITH HOOK}"
|
| 49 |
+
"\N{MODIFIER LETTER CIRCUMFLEX ACCENT}" # it's not a modifier
|
| 50 |
+
"\N{CARON}"
|
| 51 |
+
"\N{BREVE}"
|
| 52 |
+
"\N{OGONEK}"
|
| 53 |
+
"\N{SMALL TILDE}"
|
| 54 |
+
"\N{DAGGER}"
|
| 55 |
+
"\N{DOUBLE DAGGER}"
|
| 56 |
+
"\N{PER MILLE SIGN}"
|
| 57 |
+
"\N{REVERSED NOT SIGN}"
|
| 58 |
+
"\N{LOZENGE}"
|
| 59 |
+
"\ufffd"
|
| 60 |
+
# Theoretically these would appear in 'numeric' contexts, but when they
|
| 61 |
+
# co-occur with other mojibake characters, it's not really ambiguous
|
| 62 |
+
"\N{FEMININE ORDINAL INDICATOR}"
|
| 63 |
+
"\N{MASCULINE ORDINAL INDICATOR}"
|
| 64 |
+
),
|
| 65 |
+
"currency": (
|
| 66 |
+
"\N{CENT SIGN}"
|
| 67 |
+
"\N{POUND SIGN}"
|
| 68 |
+
"\N{YEN SIGN}"
|
| 69 |
+
"\N{PESETA SIGN}"
|
| 70 |
+
"\N{EURO SIGN}"
|
| 71 |
+
),
|
| 72 |
+
"start_punctuation": (
|
| 73 |
+
"\N{INVERTED EXCLAMATION MARK}"
|
| 74 |
+
"\N{LEFT-POINTING DOUBLE ANGLE QUOTATION MARK}"
|
| 75 |
+
"\N{INVERTED QUESTION MARK}"
|
| 76 |
+
"\N{COPYRIGHT SIGN}"
|
| 77 |
+
"\N{GREEK TONOS}"
|
| 78 |
+
"\N{GREEK DIALYTIKA TONOS}"
|
| 79 |
+
"\N{LEFT SINGLE QUOTATION MARK}"
|
| 80 |
+
"\N{SINGLE LOW-9 QUOTATION MARK}"
|
| 81 |
+
"\N{LEFT DOUBLE QUOTATION MARK}"
|
| 82 |
+
"\N{DOUBLE LOW-9 QUOTATION MARK}"
|
| 83 |
+
"\N{BULLET}"
|
| 84 |
+
"\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}"
|
| 85 |
+
"\uf8ff" # OS-specific symbol, usually the Apple logo
|
| 86 |
+
),
|
| 87 |
+
"end_punctuation": (
|
| 88 |
+
"\N{REGISTERED SIGN}"
|
| 89 |
+
"\N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
|
| 90 |
+
"\N{DOUBLE ACUTE ACCENT}"
|
| 91 |
+
"\N{RIGHT DOUBLE QUOTATION MARK}"
|
| 92 |
+
"\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}"
|
| 93 |
+
"\N{TRADE MARK SIGN}"
|
| 94 |
+
),
|
| 95 |
+
"numeric": (
|
| 96 |
+
"\N{SUPERSCRIPT TWO}"
|
| 97 |
+
"\N{SUPERSCRIPT THREE}"
|
| 98 |
+
"\N{SUPERSCRIPT ONE}"
|
| 99 |
+
"\N{PLUS-MINUS SIGN}"
|
| 100 |
+
"\N{VULGAR FRACTION ONE QUARTER}"
|
| 101 |
+
"\N{VULGAR FRACTION ONE HALF}"
|
| 102 |
+
"\N{VULGAR FRACTION THREE QUARTERS}"
|
| 103 |
+
"\N{MULTIPLICATION SIGN}"
|
| 104 |
+
"\N{MICRO SIGN}"
|
| 105 |
+
"\N{DIVISION SIGN}"
|
| 106 |
+
"\N{FRACTION SLASH}"
|
| 107 |
+
"\N{PARTIAL DIFFERENTIAL}"
|
| 108 |
+
"\N{INCREMENT}"
|
| 109 |
+
"\N{N-ARY PRODUCT}"
|
| 110 |
+
"\N{N-ARY SUMMATION}"
|
| 111 |
+
"\N{SQUARE ROOT}"
|
| 112 |
+
"\N{INFINITY}"
|
| 113 |
+
"\N{INTERSECTION}"
|
| 114 |
+
"\N{INTEGRAL}"
|
| 115 |
+
"\N{ALMOST EQUAL TO}"
|
| 116 |
+
"\N{NOT EQUAL TO}"
|
| 117 |
+
"\N{IDENTICAL TO}"
|
| 118 |
+
"\N{LESS-THAN OR EQUAL TO}"
|
| 119 |
+
"\N{GREATER-THAN OR EQUAL TO}"
|
| 120 |
+
"\N{NUMERO SIGN}"
|
| 121 |
+
),
|
| 122 |
+
# Letters that might be used to make emoticon faces (kaomoji), and
|
| 123 |
+
# therefore might need to appear in more improbable-looking contexts.
|
| 124 |
+
#
|
| 125 |
+
# These are concatenated character ranges for use in a regex. I know
|
| 126 |
+
# they look like faces themselves. I think expressing the ranges like
|
| 127 |
+
# this helps to illustrate why we need to be careful with these
|
| 128 |
+
# characters.
|
| 129 |
+
"kaomoji": (
|
| 130 |
+
"Ò-Ö"
|
| 131 |
+
"Ù-Ü"
|
| 132 |
+
"ò-ö"
|
| 133 |
+
"ø-ü"
|
| 134 |
+
"\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}"
|
| 135 |
+
"\N{DEGREE SIGN}"
|
| 136 |
+
),
|
| 137 |
+
"upper_accented": (
|
| 138 |
+
# LATIN CAPITAL LETTER A WITH GRAVE - LATIN CAPITAL LETTER N WITH TILDE
|
| 139 |
+
"\xc0-\xd1"
|
| 140 |
+
# skip capital O's and U's that could be used in kaomoji, but
|
| 141 |
+
# include Ø because it's very common in Arabic mojibake:
|
| 142 |
+
"\N{LATIN CAPITAL LETTER O WITH STROKE}"
|
| 143 |
+
"\N{LATIN CAPITAL LETTER U WITH DIAERESIS}"
|
| 144 |
+
"\N{LATIN CAPITAL LETTER Y WITH ACUTE}"
|
| 145 |
+
"\N{LATIN CAPITAL LETTER A WITH BREVE}"
|
| 146 |
+
"\N{LATIN CAPITAL LETTER A WITH OGONEK}"
|
| 147 |
+
"\N{LATIN CAPITAL LETTER C WITH ACUTE}"
|
| 148 |
+
"\N{LATIN CAPITAL LETTER C WITH CARON}"
|
| 149 |
+
"\N{LATIN CAPITAL LETTER D WITH CARON}"
|
| 150 |
+
"\N{LATIN CAPITAL LETTER D WITH STROKE}"
|
| 151 |
+
"\N{LATIN CAPITAL LETTER E WITH OGONEK}"
|
| 152 |
+
"\N{LATIN CAPITAL LETTER E WITH CARON}"
|
| 153 |
+
"\N{LATIN CAPITAL LETTER G WITH BREVE}"
|
| 154 |
+
"\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}"
|
| 155 |
+
"\N{LATIN CAPITAL LETTER L WITH ACUTE}"
|
| 156 |
+
"\N{LATIN CAPITAL LETTER L WITH CARON}"
|
| 157 |
+
"\N{LATIN CAPITAL LETTER L WITH STROKE}"
|
| 158 |
+
"\N{LATIN CAPITAL LETTER N WITH ACUTE}"
|
| 159 |
+
"\N{LATIN CAPITAL LETTER N WITH CARON}"
|
| 160 |
+
"\N{LATIN CAPITAL LIGATURE OE}"
|
| 161 |
+
"\N{LATIN CAPITAL LETTER R WITH CARON}"
|
| 162 |
+
"\N{LATIN CAPITAL LETTER S WITH ACUTE}"
|
| 163 |
+
"\N{LATIN CAPITAL LETTER S WITH CEDILLA}"
|
| 164 |
+
"\N{LATIN CAPITAL LETTER S WITH CARON}"
|
| 165 |
+
"\N{LATIN CAPITAL LETTER T WITH CEDILLA}"
|
| 166 |
+
"\N{LATIN CAPITAL LETTER T WITH CARON}"
|
| 167 |
+
"\N{LATIN CAPITAL LETTER U WITH RING ABOVE}"
|
| 168 |
+
"\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}"
|
| 169 |
+
"\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}"
|
| 170 |
+
"\N{LATIN CAPITAL LETTER Z WITH ACUTE}"
|
| 171 |
+
"\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"
|
| 172 |
+
"\N{LATIN CAPITAL LETTER Z WITH CARON}"
|
| 173 |
+
"\N{CYRILLIC CAPITAL LETTER GHE WITH UPTURN}"
|
| 174 |
+
),
|
| 175 |
+
"lower_accented": (
|
| 176 |
+
"\N{LATIN SMALL LETTER SHARP S}"
|
| 177 |
+
# LATIN SMALL LETTER A WITH GRAVE - LATIN SMALL LETTER N WITH TILDE
|
| 178 |
+
"\xe0-\xf1"
|
| 179 |
+
# skip o's and u's that could be used in kaomoji
|
| 180 |
+
"\N{LATIN SMALL LETTER A WITH BREVE}"
|
| 181 |
+
"\N{LATIN SMALL LETTER A WITH OGONEK}"
|
| 182 |
+
"\N{LATIN SMALL LETTER C WITH ACUTE}"
|
| 183 |
+
"\N{LATIN SMALL LETTER C WITH CARON}"
|
| 184 |
+
"\N{LATIN SMALL LETTER D WITH CARON}"
|
| 185 |
+
"\N{LATIN SMALL LETTER D WITH STROKE}"
|
| 186 |
+
"\N{LATIN SMALL LETTER E WITH OGONEK}"
|
| 187 |
+
"\N{LATIN SMALL LETTER E WITH CARON}"
|
| 188 |
+
"\N{LATIN SMALL LETTER G WITH BREVE}"
|
| 189 |
+
"\N{LATIN SMALL LETTER L WITH ACUTE}"
|
| 190 |
+
"\N{LATIN SMALL LETTER L WITH CARON}"
|
| 191 |
+
"\N{LATIN SMALL LETTER L WITH STROKE}"
|
| 192 |
+
"\N{LATIN SMALL LIGATURE OE}"
|
| 193 |
+
"\N{LATIN SMALL LETTER R WITH ACUTE}"
|
| 194 |
+
"\N{LATIN SMALL LETTER S WITH ACUTE}"
|
| 195 |
+
"\N{LATIN SMALL LETTER S WITH CEDILLA}"
|
| 196 |
+
"\N{LATIN SMALL LETTER S WITH CARON}"
|
| 197 |
+
"\N{LATIN SMALL LETTER T WITH CARON}"
|
| 198 |
+
"\N{LATIN SMALL LETTER U WITH DIAERESIS}"
|
| 199 |
+
"\N{LATIN SMALL LETTER Z WITH ACUTE}"
|
| 200 |
+
"\N{LATIN SMALL LETTER Z WITH DOT ABOVE}"
|
| 201 |
+
"\N{LATIN SMALL LETTER Z WITH CARON}"
|
| 202 |
+
"\N{CYRILLIC SMALL LETTER GHE WITH UPTURN}"
|
| 203 |
+
"\N{LATIN SMALL LIGATURE FI}"
|
| 204 |
+
"\N{LATIN SMALL LIGATURE FL}"
|
| 205 |
+
),
|
| 206 |
+
"upper_common": (
|
| 207 |
+
"\N{LATIN CAPITAL LETTER THORN}"
|
| 208 |
+
"\N{GREEK CAPITAL LETTER ALPHA}-\N{GREEK CAPITAL LETTER OMEGA}"
|
| 209 |
+
# not included under 'accented' because these can commonly
|
| 210 |
+
# occur at ends of words, in positions where they'd be detected
|
| 211 |
+
# as mojibake
|
| 212 |
+
"\N{GREEK CAPITAL LETTER ALPHA WITH TONOS}"
|
| 213 |
+
"\N{GREEK CAPITAL LETTER EPSILON WITH TONOS}"
|
| 214 |
+
"\N{GREEK CAPITAL LETTER ETA WITH TONOS}"
|
| 215 |
+
"\N{GREEK CAPITAL LETTER IOTA WITH TONOS}"
|
| 216 |
+
"\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}"
|
| 217 |
+
"\N{GREEK CAPITAL LETTER UPSILON WITH TONOS}"
|
| 218 |
+
"\N{GREEK CAPITAL LETTER OMEGA WITH TONOS}"
|
| 219 |
+
"\N{GREEK CAPITAL LETTER IOTA WITH DIALYTIKA}"
|
| 220 |
+
"\N{GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA}"
|
| 221 |
+
"\N{CYRILLIC CAPITAL LETTER IO}-\N{CYRILLIC CAPITAL LETTER YA}"
|
| 222 |
+
),
|
| 223 |
+
"lower_common": (
|
| 224 |
+
# lowercase thorn does not appear in mojibake
|
| 225 |
+
"\N{GREEK SMALL LETTER ALPHA}-\N{GREEK SMALL LETTER OMEGA}"
|
| 226 |
+
"\N{GREEK SMALL LETTER ALPHA WITH TONOS}"
|
| 227 |
+
"\N{GREEK SMALL LETTER EPSILON WITH TONOS}"
|
| 228 |
+
"\N{GREEK SMALL LETTER ETA WITH TONOS}"
|
| 229 |
+
"\N{GREEK SMALL LETTER IOTA WITH TONOS}"
|
| 230 |
+
"\N{GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS}"
|
| 231 |
+
"\N{CYRILLIC SMALL LETTER A}-\N{CYRILLIC SMALL LETTER DZHE}"
|
| 232 |
+
),
|
| 233 |
+
"box": (
|
| 234 |
+
# omit the single horizontal line, might be used in kaomoji
|
| 235 |
+
"│┌┐┘├┤┬┼"
|
| 236 |
+
"\N{BOX DRAWINGS DOUBLE HORIZONTAL}-\N{BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL}"
|
| 237 |
+
"▀▄█▌▐░▒▓"
|
| 238 |
+
),
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# We can now build a regular expression that detects unlikely juxtapositions
|
| 243 |
+
# of characters, mostly based on their categories.
|
| 244 |
+
#
|
| 245 |
+
# Another regular expression, which detects sequences that look more specifically
|
| 246 |
+
# like UTF-8 mojibake, appears in chardata.py.
|
| 247 |
+
#
|
| 248 |
+
# This is a verbose regular expression, with whitespace added for somewhat more
|
| 249 |
+
# readability. Remember that the only spaces that count as literal spaces in this
|
| 250 |
+
# expression are ones inside character classes (square brackets).
|
| 251 |
+
|
| 252 |
+
BADNESS_RE = re.compile(
|
| 253 |
+
r"""
|
| 254 |
+
[{c1}]
|
| 255 |
+
|
|
| 256 |
+
[{bad}{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}] [{bad}]
|
| 257 |
+
|
|
| 258 |
+
[a-zA-Z] [{lower_common}{upper_common}] [{bad}]
|
| 259 |
+
|
|
| 260 |
+
[{bad}] [{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}]
|
| 261 |
+
|
|
| 262 |
+
[{lower_accented}{lower_common}{box}{end_punctuation}{currency}{numeric}] [{upper_accented}]
|
| 263 |
+
|
|
| 264 |
+
[{box}{end_punctuation}{currency}{numeric}] [{lower_accented}]
|
| 265 |
+
|
|
| 266 |
+
# leave out [upper_accented][currency] without further info, because it's used in some
|
| 267 |
+
# fancy leetspeak-esque writing
|
| 268 |
+
[{lower_accented}{box}{end_punctuation}] [{currency}]
|
| 269 |
+
|
|
| 270 |
+
\s [{upper_accented}] [{currency}]
|
| 271 |
+
|
|
| 272 |
+
[{upper_accented}{box}] [{numeric}]
|
| 273 |
+
|
|
| 274 |
+
[{lower_accented}{upper_accented}{box}{currency}{end_punctuation}] [{start_punctuation}] [{numeric}]
|
| 275 |
+
|
|
| 276 |
+
[{lower_accented}{upper_accented}{currency}{numeric}{box}] [{end_punctuation}] [{start_punctuation}]
|
| 277 |
+
|
|
| 278 |
+
[{currency}{numeric}{box}] [{start_punctuation}]
|
| 279 |
+
|
|
| 280 |
+
[a-z] [{upper_accented}] [{start_punctuation}{currency}]
|
| 281 |
+
|
|
| 282 |
+
[{box}] [{kaomoji}]
|
| 283 |
+
|
|
| 284 |
+
[{lower_accented}{upper_accented}{currency}{numeric}{start_punctuation}{end_punctuation}] [{box}]
|
| 285 |
+
|
|
| 286 |
+
[{box}] [{end_punctuation}]
|
| 287 |
+
|
|
| 288 |
+
[{lower_accented}{upper_accented}] [{end_punctuation}] \w
|
| 289 |
+
|
|
| 290 |
+
|
| 291 |
+
# The ligature œ when not followed by an unaccented Latin letter
|
| 292 |
+
[Œœ][^A-Za-z]
|
| 293 |
+
|
|
| 294 |
+
|
| 295 |
+
# Common Windows-1252 2-character mojibake that isn't covered by the cases above
|
| 296 |
+
[ÂÃÎÐ][€Šš¢£Ÿž\xa0\xad®©°·»{start_punctuation}{end_punctuation}–—´]
|
| 297 |
+
|
|
| 298 |
+
× [²³]
|
| 299 |
+
|
|
| 300 |
+
# Windows-1252 mojibake of Arabic words needs to include the 'common' characters.
|
| 301 |
+
# To compensate, we require four characters to be matched.
|
| 302 |
+
[ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
|
| 303 |
+
[ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
|
| 304 |
+
|
|
| 305 |
+
|
| 306 |
+
# Windows-1252 mojibake that starts 3-character sequences for some South Asian
|
| 307 |
+
# alphabets
|
| 308 |
+
à[²µ¹¼½¾]
|
| 309 |
+
|
|
| 310 |
+
|
| 311 |
+
# MacRoman mojibake that isn't covered by the cases above
|
| 312 |
+
√[±∂†≠®™´≤≥¥µø]
|
| 313 |
+
|
|
| 314 |
+
≈[°¢]
|
| 315 |
+
|
|
| 316 |
+
‚Ä[ìîïòôúùû†°¢π]
|
| 317 |
+
|
|
| 318 |
+
‚[âó][àä°ê]
|
| 319 |
+
|
|
| 320 |
+
|
| 321 |
+
# Windows-1251 mojibake of characters in the U+2000 range
|
| 322 |
+
вЂ
|
| 323 |
+
|
|
| 324 |
+
|
| 325 |
+
# Windows-1251 mojibake of Latin-1 characters and/or the Cyrillic alphabet.
|
| 326 |
+
# Because the 2-character sequences involved here may be common, we require
|
| 327 |
+
# seeing a 3-character sequence.
|
| 328 |
+
[ВГРС][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°µ][ВГРС]
|
| 329 |
+
|
|
| 330 |
+
# A distinctive five-character sequence of Cyrillic letters, which can be
|
| 331 |
+
# Windows-1251 mojibake on top of Latin-1 mojibake of Windows-1252 characters.
|
| 332 |
+
# Require a Latin letter nearby.
|
| 333 |
+
ГўВЂВ.[A-Za-z ]
|
| 334 |
+
|
|
| 335 |
+
|
| 336 |
+
# Windows-1252 encodings of 'à' and 'á', as well as \xa0 itself
|
| 337 |
+
Ã[\xa0¡]
|
| 338 |
+
|
|
| 339 |
+
[a-z]\s?[ÃÂ][ ]
|
| 340 |
+
|
|
| 341 |
+
^[ÃÂ][ ]
|
| 342 |
+
|
|
| 343 |
+
|
| 344 |
+
# Cases where  precedes a character as an encoding of exactly the same
|
| 345 |
+
# character, and the character is common enough
|
| 346 |
+
[a-z.,?!{end_punctuation}] Â [ {start_punctuation}{end_punctuation}]
|
| 347 |
+
|
|
| 348 |
+
|
| 349 |
+
# Windows-1253 mojibake of characters in the U+2000 range
|
| 350 |
+
β€[™\xa0Ά\xad®°]
|
| 351 |
+
|
|
| 352 |
+
|
| 353 |
+
# Windows-1253 mojibake of Latin-1 characters and/or the Greek alphabet
|
| 354 |
+
[ΒΓΞΟ][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°][ΒΓΞΟ]
|
| 355 |
+
""".format(
|
| 356 |
+
**MOJIBAKE_CATEGORIES
|
| 357 |
+
),
|
| 358 |
+
re.VERBOSE,
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def sequence_weirdness(text):
    """
    Redirect to :func:`badness`.

    `sequence_weirdness` was the name of the heuristic used in ftfy 2.x
    through 5.x. It's kept as a compatibility shim for external code that
    calls the heuristic directly.

    Fix: the warning now carries the ``DeprecationWarning`` category (for
    consistency with the other deprecated shims in this package, e.g.
    ``fixes.fix_encoding``) and ``stacklevel=2`` so it points at the caller.
    """
    warnings.warn(
        "`sequence_weirdness()` is an old heuristic, and the current "
        "closest equivalent is `ftfy.badness.badness()`",
        DeprecationWarning,
        stacklevel=2,
    )
    return badness(text)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def badness(text):
    """
    Count the unlikely character sequences ("mojibake") in `text`.

    A result greater than 0 means some of the text appears to be mojibake.
    """
    mojibake_matches = BADNESS_RE.findall(text)
    return len(mojibake_matches)
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def is_bad(text):
    """
    Return True iff `text` looks like it contains mojibake.

    This can be faster than `badness`, because it stops at the first regex
    match instead of counting them all. Note that longer strings have a
    higher chance of returning True.
    """
    return BADNESS_RE.search(text) is not None
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/chardata.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This gives other modules access to the gritty details about characters and the
|
| 3 |
+
encodings that use them.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import html
|
| 7 |
+
import itertools
|
| 8 |
+
import re
|
| 9 |
+
import unicodedata
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# These are the encodings we will try to fix in ftfy, in the
|
| 13 |
+
# order that they should be tried.
|
| 14 |
+
CHARMAP_ENCODINGS = [
|
| 15 |
+
"latin-1",
|
| 16 |
+
"sloppy-windows-1252",
|
| 17 |
+
"sloppy-windows-1251",
|
| 18 |
+
"sloppy-windows-1250",
|
| 19 |
+
"sloppy-windows-1253",
|
| 20 |
+
"sloppy-windows-1254",
|
| 21 |
+
"iso-8859-2",
|
| 22 |
+
"macroman",
|
| 23 |
+
"cp437",
|
| 24 |
+
]
|
| 25 |
+
|
| 26 |
+
SINGLE_QUOTE_RE = re.compile("[\u02bc\u2018-\u201b]")
|
| 27 |
+
DOUBLE_QUOTE_RE = re.compile("[\u201c-\u201f]")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _build_regexes():
    """
    Build ENCODING_REGEXES: for each supported encoding, a reasonably fast
    regex test of whether a given string could be represented in that
    encoding. The simplest is the 'ascii' detector, which just checks that
    every character is between U+0000 and U+007F.
    """
    regexes = {"ascii": re.compile("^[\x00-\x7f]*$")}

    for encoding in CHARMAP_ENCODINGS:
        # Decode bytes 0x80 through 0xFF, plus 0x1A, which ftfy uses to
        # represent the replacement character � in the sloppy-* encodings.
        # The result is every non-ASCII character this encoding can produce.
        high_bytes = bytes(range(0x80, 0x100)) + b"\x1a"
        high_chars = high_bytes.decode(encoding)

        # The remaining ASCII bytes -- 0x00-0x19 and 0x1B-0x7F -- decode to
        # those same ASCII characters in every encoding we support, so they
        # appear as literal ranges. All regex metacharacters live in that
        # ASCII range, so `high_chars` needs no escaping.
        regexes[encoding] = re.compile(
            "^[\x00-\x19\x1b-\x7f" + high_chars + "]*$"
        )
    return regexes


ENCODING_REGEXES = _build_regexes()
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _build_html_entities():
|
| 61 |
+
entities = {}
|
| 62 |
+
# Create a dictionary based on the built-in HTML5 entity dictionary.
|
| 63 |
+
# Add a limited set of HTML entities that we'll also decode if they've
|
| 64 |
+
# been case-folded to uppercase, such as decoding &NTILDE; as "Ñ".
|
| 65 |
+
for name, char in html.entities.html5.items():
|
| 66 |
+
if name.endswith(";"):
|
| 67 |
+
entities["&" + name] = char
|
| 68 |
+
|
| 69 |
+
# Restrict the set of characters we can attempt to decode if their
|
| 70 |
+
# name has been uppercased. If we tried to handle all entity names,
|
| 71 |
+
# the results would be ambiguous.
|
| 72 |
+
if name == name.lower():
|
| 73 |
+
name_upper = name.upper()
|
| 74 |
+
entity_upper = "&" + name_upper
|
| 75 |
+
if html.unescape(entity_upper) == entity_upper:
|
| 76 |
+
entities[entity_upper] = char.upper()
|
| 77 |
+
return entities
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
HTML_ENTITY_RE = re.compile(r"&#?[0-9A-Za-z]{1,24};")
|
| 81 |
+
HTML_ENTITIES = _build_html_entities()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def possible_encoding(text, encoding):
    """
    Check whether `text` could have been decoded from the given single-byte
    encoding -- in other words, whether it can be encoded in that encoding,
    possibly sloppily.
    """
    return ENCODING_REGEXES[encoding].match(text) is not None
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _build_control_char_mapping():
|
| 96 |
+
"""
|
| 97 |
+
Build a translate mapping that strips likely-unintended control characters.
|
| 98 |
+
See :func:`ftfy.fixes.remove_control_chars` for a description of these
|
| 99 |
+
codepoint ranges and why they should be removed.
|
| 100 |
+
"""
|
| 101 |
+
control_chars = {}
|
| 102 |
+
|
| 103 |
+
for i in itertools.chain(
|
| 104 |
+
range(0x00, 0x09),
|
| 105 |
+
[0x0B],
|
| 106 |
+
range(0x0E, 0x20),
|
| 107 |
+
[0x7F],
|
| 108 |
+
range(0x206A, 0x2070),
|
| 109 |
+
[0xFEFF],
|
| 110 |
+
range(0xFFF9, 0xFFFD),
|
| 111 |
+
):
|
| 112 |
+
control_chars[i] = None
|
| 113 |
+
|
| 114 |
+
return control_chars
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
CONTROL_CHARS = _build_control_char_mapping()
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# Recognize UTF-8 sequences that would be valid if it weren't for a b'\xa0'
# that some Windows-1252 program converted to a plain space.
#
# The smaller values are included on a case-by-case basis, because we don't want
# to decode likely input sequences to unlikely characters. These are the ones
# that *do* form likely characters before 0xa0:
#
#   0xc2 -> U+A0 NO-BREAK SPACE
#   0xc3 -> U+E0 LATIN SMALL LETTER A WITH GRAVE
#   0xc5 -> U+160 LATIN CAPITAL LETTER S WITH CARON
#   0xce -> U+3A0 GREEK CAPITAL LETTER PI
#   0xd0 -> U+420 CYRILLIC CAPITAL LETTER ER
#   0xd9 -> U+660 ARABIC-INDIC DIGIT ZERO
#
# In three-character sequences, we exclude some lead bytes in some cases.
#
# When the lead byte is immediately followed by 0xA0, we shouldn't accept
# a space there, because it leads to some less-likely character ranges:
#
#   0xe0 -> Samaritan script
#   0xe1 -> Mongolian script (corresponds to Latin-1 'á' which is too common)
#
# We accept 0xe2 and 0xe3, which cover many scripts. Bytes 0xe4 and
# higher point mostly to CJK characters, which we generally don't want to
# decode near Latin lowercase letters.
#
# In four-character sequences, the lead byte must be F0, because that accounts
# for almost all of the usage of high-numbered codepoints (tag characters whose
# UTF-8 starts with the byte F3 are only used in some rare new emoji sequences).
#
# This is meant to be applied to encodings of text that tests true for `is_bad`.
# Any of these could represent characters that legitimately appear surrounded by
# spaces, particularly U+C5 (Å), which is a word in multiple languages!
#
# We should consider checking for b'\x85' being converted to ... in the future.
# I've seen it once, but the text still wasn't recoverable.
#
# NOTE: this is a *bytes* pattern; it's applied to a re-encoded byte string,
# with b' ' standing in for the lost 0xA0 continuation byte.

ALTERED_UTF8_RE = re.compile(
    b"[\xc2\xc3\xc5\xce\xd0\xd9][ ]"
    b"|[\xe2\xe3][ ][\x80-\x84\x86-\x9f\xa1-\xbf]"
    b"|[\xe0-\xe3][\x80-\x84\x86-\x9f\xa1-\xbf][ ]"
    b"|[\xf0][ ][\x80-\xbf][\x80-\xbf]"
    b"|[\xf0][\x80-\xbf][ ][\x80-\xbf]"
    b"|[\xf0][\x80-\xbf][\x80-\xbf][ ]"
)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
# This expression matches UTF-8 and CESU-8 sequences where some of the
# continuation bytes have been lost. The byte 0x1a (sometimes written as ^Z) is
# used within ftfy to represent a byte that produced the replacement character
# \ufffd. We don't know which byte it was, but we can at least decode the UTF-8
# sequence as \ufffd instead of failing to re-decode it at all.
#
# In some cases, we allow the ASCII '?' in place of \ufffd, but at most once per
# sequence.
#
# NOTE: the \xed branches match CESU-8-style surrogate pairs; the final
# bare \x1a alternative catches a lost byte that isn't part of any longer
# recognizable sequence.
LOSSY_UTF8_RE = re.compile(
    b"[\xc2-\xdf][\x1a]"
    b"|[\xc2-\xc3][?]"
    b"|\xed[\xa0-\xaf][\x1a?]\xed[\xb0-\xbf][\x1a?\x80-\xbf]"
    b"|\xed[\xa0-\xaf][\x1a?\x80-\xbf]\xed[\xb0-\xbf][\x1a?]"
    b"|[\xe0-\xef][\x1a?][\x1a\x80-\xbf]"
    b"|[\xe0-\xef][\x1a\x80-\xbf][\x1a?]"
    b"|[\xf0-\xf4][\x1a?][\x1a\x80-\xbf][\x1a\x80-\xbf]"
    b"|[\xf0-\xf4][\x1a\x80-\xbf][\x1a?][\x1a\x80-\xbf]"
    b"|[\xf0-\xf4][\x1a\x80-\xbf][\x1a\x80-\xbf][\x1a?]"
    b"|\x1a"
)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# This regex matches C1 control characters (U+0080 through U+009F), which
# occupy some of the positions in the Latin-1 character map that Windows
# assigns to other characters instead.
C1_CONTROL_RE = re.compile(r"[\x80-\x9f]")
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
# A str.translate mapping that breaks ligatures made of Latin letters. While
# ligatures may be important to the representation of other languages, in Latin
# letters they tend to represent a copy/paste error. It omits ligatures such
# as æ that are frequently used intentionally.
#
# This list additionally includes some Latin digraphs that represent two
# characters for legacy encoding reasons, not for typographical reasons.
#
# Ligatures and digraphs may also be separated by NFKC normalization, but that
# is sometimes more normalization than you want.
#
# Fix: the keys were written as `ord("<ligature char>")` literals, which are
# visually indistinguishable from their replacement values in most editors
# and fragile under source re-encoding. They're now explicit codepoints
# (with the ligature shown in a comment); non-ASCII replacement characters
# use \u escapes. The mapping's contents are unchanged.
LIGATURES = {
    0x0132: "IJ",  # Ĳ -- Dutch ligatures
    0x0133: "ij",  # ĳ
    0x0149: "\u02bcn",  # ŉ -- Afrikaans digraph meant to avoid auto-curled quote
    0x01F1: "DZ",  # Ǳ -- Serbian/Croatian digraphs for Cyrillic conversion
    0x01F2: "Dz",  # ǲ
    0x01F3: "dz",  # ǳ
    0x01C4: "D\u017d",  # Ǆ -> DŽ
    0x01C5: "D\u017e",  # ǅ -> Dž
    0x01C6: "d\u017e",  # ǆ -> dž
    0x01C7: "LJ",  # Ǉ
    0x01C8: "Lj",  # ǈ
    0x01C9: "lj",  # ǉ
    0x01CA: "NJ",  # Ǌ
    0x01CB: "Nj",  # ǋ
    0x01CC: "nj",  # ǌ
    0xFB00: "ff",  # ﬀ -- Latin typographical ligatures
    0xFB01: "fi",  # ﬁ
    0xFB02: "fl",  # ﬂ
    0xFB03: "ffi",  # ﬃ
    0xFB04: "ffl",  # ﬄ
    0xFB05: "\u017ft",  # ﬅ -> ſt
    0xFB06: "st",  # ﬆ
}
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _build_width_map():
|
| 232 |
+
"""
|
| 233 |
+
Build a translate mapping that replaces halfwidth and fullwidth forms
|
| 234 |
+
with their standard-width forms.
|
| 235 |
+
"""
|
| 236 |
+
# Though it's not listed as a fullwidth character, we'll want to convert
|
| 237 |
+
# U+3000 IDEOGRAPHIC SPACE to U+20 SPACE on the same principle, so start
|
| 238 |
+
# with that in the dictionary.
|
| 239 |
+
width_map = {0x3000: " "}
|
| 240 |
+
for i in range(0xFF01, 0xFFF0):
|
| 241 |
+
char = chr(i)
|
| 242 |
+
alternate = unicodedata.normalize("NFKC", char)
|
| 243 |
+
if alternate != char:
|
| 244 |
+
width_map[i] = alternate
|
| 245 |
+
return width_map
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
WIDTH_MAP = _build_width_map()
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# Character classes that help us pinpoint embedded mojibake. These can
# include common characters, because we'll also check them for 'badness'.
#
# Each value is the *interior* of a regex character class, later spliced
# into UTF8_DETECTOR_RE via str.format.
UTF8_CLUES = {
    # Letters that decode to 0xC2 - 0xDF in a Latin-1-like encoding
    "utf8_first_of_2": (
        "ÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßĂĆČĎĐĘĚĞİĹŃŇŐŘŞŢŮŰ"
        "ΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ"
    ),
    # Letters that decode to 0xE0 - 0xEF in a Latin-1-like encoding
    "utf8_first_of_3": ("àáâãäåæçèéêëìíîïăćčďęěĺŕΰαβγδεζηθικλμνξοабвгдежзийклмноп"),
    # Letters that decode to 0xF0 or 0xF3 in a Latin-1-like encoding.
    # (Other leading bytes correspond only to unassigned codepoints)
    "utf8_first_of_4": ("ðóđğπσру"),
    # Letters that decode to 0x80 - 0xBF in a Latin-1-like encoding,
    # including a space standing in for 0xA0
    "utf8_continuation": (
        "\x80-\xbf"
        "ĄąĽľŁłŒœŚśŞşŠšŤťŸŹźŻżŽžƒˆˇ˘˛˜˝΄΅"
        "ΆΈΉΊΌΎΏЁЂЃЄЅІЇЈЉЊЋЌЎЏёђѓєѕіїјљњћќўџҐґ"
        "–—―‘’‚“”„†‡•…‰‹›€№™"
        " "
    ),
    # Letters that decode to 0x80 - 0xBF in a Latin-1-like encoding,
    # and don't usually stand for themselves when adjacent to mojibake.
    # This excludes spaces, dashes, quotation marks, and ellipses.
    "utf8_continuation_strict": (
        "\x80-\xbf"
        "ĄąĽľŁłŒœŚśŞşŠšŤťŸŹźŻżŽžƒˆˇ˘˛˜˝΄΅"
        "ΆΈΉΊΌΎΏЁЂЃЄЅІЇЈЉЊЋЌЎЏёђѓєѕіїјљњћќўџҐґ"
        "†‡•‰‹›€№™"
    ),
}
|
| 283 |
+
|
| 284 |
+
# This regex uses UTF8_CLUES to find sequences of likely mojibake.
# It matches them with + so that several adjacent UTF-8-looking sequences
# get coalesced into one, allowing them to be fixed more efficiently
# and not requiring every individual subsequence to be detected as 'badness'.
#
# We accept spaces in place of "utf8_continuation", because spaces might have
# been intended to be U+A0 NO-BREAK SPACE.
#
# We do a lookbehind to make sure the previous character isn't a
# "utf8_continuation_strict" character, so that we don't fix just a few
# characters in a huge garble and make the situation worse.
#
# Unfortunately, the matches to this regular expression won't show their
# surrounding context, and including context would make the expression much
# less efficient. The 'badness' rules that require context, such as a preceding
# lowercase letter, will prevent some cases of inconsistent UTF-8 from being
# fixed when they don't see it.
#
# NOTE: re.VERBOSE ignores the literal whitespace below; the doubled braces
# {{2}} / {{3}} survive str.format as the regex repetition counts {2} / {3}.
UTF8_DETECTOR_RE = re.compile(
    """
    (?<! [{utf8_continuation_strict}])
    (
        [{utf8_first_of_2}] [{utf8_continuation}]
        |
        [{utf8_first_of_3}] [{utf8_continuation}]{{2}}
        |
        [{utf8_first_of_4}] [{utf8_continuation}]{{3}}
    )+
    """.format(
        **UTF8_CLUES
    ),
    re.VERBOSE,
)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/cli.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A command-line utility for fixing text found in a file.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
from ftfy import __version__, fix_file, TextFixerConfig
|
| 8 |
+
|
| 9 |
+
# Error message printed when the output stream can't encode Unicode on a
# POSIX-style system (usually a stale or misconfigured locale).
ENCODE_ERROR_TEXT_UNIX = """ftfy error:
Unfortunately, this output stream does not support Unicode.

Your system locale may be very old or misconfigured. You should use a locale
that supports UTF-8. One way to do this is to `export LANG=C.UTF-8`.
"""

# Error message printed when the output stream can't encode Unicode on
# Windows (typically the cmd.exe console).
ENCODE_ERROR_TEXT_WINDOWS = """ftfy error:
Unfortunately, this output stream does not support Unicode.

You might be trying to output to the Windows Command Prompt (cmd.exe), which
does not fully support Unicode for historical reasons. In general, we recommend
finding a way to run Python without using cmd.exe.

You can work around this problem by using the '-o filename' option in ftfy to
output to a file instead.
"""

# %-format template filled with (encoding_name, UnicodeDecodeError) when the
# input can't be decoded.
DECODE_ERROR_TEXT = """ftfy error:
This input couldn't be decoded as %r. We got the following error:

    %s

ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
"""

# Printed when the input and output paths resolve to the same file.
SAME_FILE_ERROR_TEXT = """ftfy error:
Can't read and write the same file. Please output to a new file instead.
"""
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def main():
    """
    Run ftfy as a command-line utility.

    Reads bytes from a file (or stdin), fixes the text with
    :func:`ftfy.fix_file`, and writes the result to a file (or stdout).
    Exits with status 1 on decode/encode failures or when the input and
    output are the same file.

    Fix: the input and output file handles opened here were never closed;
    they are now closed in a ``finally`` block (stdin/stdout are left
    alone). This guarantees the output file is flushed even on error paths.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="ftfy (fixes text for you), version %s" % __version__
    )
    parser.add_argument(
        "filename",
        default="-",
        nargs="?",
        help="The file whose Unicode is to be fixed. Defaults "
        "to -, meaning standard input.",
    )
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default="-",
        help="The file to output to. Defaults to -, meaning " "standard output.",
    )
    parser.add_argument(
        "-g",
        "--guess",
        action="store_true",
        help="Ask ftfy to guess the encoding of your input. "
        "This is risky. Overrides -e.",
    )
    parser.add_argument(
        "-e",
        "--encoding",
        type=str,
        default="utf-8",
        help="The encoding of the input. Defaults to UTF-8.",
    )
    parser.add_argument(
        "-n",
        "--normalization",
        type=str,
        default="NFC",
        help="The normalization of Unicode to apply. "
        'Defaults to NFC. Can be "none".',
    )
    parser.add_argument(
        "--preserve-entities",
        action="store_true",
        help="Leave HTML entities as they are. The default "
        "is to decode them, as long as no HTML tags "
        "have appeared in the file.",
    )

    args = parser.parse_args()

    # -g overrides -e: passing encoding=None asks fix_file to guess.
    encoding = args.encoding
    if args.guess:
        encoding = None

    if args.filename == "-":
        # Get a standard input stream made of bytes, so we can decode it as
        # whatever encoding is necessary.
        file = sys.stdin.buffer
    else:
        file = open(args.filename, "rb")

    if args.output == "-":
        outfile = sys.stdout
    else:
        if os.path.realpath(args.output) == os.path.realpath(args.filename):
            sys.stderr.write(SAME_FILE_ERROR_TEXT)
            sys.exit(1)
        outfile = open(args.output, "w", encoding="utf-8")

    normalization = args.normalization
    if normalization.lower() == "none":
        normalization = None

    if args.preserve_entities:
        unescape_html = False
    else:
        unescape_html = "auto"

    config = TextFixerConfig(unescape_html=unescape_html, normalization=normalization)

    try:
        for line in fix_file(file, encoding=encoding, config=config):
            try:
                outfile.write(line)
            except UnicodeEncodeError:
                if sys.platform == "win32":
                    sys.stderr.write(ENCODE_ERROR_TEXT_WINDOWS)
                else:
                    sys.stderr.write(ENCODE_ERROR_TEXT_UNIX)
                sys.exit(1)
    except UnicodeDecodeError as err:
        sys.stderr.write(DECODE_ERROR_TEXT % (encoding, err))
        sys.exit(1)
    finally:
        # Close only the handles we opened ourselves; never close the
        # process's stdin/stdout.
        if file is not sys.stdin.buffer:
            file.close()
        if outfile is not sys.stdout:
            outfile.close()


if __name__ == "__main__":
    main()
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/fixes.py
ADDED
|
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The `ftfy.fixes` module contains the individual fixes that :func:`ftfy.fix_text`
|
| 3 |
+
can perform, and provides the functions that are named in "explanations"
|
| 4 |
+
such as the output of :func:`ftfy.fix_and_explain`.
|
| 5 |
+
|
| 6 |
+
Two of these functions are particularly useful on their own, as more robust
|
| 7 |
+
versions of functions in the Python standard library:
|
| 8 |
+
|
| 9 |
+
- :func:`ftfy.fixes.decode_escapes`
|
| 10 |
+
- :func:`ftfy.fixes.unescape_html`
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import codecs
|
| 14 |
+
import html
|
| 15 |
+
import re
|
| 16 |
+
import warnings
|
| 17 |
+
|
| 18 |
+
import ftfy
|
| 19 |
+
from ftfy.chardata import (
|
| 20 |
+
ALTERED_UTF8_RE,
|
| 21 |
+
C1_CONTROL_RE,
|
| 22 |
+
CONTROL_CHARS,
|
| 23 |
+
DOUBLE_QUOTE_RE,
|
| 24 |
+
HTML_ENTITIES,
|
| 25 |
+
HTML_ENTITY_RE,
|
| 26 |
+
LIGATURES,
|
| 27 |
+
LOSSY_UTF8_RE,
|
| 28 |
+
SINGLE_QUOTE_RE,
|
| 29 |
+
UTF8_DETECTOR_RE,
|
| 30 |
+
WIDTH_MAP,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
from ftfy.badness import is_bad
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def fix_encoding_and_explain(text):
    """
    Deprecated shim: this function now lives in the main `ftfy` module as
    `ftfy.fix_encoding_and_explain()`.
    """
    message = "`fix_encoding_and_explain()` has moved to the main module of ftfy."
    warnings.warn(message, DeprecationWarning)
    return ftfy.fix_encoding_and_explain(text)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def fix_encoding(text):
    """
    Deprecated shim: this function now lives in the main `ftfy` module as
    `ftfy.fix_encoding()`.
    """
    message = "`fix_encoding()` has moved to the main module of ftfy."
    warnings.warn(message, DeprecationWarning)
    return ftfy.fix_encoding(text)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def apply_plan(text, plan):
    """
    Deprecated shim: this function now lives in the main `ftfy` module as
    `ftfy.apply_plan()`.
    """
    message = "`apply_plan()` has moved to the main module of ftfy."
    warnings.warn(message, DeprecationWarning)
    return ftfy.apply_plan(text, plan)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _unescape_fixup(match):
    """
    Replace one matched HTML entity with the character it represents,
    if possible.
    """
    entity = match.group(0)
    try:
        return HTML_ENTITIES[entity]
    except KeyError:
        pass

    if not entity.startswith("&#"):
        # An unrecognized named entity: leave it alone.
        return entity

    # A numeric character reference: let html.unescape decode it. If a
    # semicolon survives, html.unescape only decoded part of the string,
    # which is not what we want -- the semicolon should be consumed.
    decoded = html.unescape(entity)
    return entity if ";" in decoded else decoded
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def unescape_html(text):
    """
    Decode HTML entities and character references, including some nonstandard
    ones written in all-caps.

    Python's built-in `html.unescape` handles all the messy edge cases of
    HTML escaping, including escapes without semicolons such as "&amp".
    That's the right tool when text is definitely HTML-escaped, but on
    ambiguous input it creates false positives: the informally written
    "this&not that" should not silently become "this¬ that".

    Here we only decode the unambiguous escapes: the entries of
    `html.entities.html5` that end with a semicolon, plus all-caps versions
    of a limited set of entities for Latin letters and common symbols, so
    that a case-folded database value like 'P&EACUTE;REZ' still reads as
    'PÉREZ'. The all-caps set is limited because entity names can be
    case-sensitive in complicated ways.

    >>> unescape_html('&lt;tag&gt;')
    '<tag>'

    >>> unescape_html('&Jscr;ohn &HilbertSpace;ancock')
    '𝒥ohn ℋancock'

    >>> unescape_html('&checkmark;')
    '✓'

    >>> unescape_html('P&eacute;rez')
    'Pérez'

    >>> unescape_html('P&EACUTE;REZ')
    'PÉREZ'

    >>> unescape_html('BUNDESSTRA&SZLIG;E')
    'BUNDESSTRASSE'

    >>> unescape_html('&ntilde; &Ntilde; &NTILDE; &nTILDE;')
    'ñ Ñ Ñ &nTILDE;'
    """
    decoded = HTML_ENTITY_RE.sub(_unescape_fixup, text)
    return decoded
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# Matches an ANSI/VT100 escape sequence: ESC '[' , optional digits and
# semicolons, and a final letter.
ANSI_RE = re.compile("\033\\[((?:\\d|;)*)([a-zA-Z])")


def remove_terminal_escapes(text):
    r"""
    Strip out "ANSI" terminal escape sequences, such as those that produce
    colored text on Unix.

    >>> print(remove_terminal_escapes(
    ...     "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
    ... ))
    I'm blue, da ba dee da ba doo...
    """
    stripped = ANSI_RE.sub("", text)
    return stripped
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def uncurl_quotes(text):
    r"""
    Replace curly quotation marks with their straight ASCII equivalents.

    >>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
    "here's a test"
    """
    # Straighten double quotes first, then single quotes; the two regexes
    # match disjoint characters, so the order doesn't affect the result.
    text = DOUBLE_QUOTE_RE.sub('"', text)
    return SINGLE_QUOTE_RE.sub("'", text)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def fix_latin_ligatures(text):
    """
    Replace single-character ligatures of Latin letters (such as the 'ffi'
    ligature) with the separate letters they contain. Ligature characters
    in text strings are usually copy-and-paste glitches; ligatures are
    meant to appear in *rendered* text, not in the underlying string.

    Ligatures in other scripts are deliberately left alone, because they
    may be intended and removing them could lose information. To take
    apart nearly all ligatures, use NFKC normalization instead.

    >>> fix_latin_ligatures("flu\ufb03est")
    'fluffiest'
    """
    # LIGATURES is a module-level translation table covering only the
    # Latin ligature codepoints.
    return text.translate(LIGATURES)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def fix_character_width(text):
    """
    Replace "halfwidth" and "fullwidth" variants of ASCII, katakana, and
    Hangul characters with their standard-width forms.

    The variant forms exist so text can line up in a grid; if you don't
    need that property, you probably want the standard forms, which is
    what this function produces.

    Note that this also replaces the ideographic space, U+3000, with the
    ASCII space, U+20.

    >>> print(fix_character_width("LOUD NOISES"))
    LOUD NOISES
    >>> print(fix_character_width("Uターン"))  # this means "U-turn"
    Uターン
    """
    # WIDTH_MAP is a module-level translation table from the variant
    # codepoints to their standard-width equivalents.
    return text.translate(WIDTH_MAP)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def fix_line_breaks(text):
    r"""
    Convert all line breaks to Unix style.

    The following sequences become the standard \\n line break:

    - CRLF (\\r\\n), used on Windows and in some communication protocols
    - CR (\\r), once used on Mac OS Classic, and now kept alive by misguided
      software such as Microsoft Office for Mac
    - LINE SEPARATOR (\\u2028) and PARAGRAPH SEPARATOR (\\u2029), defined by
      Unicode and used to sow confusion and discord
    - NEXT LINE (\\x85), a C1 control character that is certainly not what
      you meant

    NEXT LINE is a bit of an odd case: it usually won't show up if
    `fix_encoding` is also being run, because \\x85 is very common mojibake
    for \\u2026, HORIZONTAL ELLIPSIS.

    >>> print(fix_line_breaks(
    ...     "This string is made of two things:\u2029"
    ...     "1. Unicode\u2028"
    ...     "2. Spite"
    ... ))
    This string is made of two things:
    1. Unicode
    2. Spite

    >>> fix_line_breaks("Content-type: text/plain\r\n\r\nHi.")
    'Content-type: text/plain\n\nHi.'
    >>> fix_line_breaks("This is how Microsoft \r trolls Mac users")
    'This is how Microsoft \n trolls Mac users'
    >>> fix_line_breaks("What is this \x85 I don't even")
    "What is this \n I don't even"
    """
    # CRLF must be handled before bare CR, so that "\r\n" collapses to a
    # single "\n" rather than two.
    for sequence in ("\r\n", "\r", "\u2028", "\u2029", "\u0085"):
        text = text.replace(sequence, "\n")
    return text
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
SURROGATE_RE = re.compile("[\ud800-\udfff]")
SURROGATE_PAIR_RE = re.compile("[\ud800-\udbff][\udc00-\udfff]")


def convert_surrogate_pair(match):
    """
    Convert a matched surrogate pair to the single codepoint it represents.

    This implements the formula described at:
    http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
    """
    high, low = match.group(0)
    combined = 0x10000 + (ord(high) - 0xD800) * 0x400 + (ord(low) - 0xDC00)
    return chr(combined)


def fix_surrogates(text):
    """
    Replace 16-bit surrogate codepoints with the characters they represent
    (when properly paired as a high surrogate followed by a low surrogate),
    or with \ufffd (the replacement character) otherwise.

    For example, the pair U+D83D U+DCA9 becomes U+1F4A9, while a lone or
    mis-ordered surrogate becomes U+FFFD.

    (Surrogate characters are deliberately not written literally in this
    docstring; various tools fail when they encounter them, which is much
    of the reason this fixer exists.)
    """
    # Fast path: no surrogates at all, return the text unchanged.
    if not SURROGATE_RE.search(text):
        return text
    # First combine well-formed pairs, then replace any stragglers.
    paired = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
    return SURROGATE_RE.sub("\ufffd", paired)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def remove_control_chars(text):
    """
    Remove various control characters that you probably didn't intend to be
    in your text. Many of these appear in the table of "Characters not
    suitable for use with markup" at
    http://www.unicode.org/reports/tr20/tr20-9.html.

    Removed:

    - ASCII control characters, except the important whitespace characters
      (U+00 to U+08, U+0B, U+0E to U+1F, U+7F)
    - Deprecated Arabic control characters (U+206A to U+206F)
    - Interlinear annotation characters (U+FFF9 to U+FFFB)
    - The Object Replacement Character (U+FFFC)
    - The byte order mark (U+FEFF)

    Deliberately kept:

    - Control characters that produce whitespace (U+09, U+0A, U+0C, U+0D,
      U+2028, and U+2029)
    - C1 control characters (U+80 to U+9F) -- basically never intentional,
      but they are important clues about what mojibake has happened
    - Control characters that affect glyph rendering, such as joiners and
      right-to-left marks (U+200C to U+200F, U+202A to U+202E)
    - Musical notation control characters (U+1D173 to U+1D17A), because wow
      if you're using those you probably have a good reason
    - Tag characters, because they are now used in emoji sequences such as
      "Flag of Wales"
    """
    # CONTROL_CHARS is a module-level translation table that strips the
    # characters listed above in a single pass.
    stripped = text.translate(CONTROL_CHARS)
    return stripped
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def remove_bom(text):
    r"""
    Remove a leading byte-order mark (U+FEFF) that was accidentally decoded
    as if it were part of the text.

    >>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
    Where do you want to go today?
    """
    bom = chr(0xFEFF)
    return text.lstrip(bom)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
# Define a regex to match valid escape sequences in Python string literals.
|
| 336 |
+
ESCAPE_SEQUENCE_RE = re.compile(
    r"""
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )""",
    re.UNICODE | re.VERBOSE,
)


def decode_escapes(text):
    r"""
    Decode backslashed escape sequences, including \\x, \\u, and \\U
    character references, even in the presence of other Unicode.

    This function has to be called specifically; it's not run automatically
    by ftfy, because escaped text is not necessarily a mistake, and there is
    no way to distinguish when it is.

    This is what Python's "string-escape" and "unicode-escape" codecs were
    meant to do, but in contrast, this actually works. It decodes a string
    exactly the same way the Python interpreter decodes its string literals.

    >>> factoid = '\\u20a1 is the currency symbol for the colón.'
    >>> print(decode_escapes(factoid))
    ₡ is the currency symbol for the colón.

    The "unicode-escape" codec fails on input that mixes escapes with
    literal non-ASCII characters (see
    http://stackoverflow.com/a/24519338/773754). This function works around
    that by locating only the substrings that are escape sequences -- which
    are always pure ASCII -- and decoding those individually, leaving
    everything else untouched.
    """

    def _decode_one(match):
        "Decode the single escape sequence captured by this match."
        return codecs.decode(match.group(0), "unicode-escape")

    return ESCAPE_SEQUENCE_RE.sub(_decode_one, text)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
# This regex implements an exception to restore_byte_a0, so we can decode the
|
| 388 |
+
# very common mojibake of (for example) "Ã la mode" as "à la mode", not "àla
|
| 389 |
+
# mode".
|
| 390 |
+
#
|
| 391 |
+
# If byte C3 appears with a single space after it -- most commonly this shows
|
| 392 |
+
# up as " Ã " appearing as an entire word -- we'll insert \xa0 while keeping
|
| 393 |
+
# the space. Without this change, we would decode "à" as the start of the next
|
| 394 |
+
# word, such as "àla". It's almost always intended to be a separate word, as in
|
| 395 |
+
# "à la", but when mojibake turns this into "Ã\xa0 la", the two kinds of spaces
|
| 396 |
+
# get coalesced into "Ã la".
|
| 397 |
+
#
|
| 398 |
+
# We make exceptions for the Portuguese words "às", "àquele", "àquela",
|
| 399 |
+
# "àquilo" and their plurals -- these are contractions of, for example, "a
|
| 400 |
+
# aquele" and are very common. Note that the final letter is important to
|
| 401 |
+
# distinguish this case from French "à quel point".
|
| 402 |
+
#
|
| 403 |
+
# Other instances in Portuguese, such as "àfrica", seem to be typos (intended
|
| 404 |
+
# to be "África" with the accent in the other direction).
|
| 405 |
+
#
|
| 406 |
+
# Unfortunately, "à" is a common letter in Catalan, and mojibake of words that
|
| 407 |
+
# contain it will end up with inserted spaces. We can't do the right thing with
|
| 408 |
+
# every word. The cost is that the mojibake text "fà cil" will be interpreted as
|
| 409 |
+
# "fà cil", not "fàcil".
|
| 410 |
+
A_GRAVE_WORD_RE = re.compile(b"\xc3 (?! |quele|quela|quilo|s )")


def restore_byte_a0(byts):
    """
    Some mojibake has been additionally altered by a process that said
    "hmm, byte A0, that's basically a space!" and replaced it with an ASCII
    space. When the A0 is part of a sequence that we intend to decode as
    UTF-8, changing byte A0 to 20 would make it fail to decode.

    This process finds sequences that would convincingly decode as UTF-8 if
    byte 20 were changed to A0, and puts back the A0. For the purpose of
    deciding whether this is a good idea, this step gets a cost of twice
    the number of bytes that are changed.

    This is used as a step within `fix_encoding`.
    """
    # Special case: keep the space after a word-initial "Ã " so that
    # "Ã la mode" decodes as "à la mode" rather than "àla mode". The
    # Portuguese contractions ("às", "àquele", ...) are excluded by the
    # regex's lookahead.
    with_nbsp = A_GRAVE_WORD_RE.sub(b"\xc3\xa0 ", byts)

    def put_back_a0(match):
        "Turn the spaces in a plausible UTF-8 sequence back into byte A0."
        return match.group(0).replace(b"\x20", b"\xa0")

    # ALTERED_UTF8_RE (module-level) matches byte sequences that would be
    # valid UTF-8 if their 0x20 bytes were 0xA0.
    return ALTERED_UTF8_RE.sub(put_back_a0, with_nbsp)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def replace_lossy_sequences(byts):
    """
    Identify sequences where information has been lost in a "sloppy" codec,
    indicated by byte 1A, and if they would otherwise look like a UTF-8
    sequence, replace them with the UTF-8 sequence for U+FFFD.

    A further explanation:

    ftfy can now fix text in a few cases that it would previously fix
    incompletely, because of the fact that it can't successfully apply the
    fix to the entire string. A very common case of this is when characters
    have been erroneously decoded as windows-1252, but instead of the
    "sloppy" windows-1252 that passes through unassigned bytes, the
    unassigned bytes get turned into U+FFFD (�), so we can't tell what they
    were. This most commonly happens with curly quotation marks that appear
    ``“ like this �``.

    We can do better by building on ftfy's "sloppy codecs" to let them
    handle less-sloppy but more-lossy text. When they encounter ``�``,
    instead of refusing to encode it, they encode it as byte 1A -- an ASCII
    control code called SUBSTITUTE that once was meant for about the same
    purpose. This fixer then looks for UTF-8 sequences where some
    continuation bytes have been replaced by byte 1A, and decodes the whole
    sequence as �; if that doesn't work, the byte just turns back into �
    itself. As a result, ``“ like this �`` decodes as ``“ like this �``.

    If U+1A was actually in the original string, then the sloppy codecs
    will not be used, and this function will not be run, so your weird
    control character will be left alone but wacky fixes like this won't be
    possible.

    This is used as a transcoder within `fix_encoding`.
    """
    # Every such damaged sequence collapses to the UTF-8 encoding of U+FFFD.
    replacement = "\ufffd".encode("utf-8")
    return LOSSY_UTF8_RE.sub(replacement, byts)
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def decode_inconsistent_utf8(text):
    """
    Sometimes, text from one encoding ends up embedded within text from a
    different one. This is common enough that we need to be able to fix it.

    This is used as a transcoder within `fix_encoding`.
    """

    def fix_embedded_mojibake(match):
        candidate = match.group(0)
        # Only recurse on a strict substring of the input, so that fixing
        # can never loop forever on the same string; and only when the
        # substring actually looks like mojibake.
        if len(candidate) < len(text) and is_bad(candidate):
            return ftfy.fix_encoding(candidate)
        return candidate

    # UTF8_DETECTOR_RE (module-level) finds spans that look like UTF-8
    # bytes decoded in the wrong encoding.
    return UTF8_DETECTOR_RE.sub(fix_embedded_mojibake, text)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def _c1_fixer(match):
    # Reinterpret the matched C1 control character(s): encode back to the
    # raw byte via latin-1, then decode as windows-1252.
    # NOTE(review): "sloppy-windows-1252" is presumably a codec registered
    # elsewhere by ftfy's codec machinery -- confirm it is registered before
    # this runs.
    return match.group(0).encode("latin-1").decode("sloppy-windows-1252")


def fix_c1_controls(text):
    """
    If text still contains C1 control characters, treat them as their
    Windows-1252 equivalents. This matches what Web browsers do.
    """
    # C1_CONTROL_RE is a module-level regex matching the C1 range.
    return C1_CONTROL_RE.sub(_c1_fixer, text)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/formatting.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module provides functions for justifying Unicode text in a monospaced
|
| 3 |
+
display such as a terminal.
|
| 4 |
+
|
| 5 |
+
We used to have our own implementation here, but now we mostly rely on
|
| 6 |
+
the 'wcwidth' library.
|
| 7 |
+
"""
|
| 8 |
+
from unicodedata import normalize
|
| 9 |
+
|
| 10 |
+
from wcwidth import wcswidth, wcwidth
|
| 11 |
+
from ftfy.fixes import remove_terminal_escapes
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def character_width(char: str) -> int:
    r"""
    Determine the width that a character is likely to be displayed as in a
    monospaced terminal. For a printable character this is always 0, 1,
    or 2 cells.

    Nonprintable or control characters return -1, a convention that comes
    from wcwidth.

    >>> character_width('車')
    2
    >>> character_width('A')
    1
    >>> character_width('\N{ZERO WIDTH JOINER}')
    0
    >>> character_width('\n')
    -1
    """
    # Delegate entirely to the 'wcwidth' library.
    return wcwidth(char)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def monospaced_width(text: str) -> int:
    r"""
    Return the number of character cells this string is likely to occupy
    when displayed in a monospaced, modern, Unicode-aware terminal
    emulator -- its "display width".

    This is useful for formatting text that may contain non-spacing
    characters, or CJK characters that take up two character cells.

    Returns -1 if the string contains a non-printable or control character.

    >>> monospaced_width('ちゃぶ台返し')
    12
    >>> len('ちゃぶ台返し')
    6
    >>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
    12
    >>> monospaced_width('example\x80')
    -1

    The Korean word 'ibnida' can be written with 3 pre-composed characters
    or 7 jamo; either way it *looks* the same and takes up 6 cells.

    >>> monospaced_width('입니다')
    6
    >>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
    6

    The word "blue" with terminal escapes to make it blue still takes up
    only 4 cells, when shown as intended.

    >>> monospaced_width('\x1b[34mblue\x1b[m')
    4
    """
    # NFC-normalize first so Hangul jamo measure the same as their
    # precomposed forms; strip terminal escapes because, displayed as
    # intended, they have zero width.
    normalized = normalize("NFC", text)
    return wcswidth(remove_terminal_escapes(normalized))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def display_ljust(text, width, fillchar=" "):
    """
    Return `text` left-justified in a Unicode string whose display width,
    in a monospaced terminal, should be at least `width` character cells.
    The rest of the string is padded with `fillchar`, which must be a
    width-1 character.

    "Left" here means toward the beginning of the string, which may
    actually appear on the right in an RTL context -- similar to the use of
    the word "left" in "left parenthesis".

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_ljust(line, 20, '▒'))
    Table flip▒▒▒▒▒▒▒▒▒▒
    (╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
    ちゃぶ台返し▒▒▒▒▒▒▒▒

    This example should come out justified correctly when viewed in a
    monospaced terminal; it will probably not look correct in a Web
    browser.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    if text_width == -1:
        # The text contains a control character, so its display width is
        # undefined; return it unpadded.
        return text

    return text + fillchar * max(0, width - text_width)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def display_rjust(text, width, fillchar=" "):
    """
    Return `text` right-justified in a Unicode string whose display width,
    in a monospaced terminal, should be at least `width` character cells.
    The rest of the string is padded with `fillchar`, which must be a
    width-1 character.

    "Right" here means toward the end of the string, which may actually be
    on the left in an RTL context -- similar to the use of the word "right"
    in "right parenthesis".

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_rjust(line, 20, '▒'))
    ▒▒▒▒▒▒▒▒▒▒Table flip
    ▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
    ▒▒▒▒▒▒▒▒ちゃぶ台返し
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    if text_width == -1:
        # Control character present: display width is undefined, don't pad.
        return text

    return fillchar * max(0, width - text_width) + text
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def display_center(text, width, fillchar=" "):
    """
    Return `text` centered in a Unicode string whose display width, in a
    monospaced terminal, should be at least `width` character cells. The
    rest of the string is padded with `fillchar`, which must be a width-1
    character.

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_center(line, 20, '▒'))
    ▒▒▒▒▒Table flip▒▒▒▒▒
    ▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
    ▒▒▒▒ちゃぶ台返し▒▒▒▒
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    if text_width == -1:
        # Control character present: display width is undefined, don't pad.
        return text

    # When the padding is odd, the extra cell goes on the right.
    total_pad = max(0, width - text_width)
    left_pad = total_pad // 2
    return fillchar * left_pad + text + fillchar * (total_pad - left_pad)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ftfy/py.typed
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: google-auth
|
| 3 |
+
Version: 2.9.0
|
| 4 |
+
Summary: Google Authentication Library
|
| 5 |
+
Home-page: https://github.com/googleapis/google-auth-library-python
|
| 6 |
+
Author: Google Cloud Platform
|
| 7 |
+
Author-email: googleapis-packages@google.com
|
| 8 |
+
License: Apache 2.0
|
| 9 |
+
Keywords: google auth oauth client
|
| 10 |
+
Classifier: Programming Language :: Python :: 3
|
| 11 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 12 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 13 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 14 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 15 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 16 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 17 |
+
Classifier: Intended Audience :: Developers
|
| 18 |
+
Classifier: License :: OSI Approved :: Apache Software License
|
| 19 |
+
Classifier: Operating System :: POSIX
|
| 20 |
+
Classifier: Operating System :: Microsoft :: Windows
|
| 21 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
| 22 |
+
Classifier: Operating System :: OS Independent
|
| 23 |
+
Classifier: Topic :: Internet :: WWW/HTTP
|
| 24 |
+
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*
|
| 25 |
+
License-File: LICENSE
|
| 26 |
+
Requires-Dist: cachetools (<6.0,>=2.0.0)
|
| 27 |
+
Requires-Dist: pyasn1-modules (>=0.2.1)
|
| 28 |
+
Requires-Dist: six (>=1.9.0)
|
| 29 |
+
Requires-Dist: enum34 (>=1.1.10) ; python_version < "3.4"
|
| 30 |
+
Requires-Dist: rsa (<4.6) ; python_version < "3.6"
|
| 31 |
+
Requires-Dist: rsa (<5,>=3.1.4) ; python_version >= "3.6"
|
| 32 |
+
Provides-Extra: aiohttp
|
| 33 |
+
Requires-Dist: requests (<3.0.0dev,>=2.20.0) ; extra == 'aiohttp'
|
| 34 |
+
Requires-Dist: aiohttp (<4.0.0dev,>=3.6.2) ; (python_version >= "3.6") and extra == 'aiohttp'
|
| 35 |
+
Provides-Extra: enterprise_cert
|
| 36 |
+
Requires-Dist: cryptography (==36.0.2) ; extra == 'enterprise_cert'
|
| 37 |
+
Requires-Dist: pyopenssl (==22.0.0) ; extra == 'enterprise_cert'
|
| 38 |
+
Provides-Extra: pyopenssl
|
| 39 |
+
Requires-Dist: pyopenssl (>=20.0.0) ; extra == 'pyopenssl'
|
| 40 |
+
Provides-Extra: reauth
|
| 41 |
+
Requires-Dist: pyu2f (>=0.1.5) ; extra == 'reauth'
|
| 42 |
+
|
| 43 |
+
Google Auth Python Library
|
| 44 |
+
==========================
|
| 45 |
+
|
| 46 |
+
|pypi|
|
| 47 |
+
|
| 48 |
+
This library simplifies using Google's various server-to-server authentication
|
| 49 |
+
mechanisms to access Google APIs.
|
| 50 |
+
|
| 51 |
+
.. |pypi| image:: https://img.shields.io/pypi/v/google-auth.svg
|
| 52 |
+
:target: https://pypi.python.org/pypi/google-auth
|
| 53 |
+
|
| 54 |
+
Installing
|
| 55 |
+
----------
|
| 56 |
+
|
| 57 |
+
You can install using `pip`_::
|
| 58 |
+
|
| 59 |
+
$ pip install google-auth
|
| 60 |
+
|
| 61 |
+
.. _pip: https://pip.pypa.io/en/stable/
|
| 62 |
+
|
| 63 |
+
For more information on setting up your Python development environment, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform.
|
| 64 |
+
|
| 65 |
+
.. _`Python Development Environment Setup Guide`: https://cloud.google.com/python/setup
|
| 66 |
+
|
| 67 |
+
Supported Python Versions
|
| 68 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 69 |
+
Python >= 3.6
|
| 70 |
+
|
| 71 |
+
Unsupported Python Versions
|
| 72 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 73 |
+
- Python == 2.7: The last version of this library with support for Python 2.7
|
| 74 |
+
was `google.auth == 1.34.0`.
|
| 75 |
+
|
| 76 |
+
- Python 3.5: The last version of this library with support for Python 3.5
|
| 77 |
+
was `google.auth == 1.23.0`.
|
| 78 |
+
|
| 79 |
+
Documentation
|
| 80 |
+
-------------
|
| 81 |
+
|
| 82 |
+
Google Auth Python Library has usage and reference documentation at https://googleapis.dev/python/google-auth/latest/index.html.
|
| 83 |
+
|
| 84 |
+
Current Maintainers
|
| 85 |
+
-------------------
|
| 86 |
+
- `@busunkim96 <https://github.com/busunkim96>`_ (Bu Sun Kim)
|
| 87 |
+
|
| 88 |
+
Authors
|
| 89 |
+
-------
|
| 90 |
+
|
| 91 |
+
- `@theacodes <https://github.com/theacodes>`_ (Thea Flowers)
|
| 92 |
+
- `@dhermes <https://github.com/dhermes>`_ (Danny Hermes)
|
| 93 |
+
- `@lukesneeringer <https://github.com/lukesneeringer>`_ (Luke Sneeringer)
|
| 94 |
+
|
| 95 |
+
Contributing
|
| 96 |
+
------------
|
| 97 |
+
|
| 98 |
+
Contributions to this library are always welcome and highly encouraged.
|
| 99 |
+
|
| 100 |
+
See `CONTRIBUTING.rst`_ for more information on how to get started.
|
| 101 |
+
|
| 102 |
+
.. _CONTRIBUTING.rst: https://github.com/googleapis/google-auth-library-python/blob/main/CONTRIBUTING.rst
|
| 103 |
+
|
| 104 |
+
License
|
| 105 |
+
-------
|
| 106 |
+
|
| 107 |
+
Apache 2.0 - See `the LICENSE`_ for more information.
|
| 108 |
+
|
| 109 |
+
.. _the LICENSE: https://github.com/googleapis/google-auth-library-python/blob/main/LICENSE
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
google/auth/__init__.py,sha256=mP2W7WAbIFXBhMSL4JA70a9cNkaB7lT4JT7OEqdbAGc,988
|
| 2 |
+
google/auth/__pycache__/__init__.cpython-38.pyc,,
|
| 3 |
+
google/auth/__pycache__/_cloud_sdk.cpython-38.pyc,,
|
| 4 |
+
google/auth/__pycache__/_credentials_async.cpython-38.pyc,,
|
| 5 |
+
google/auth/__pycache__/_default.cpython-38.pyc,,
|
| 6 |
+
google/auth/__pycache__/_default_async.cpython-38.pyc,,
|
| 7 |
+
google/auth/__pycache__/_helpers.cpython-38.pyc,,
|
| 8 |
+
google/auth/__pycache__/_jwt_async.cpython-38.pyc,,
|
| 9 |
+
google/auth/__pycache__/_oauth2client.cpython-38.pyc,,
|
| 10 |
+
google/auth/__pycache__/_service_account_info.cpython-38.pyc,,
|
| 11 |
+
google/auth/__pycache__/app_engine.cpython-38.pyc,,
|
| 12 |
+
google/auth/__pycache__/aws.cpython-38.pyc,,
|
| 13 |
+
google/auth/__pycache__/credentials.cpython-38.pyc,,
|
| 14 |
+
google/auth/__pycache__/downscoped.cpython-38.pyc,,
|
| 15 |
+
google/auth/__pycache__/environment_vars.cpython-38.pyc,,
|
| 16 |
+
google/auth/__pycache__/exceptions.cpython-38.pyc,,
|
| 17 |
+
google/auth/__pycache__/external_account.cpython-38.pyc,,
|
| 18 |
+
google/auth/__pycache__/iam.cpython-38.pyc,,
|
| 19 |
+
google/auth/__pycache__/identity_pool.cpython-38.pyc,,
|
| 20 |
+
google/auth/__pycache__/impersonated_credentials.cpython-38.pyc,,
|
| 21 |
+
google/auth/__pycache__/jwt.cpython-38.pyc,,
|
| 22 |
+
google/auth/__pycache__/pluggable.cpython-38.pyc,,
|
| 23 |
+
google/auth/__pycache__/version.cpython-38.pyc,,
|
| 24 |
+
google/auth/_cloud_sdk.py,sha256=EV_MTDCbcBH7gizIBuLswVRIaxMc41dswdaWoFKku5k,5272
|
| 25 |
+
google/auth/_credentials_async.py,sha256=YfF4VPD7zMCciRxHIT91VofYH-A6whU2d4ih8LdEeuY,6841
|
| 26 |
+
google/auth/_default.py,sha256=O6V2_VA1CH7hxG_tO8Kac0HJzr4_bkF7Zys2zJQXbZk,25524
|
| 27 |
+
google/auth/_default_async.py,sha256=TDX8qpfXSDvor37qwnC6KYiTbrKOQR47U2sjeQCplTQ,11588
|
| 28 |
+
google/auth/_helpers.py,sha256=K7kfUjkcaib1lno6UIfgm0ISfruRLRW5rxZV8jNHjQc,7129
|
| 29 |
+
google/auth/_jwt_async.py,sha256=5mGab5CkdnBMkQkS4mtNkwFkktp1jBw6G1sYQk8bYKY,5972
|
| 30 |
+
google/auth/_oauth2client.py,sha256=GIFPiMNA1B6myoxpLiZ3wvM-nYnxEfuu2jyI4BbPbMQ,5879
|
| 31 |
+
google/auth/_service_account_info.py,sha256=wFGniKBrTgRUUKKiLSlqe2ZS81liqpnNvm9FEfBhsNk,2781
|
| 32 |
+
google/auth/app_engine.py,sha256=Y7EhTCnxkzM68eFXoIZ-jwgbxzxeXJ_273Uh3Kqb6fE,6026
|
| 33 |
+
google/auth/aws.py,sha256=2g_kEjVUGcqloiqKiA6KJJuMOxmGPC_eAIgh2sgX0lo,30616
|
| 34 |
+
google/auth/compute_engine/__init__.py,sha256=bv5BKOb55ai4fGii1AGJI_SHrFQ2y4mdc06-iS1IBV8,804
|
| 35 |
+
google/auth/compute_engine/__pycache__/__init__.cpython-38.pyc,,
|
| 36 |
+
google/auth/compute_engine/__pycache__/_metadata.cpython-38.pyc,,
|
| 37 |
+
google/auth/compute_engine/__pycache__/credentials.cpython-38.pyc,,
|
| 38 |
+
google/auth/compute_engine/_metadata.py,sha256=wfA9ln_0Qp_vkZxx6m1AAb4XeKVZxznceAXUHw7_NFE,9413
|
| 39 |
+
google/auth/compute_engine/credentials.py,sha256=KJZyLgXKidG9cLIAAiKli9qnniPRi4mb--l6zjBNF68,15833
|
| 40 |
+
google/auth/credentials.py,sha256=GZ6NGmyVfLOZ-xupAqA9Q5sghbF0imbz61Ppf1c-imQ,13082
|
| 41 |
+
google/auth/crypt/__init__.py,sha256=2RMYnusyqaL2oqL_mr1KGn5VXVyv0B4ceUPUtjpgW8A,3356
|
| 42 |
+
google/auth/crypt/__pycache__/__init__.cpython-38.pyc,,
|
| 43 |
+
google/auth/crypt/__pycache__/_cryptography_rsa.cpython-38.pyc,,
|
| 44 |
+
google/auth/crypt/__pycache__/_helpers.cpython-38.pyc,,
|
| 45 |
+
google/auth/crypt/__pycache__/_python_rsa.cpython-38.pyc,,
|
| 46 |
+
google/auth/crypt/__pycache__/base.cpython-38.pyc,,
|
| 47 |
+
google/auth/crypt/__pycache__/es256.cpython-38.pyc,,
|
| 48 |
+
google/auth/crypt/__pycache__/rsa.cpython-38.pyc,,
|
| 49 |
+
google/auth/crypt/_cryptography_rsa.py,sha256=p052XRANm64VOBbJfS3zrzYS2tF2dYoUVlTY_XWYE9o,4564
|
| 50 |
+
google/auth/crypt/_helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 51 |
+
google/auth/crypt/_python_rsa.py,sha256=koFSr7voyh8hB7_YQK5-Yhgc-5EVlZ0C0xpeCbUbYI4,6069
|
| 52 |
+
google/auth/crypt/base.py,sha256=I7h2p_DY-dhiVZsv81nvBB6k6ElBco4Xxtfw5kTtra8,4203
|
| 53 |
+
google/auth/crypt/es256.py,sha256=afZ7S3BPp0qIheewQNsKtqxXaa3l5CBbndzyzgqCJLc,5657
|
| 54 |
+
google/auth/crypt/rsa.py,sha256=QI17aKQsX3gdbJkBef-zsm-X_YBjBollCaoA65Am-WI,1109
|
| 55 |
+
google/auth/downscoped.py,sha256=RUmplzgmyd6ukbQHeYDpQ7k8pMPAuNi6sn0gBASu-1k,21157
|
| 56 |
+
google/auth/environment_vars.py,sha256=Khzm8IP3p0u7mB21EhVF2KmvswzeI_DXfTrroXK5yvA,3156
|
| 57 |
+
google/auth/exceptions.py,sha256=ADO90QEyDMHxN_Bi4QfZRXRJ38FCLw0SWFxJO_WjHkI,1968
|
| 58 |
+
google/auth/external_account.py,sha256=cGPOQn5LG9OobfwFKnLK6dJeZ7QngdRY5PSvixi90n8,20222
|
| 59 |
+
google/auth/iam.py,sha256=e5WleTmCBanS2vpJ3dPjyJ90_EZl7U_dQnSBTiNbSPw,3652
|
| 60 |
+
google/auth/identity_pool.py,sha256=zjizAoBQks9tJ650-eJUhV6eDYTN_8pNhHA4mEttUgk,11757
|
| 61 |
+
google/auth/impersonated_credentials.py,sha256=ZhNG4oZXtgByyocor1IWLbPMbTGKzamtIRtUAWPyDKs,15773
|
| 62 |
+
google/auth/jwt.py,sha256=CBllfPS0_eSpVxksfkzEqztGvzk4y69jyQTSJEQGiT8,30037
|
| 63 |
+
google/auth/pluggable.py,sha256=_yx64vid8kaeGj1pq57rbbq7ji0fltrslRujiA0clOs,13205
|
| 64 |
+
google/auth/transport/__init__.py,sha256=eD8Iua8Sjc5TBFzBqrCP4s_9PC9IouWiIRdqDlz2AXM,3438
|
| 65 |
+
google/auth/transport/__pycache__/__init__.cpython-38.pyc,,
|
| 66 |
+
google/auth/transport/__pycache__/_aiohttp_requests.cpython-38.pyc,,
|
| 67 |
+
google/auth/transport/__pycache__/_custom_tls_signer.cpython-38.pyc,,
|
| 68 |
+
google/auth/transport/__pycache__/_http_client.cpython-38.pyc,,
|
| 69 |
+
google/auth/transport/__pycache__/_mtls_helper.cpython-38.pyc,,
|
| 70 |
+
google/auth/transport/__pycache__/grpc.cpython-38.pyc,,
|
| 71 |
+
google/auth/transport/__pycache__/mtls.cpython-38.pyc,,
|
| 72 |
+
google/auth/transport/__pycache__/requests.cpython-38.pyc,,
|
| 73 |
+
google/auth/transport/__pycache__/urllib3.cpython-38.pyc,,
|
| 74 |
+
google/auth/transport/_aiohttp_requests.py,sha256=tJ9nKGMBpdpy5585F4629nFcd_ssN8w-0-WGnC7Q8Rw,14400
|
| 75 |
+
google/auth/transport/_custom_tls_signer.py,sha256=YvJmFg27AOjpJYcaKECyaAjyUZhQnbz4WLtC-KqYgpc,8400
|
| 76 |
+
google/auth/transport/_http_client.py,sha256=yy-cYYr87NWbMnRkIrS0wXvtyVkCa8G1zz23utqubPQ,3739
|
| 77 |
+
google/auth/transport/_mtls_helper.py,sha256=O0wKoHKnw2j9a035zImBDmuWbsQ2wjU0SrGsaUhMAuY,9086
|
| 78 |
+
google/auth/transport/grpc.py,sha256=3FBbfzNSwX_rtX5MaURLq15jpjjRmykPyQ_FB031LE8,13995
|
| 79 |
+
google/auth/transport/mtls.py,sha256=c6-0hJZ5xZA9lm_hy921UH30q07etKSh2hMan9YO5oI,3817
|
| 80 |
+
google/auth/transport/requests.py,sha256=Ijl-U2veKqvGH8M5365qYrAwT8UrxYCycbyR787pJuA,22877
|
| 81 |
+
google/auth/transport/urllib3.py,sha256=chpPDb3nsAmEVR67mf4CM79y4gLDpqseCbEs-0Jw2Zc,15812
|
| 82 |
+
google/auth/version.py,sha256=_4pIPChQJvCs4W3CkrqBfoDo7sUtOl5ryV2tHuMF5zM,597
|
| 83 |
+
google/oauth2/__init__.py,sha256=iDyTpxuh864rLf4YyINgeO4wMJLQ53EUMPJpiw1GA3U,619
|
| 84 |
+
google/oauth2/__pycache__/__init__.cpython-38.pyc,,
|
| 85 |
+
google/oauth2/__pycache__/_client.cpython-38.pyc,,
|
| 86 |
+
google/oauth2/__pycache__/_client_async.cpython-38.pyc,,
|
| 87 |
+
google/oauth2/__pycache__/_credentials_async.cpython-38.pyc,,
|
| 88 |
+
google/oauth2/__pycache__/_id_token_async.cpython-38.pyc,,
|
| 89 |
+
google/oauth2/__pycache__/_reauth_async.cpython-38.pyc,,
|
| 90 |
+
google/oauth2/__pycache__/_service_account_async.cpython-38.pyc,,
|
| 91 |
+
google/oauth2/__pycache__/challenges.cpython-38.pyc,,
|
| 92 |
+
google/oauth2/__pycache__/credentials.cpython-38.pyc,,
|
| 93 |
+
google/oauth2/__pycache__/gdch_credentials.cpython-38.pyc,,
|
| 94 |
+
google/oauth2/__pycache__/id_token.cpython-38.pyc,,
|
| 95 |
+
google/oauth2/__pycache__/reauth.cpython-38.pyc,,
|
| 96 |
+
google/oauth2/__pycache__/service_account.cpython-38.pyc,,
|
| 97 |
+
google/oauth2/__pycache__/sts.cpython-38.pyc,,
|
| 98 |
+
google/oauth2/__pycache__/utils.cpython-38.pyc,,
|
| 99 |
+
google/oauth2/_client.py,sha256=p0LTuNWNm0yFAo5EsuQA5Ly1Pyt6JImrgyqqddHNgoA,12831
|
| 100 |
+
google/oauth2/_client_async.py,sha256=WMpOuZzFp5iJ3ErWcozORzoQKObbxjzMDcgg0sLRCxo,9374
|
| 101 |
+
google/oauth2/_credentials_async.py,sha256=axe4Pdl6sqAg7rBJbIaJYn-hbdr-Zjr7Zci3uKN7Tjk,4257
|
| 102 |
+
google/oauth2/_id_token_async.py,sha256=teuRAbkny4mEzaYggkmPNpPlaMuafR5eikOo2g7Gj0E,10147
|
| 103 |
+
google/oauth2/_reauth_async.py,sha256=fp9HlfcyerR2gbp2P5rSMCfwblCrvcxjlK9FVpmZVYc,11655
|
| 104 |
+
google/oauth2/_service_account_async.py,sha256=5-HBGWoHhbWpCRbd34YiopQepEsEf8gSiuMlSm5hN84,5131
|
| 105 |
+
google/oauth2/challenges.py,sha256=uU5EWoCFlWzMsy9lhu_ZeTwupd2dLKW_ISURseV8LTE,6101
|
| 106 |
+
google/oauth2/credentials.py,sha256=luAnoH22A9-25OTLMxZ7zjStZr2FYMCk93U3aii1i8w,19467
|
| 107 |
+
google/oauth2/gdch_credentials.py,sha256=CY6iPnPuc2OCIe1Zujwg1Mu9QSl1iGJqGOy6TkUleHw,9007
|
| 108 |
+
google/oauth2/id_token.py,sha256=sBq3qbZIFeacHwGmhSMH8WH-3VeoGYTo1QlWaNkrIu0,12072
|
| 109 |
+
google/oauth2/reauth.py,sha256=C_5_xab0VKKQaBsbXYo5ixkBwmG8ZX_q6VKR2iAQ-wQ,12074
|
| 110 |
+
google/oauth2/service_account.py,sha256=RO6mZzcXoR4faooVTpF6cCEbu0pLXrn8dSXoeMNaDNo,25930
|
| 111 |
+
google/oauth2/sts.py,sha256=PPQLgvuClKRQj5mdmbF8toFhVd28bmBnlgatVYZ5_U4,6058
|
| 112 |
+
google/oauth2/utils.py,sha256=aZHOBG7VCDnltEk21bQHkoMPWbUWqyq_u1eNUGriIuc,6344
|
| 113 |
+
google_auth-2.9.0-py3.10-nspkg.pth,sha256=xH5gTxc4UipYP3qrbP-4CCHNGBV97eBR4QqhheCvBl4,539
|
| 114 |
+
google_auth-2.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 115 |
+
google_auth-2.9.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
| 116 |
+
google_auth-2.9.0.dist-info/METADATA,sha256=W_vl6bszdT200BnuGsms1KG9U8c5qnqOuKf9wKUohOA,3793
|
| 117 |
+
google_auth-2.9.0.dist-info/RECORD,,
|
| 118 |
+
google_auth-2.9.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 119 |
+
google_auth-2.9.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110
|
| 120 |
+
google_auth-2.9.0.dist-info/namespace_packages.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
|
| 121 |
+
google_auth-2.9.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google_auth-2.9.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.37.1)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py2-none-any
|
| 5 |
+
Tag: py3-none-any
|
| 6 |
+
|