diff --git a/.gitattributes b/.gitattributes
index 21659546fa5a144fd46dc1ce05f70aff390146fe..1710a0377ae1535ca8672565d0af8921989a5f2d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -175,3 +175,4 @@ my_container_sandbox/workspace/anaconda3/pkgs/conda-package-handling-2.2.0-pyh38
my_container_sandbox/workspace/anaconda3/pkgs/sqlite-3.36.0-hc218d9a_0.conda filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/pkgs/certifi-2021.5.30-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/pkgs/ncurses-6.2-he6710b0_1.conda filter=lfs diff=lfs merge=lfs -text
+my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
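(Illustrative aside, not part of the patch itself: attribute lines like the one
added above are normally generated by the Git LFS CLI rather than written by
hand. Running, for example,

    git lfs track "my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda"

appends the corresponding "filter=lfs diff=lfs merge=lfs -text" rule to
.gitattributes, which is what this hunk records.)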
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3c17d6828776ae4eb45dfed5874ed21fac42b7a
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/__init__.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/ascii.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/ascii.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc8b2a58f9aa7a3b7808957c7633ed8ad07ba93e
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/ascii.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/big5hkscs.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/big5hkscs.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88a2782f54fc32be2acfe9b35263881f83ca9394
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/big5hkscs.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/bz2_codec.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/bz2_codec.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35b73bd708372ad5db01cdc59456d9e8d8e26462
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/bz2_codec.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/charmap.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/charmap.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71fe188929d69abaeeada9f7853f6b18d0f4ceb7
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/charmap.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1140.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1140.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec0ede8a42499f1e8664d4315ab2b330b2ae7e9d
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1140.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1255.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1255.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6bbc8ff8a4bda61fd8e48f7b218fca7838c26ea0
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1255.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1257.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1257.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be341cb5a4c7e497ed59a05bfe909f5d0d1cf81e
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1257.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1258.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1258.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e892292b2e548e92d9a4aae84452f21581c88edf
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp1258.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp437.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp437.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ad32f526bbe080541bf61083970bf5c0d96ec3c
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp437.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp500.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp500.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e37950ad7b511e47939070e5a8bc2ddf4b434ecf
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp500.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp857.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp857.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9606b3e57fa96d59e86276b4f1667f42c3965130
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp857.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp863.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp863.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99d28519c49f30be27b8186e56de04020a0c4053
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp863.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp864.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp864.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7179d93ed7ddbae023e60ab08ed69cd0e08463ab
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp864.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp869.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp869.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5168e7ebba75a8fcc8925e9686376ec3ba3d82c
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp869.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp874.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp874.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a436f93ba3c0d45995b18763763c5e4b8ea1a5d1
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp874.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp875.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp875.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7d833acca99be7fa138d87038089d2c4d2dbe6b
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/cp875.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jisx0213.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jisx0213.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86688649cba8b0d11295898f65138f9081d64dd9
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jisx0213.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jp.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jp.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..076e6cf2eb64c6101fc4474d1ca4a92a107239bc
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_jp.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_kr.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_kr.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30fff560bffe3d1ec56549794d2afdab1a5df70e
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/euc_kr.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb18030.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb18030.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4d6ca23123b2c5918d2fab0c9cbe7bd77831d33
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb18030.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb2312.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb2312.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1db2acd1500b51ab5c768ed21465abb15fcfef1
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gb2312.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gbk.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gbk.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d2544b69d691197d30fc815a41af413ccc57598
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/gbk.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/hex_codec.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/hex_codec.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4189862be4adf6015d9e09d8bd5faf64e89333fa
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/hex_codec.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/idna.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/idna.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f77679e71ab98d7b68a2329810fa9f8270ad5ee
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/idna.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab2151145ddf251b3be85f0786ee277186eea84e
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp_1.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp_1.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8193a4ab83d9a60a87aca1669f690dc1a49cb00
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_jp_1.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_kr.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_kr.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8cc1d84adc6b8371bc741a3af139c109f4f24e86
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso2022_kr.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_14.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_14.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..554e7bffe1a9726b325b9903a52a5a5f3c8d579c
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_14.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_2.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8c351550fa097cb8bc90a1495b840acc944087f
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_2.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_3.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_3.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26f4b0438f7d41d58a9dc8665b2dd6c921d127f1
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/iso8859_3.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/johab.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/johab.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca40f35a4adb899d81cb617c8e7d8ddbb3f7bb97
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/johab.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/kz1048.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/kz1048.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f09fa9344c543308a444ce1e9be75bdf0fead44
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/kz1048.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/latin_1.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/latin_1.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..937982f1205b7382eaa02796088517d74b7bd48b
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/latin_1.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_croatian.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_croatian.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12d9c8bac9de3a3f38b5f5f15728d3c6b3909c48
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_croatian.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_farsi.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_farsi.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..546bc7f437ffc17fef8a2fee2aefcaf05057b5aa
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_farsi.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_iceland.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_iceland.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c2de36bd632c3ba8e20401ea3568c8a5cfb61b3
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_iceland.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_latin2.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_latin2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..149724295d6a25d201ceeb8928d9c22c502fcada
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_latin2.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_turkish.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_turkish.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f1ed5349f5e6a581a229b5de074ed5a4934b283
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/mac_turkish.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/punycode.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/punycode.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1416bdc6f39e0b74d78fda81b069b11b7e91660b
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/punycode.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/raw_unicode_escape.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/raw_unicode_escape.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93d280e137dad4eeaaa8a405ad1ad67a6d101194
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/raw_unicode_escape.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/rot_13.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/rot_13.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6aec7bcbf87e33674bb0c7922b52454d2484c60
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/rot_13.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/tis_620.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/tis_620.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd75a126c116c18adc5a9bb101fc9ae8dd799b53
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/tis_620.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/unicode_escape.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/unicode_escape.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..210bcf2cc0e8513dc23c1dd6d3a55e953b651a30
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/unicode_escape.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_16_be.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_16_be.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6ba4216fcfc5eb189b310b1786bd9ba3ffd0327
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_16_be.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_32_le.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_32_le.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..681df6aaa41130533324e65ff7173bd8a3c16794
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_32_le.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_7.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_7.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82f467d4074da1582474d60332560124e71638a5
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_7.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_8.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_8.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19a7a40cd2bb020c148bfcf662398c5b3a562c56
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/encodings/__pycache__/utf_8.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/pydoc_data/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/pydoc_data/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..558f3b5eae9532dde4bac1127784dce08f6a9615
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/pydoc_data/__pycache__/__init__.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4cac92a42621553dc7007145f1a3240b0e29505a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/LICENSE
@@ -0,0 +1,636 @@
+The Python Imaging Library (PIL) is
+
+ Copyright © 1997-2011 by Secret Labs AB
+ Copyright © 1995-2011 by Fredrik Lundh
+
+Pillow is the friendly PIL fork. It is
+
+ Copyright © 2010-2022 by Alex Clark and contributors
+
+Like PIL, Pillow is licensed under the open source HPND License:
+
+By obtaining, using, and/or copying this software and/or its associated
+documentation, you agree that you have read, understood, and will comply
+with the following terms and conditions:
+
+Permission to use, copy, modify, and distribute this software and its
+associated documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies, and that
+both that copyright notice and this permission notice appear in supporting
+documentation, and that the name of Secret Labs AB or the author not be
+used in advertising or publicity pertaining to distribution of the software
+without specific, written prior permission.
+
+SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL,
+INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+
+----
+
+FREETYPE2
+
+The FreeType 2 font engine is copyrighted work and cannot be used
+legally without a software license. In order to make this project
+usable to a vast majority of developers, we distribute it under two
+mutually exclusive open-source licenses.
+
+This means that *you* must choose *one* of the two licenses described
+below, then obey all its terms and conditions when using FreeType 2 in
+any of your projects or products.
+
+ - The FreeType License, found in the file `FTL.TXT', which is similar
+ to the original BSD license *with* an advertising clause that forces
+ you to explicitly cite the FreeType project in your product's
+ documentation. All details are in the license file. This license
+ is suited to products which don't use the GNU General Public
+ License.
+
+ Note that this license is compatible to the GNU General Public
+ License version 3, but not version 2.
+
+ - The GNU General Public License version 2, found in `GPLv2.TXT' (any
+ later version can be used also), for programs which already use the
+ GPL. Note that the FTL is incompatible with GPLv2 due to its
+ advertisement clause.
+
+The contributed BDF and PCF drivers come with a license similar to that
+of the X Window System. It is compatible to the above two licenses (see
+file src/bdf/README and src/pcf/README). The same holds for the files
+`fthash.c' and `fthash.h'; their code was part of the BDF driver in
+earlier FreeType versions.
+
+The gzip module uses the zlib license (see src/gzip/zlib.h) which too is
+compatible to the above two licenses.
+
+The MD5 checksum support (only used for debugging in development builds)
+is in the public domain.
+
+----
+
+HARFBUZZ
+
+HarfBuzz is licensed under the so-called "Old MIT" license. Details follow.
+For parts of HarfBuzz that are licensed under different licenses see individual
+files names COPYING in subdirectories where applicable.
+
+Copyright © 2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020 Google, Inc.
+Copyright © 2018,2019,2020 Ebrahim Byagowi
+Copyright © 2019,2020 Facebook, Inc.
+Copyright © 2012 Mozilla Foundation
+Copyright © 2011 Codethink Limited
+Copyright © 2008,2010 Nokia Corporation and/or its subsidiary(-ies)
+Copyright © 2009 Keith Stribley
+Copyright © 2009 Martin Hosken and SIL International
+Copyright © 2007 Chris Wilson
+Copyright © 2006 Behdad Esfahbod
+Copyright © 2005 David Turner
+Copyright © 2004,2007,2008,2009,2010 Red Hat, Inc.
+Copyright © 1998-2004 David Turner and Werner Lemberg
+
+For full copyright notices consult the individual files in the package.
+
+
+Permission is hereby granted, without written agreement and without
+license or royalty fees, to use, copy, modify, and distribute this
+software and its documentation for any purpose, provided that the
+above copyright notice and the following two paragraphs appear in
+all copies of this software.
+
+IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+
+----
+
+LCMS2
+
+Little CMS
+Copyright (c) 1998-2020 Marti Maria Saguer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+----
+
+LIBJPEG
+
+1. We don't promise that this software works. (But if you find any bugs,
+ please let us know!)
+2. You can use this software for whatever you want. You don't have to pay us.
+3. You may not pretend that you wrote this software. If you use it in a
+ program, you must acknowledge somewhere in your documentation that
+ you've used the IJG code.
+
+In legalese:
+
+The authors make NO WARRANTY or representation, either express or implied,
+with respect to this software, its quality, accuracy, merchantability, or
+fitness for a particular purpose. This software is provided "AS IS", and you,
+its user, assume the entire risk as to its quality and accuracy.
+
+This software is copyright (C) 1991-2020, Thomas G. Lane, Guido Vollbeding.
+All Rights Reserved except as specified below.
+
+Permission is hereby granted to use, copy, modify, and distribute this
+software (or portions thereof) for any purpose, without fee, subject to these
+conditions:
+(1) If any part of the source code for this software is distributed, then this
+README file must be included, with this copyright and no-warranty notice
+unaltered; and any additions, deletions, or changes to the original files
+must be clearly indicated in accompanying documentation.
+(2) If only executable code is distributed, then the accompanying
+documentation must state that "this software is based in part on the work of
+the Independent JPEG Group".
+(3) Permission for use of this software is granted only if the user accepts
+full responsibility for any undesirable consequences; the authors accept
+NO LIABILITY for damages of any kind.
+
+These conditions apply to any software derived from or based on the IJG code,
+not just to the unmodified library. If you use our work, you ought to
+acknowledge us.
+
+Permission is NOT granted for the use of any IJG author's name or company name
+in advertising or publicity relating to this software or products derived from
+it. This software may be referred to only as "the Independent JPEG Group's
+software".
+
+We specifically permit and encourage the use of this software as the basis of
+commercial products, provided that all warranty or liability claims are
+assumed by the product vendor.
+
+----
+
+LIBLZMA
+
+XZ Utils Licensing
+==================
+
+ Different licenses apply to different files in this package. Here
+ is a rough summary of which licenses apply to which parts of this
+ package (but check the individual files to be sure!):
+
+ - liblzma is in the public domain.
+
+ - xz, xzdec, and lzmadec command line tools are in the public
+ domain unless GNU getopt_long had to be compiled and linked
+ in from the lib directory. The getopt_long code is under
+ GNU LGPLv2.1+.
+
+ - The scripts to grep, diff, and view compressed files have been
+ adapted from gzip. These scripts and their documentation are
+ under GNU GPLv2+.
+
+ - All the documentation in the doc directory and most of the
+ XZ Utils specific documentation files in other directories
+ are in the public domain.
+
+ - Translated messages are in the public domain.
+
+ - The build system contains public domain files, and files that
+ are under GNU GPLv2+ or GNU GPLv3+. None of these files end up
+ in the binaries being built.
+
+ - Test files and test code in the tests directory, and debugging
+ utilities in the debug directory are in the public domain.
+
+ - The extra directory may contain public domain files, and files
+ that are under various free software licenses.
+
+ You can do whatever you want with the files that have been put into
+ the public domain. If you find public domain legally problematic,
+ take the previous sentence as a license grant. If you still find
+ the lack of copyright legally problematic, you have too many
+ lawyers.
+
+ As usual, this software is provided "as is", without any warranty.
+
+ If you copy significant amounts of public domain code from XZ Utils
+ into your project, acknowledging this somewhere in your software is
+ polite (especially if it is proprietary, non-free software), but
+ naturally it is not legally required. Here is an example of a good
+ notice to put into "about box" or into documentation:
+
+        This software includes code from XZ Utils <https://tukaani.org/xz/>.
+
+ The following license texts are included in the following files:
+ - COPYING.LGPLv2.1: GNU Lesser General Public License version 2.1
+ - COPYING.GPLv2: GNU General Public License version 2
+ - COPYING.GPLv3: GNU General Public License version 3
+
+ Note that the toolchain (compiler, linker etc.) may add some code
+ pieces that are copyrighted. Thus, it is possible that e.g. liblzma
+ binary wouldn't actually be in the public domain in its entirety
+ even though it contains no copyrighted code from the XZ Utils source
+ package.
+
+ If you have questions, don't hesitate to ask the author(s) for more
+ information.
+
+----
+
+LIBTIFF
+
+Copyright (c) 1988-1997 Sam Leffler
+Copyright (c) 1991-1997 Silicon Graphics, Inc.
+
+Permission to use, copy, modify, distribute, and sell this software and
+its documentation for any purpose is hereby granted without fee, provided
+that (i) the above copyright notices and this permission notice appear in
+all copies of the software and related documentation, and (ii) the names of
+Sam Leffler and Silicon Graphics may not be used in any advertising or
+publicity relating to the software without the specific, prior written
+permission of Sam Leffler and Silicon Graphics.
+
+THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR
+ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
+OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
+LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+OF THIS SOFTWARE.
+
+----
+
+LIBWEBP
+
+Copyright (c) 2010, Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Google nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+OPENJPEG
+
+/*
+ * The copyright in this software is being made available under the 2-clauses
+ * BSD License, included below. This software may be subject to other third
+ * party and contributor rights, including patent rights, and no such rights
+ * are granted under this license.
+ *
+ * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
+ * Copyright (c) 2002-2014, Professor Benoit Macq
+ * Copyright (c) 2003-2014, Antonin Descampe
+ * Copyright (c) 2003-2009, Francois-Olivier Devaux
+ * Copyright (c) 2005, Herve Drolon, FreeImage Team
+ * Copyright (c) 2002-2003, Yannick Verschueren
+ * Copyright (c) 2001-2003, David Janssens
+ * Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France
+ * Copyright (c) 2012, CS Systemes d'Information, France
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+----
+
+COPYRIGHT NOTICE, DISCLAIMER, and LICENSE
+=========================================
+
+PNG Reference Library License version 2
+---------------------------------------
+
+ * Copyright (c) 1995-2019 The PNG Reference Library Authors.
+ * Copyright (c) 2018-2019 Cosmin Truta.
+ * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson.
+ * Copyright (c) 1996-1997 Andreas Dilger.
+ * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
+
+The software is supplied "as is", without warranty of any kind,
+express or implied, including, without limitation, the warranties
+of merchantability, fitness for a particular purpose, title, and
+non-infringement. In no event shall the Copyright owners, or
+anyone distributing the software, be liable for any damages or
+other liability, whether in contract, tort or otherwise, arising
+from, out of, or in connection with the software, or the use or
+other dealings in the software, even if advised of the possibility
+of such damage.
+
+Permission is hereby granted to use, copy, modify, and distribute
+this software, or portions hereof, for any purpose, without fee,
+subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you
+ must not claim that you wrote the original software. If you
+ use this software in a product, an acknowledgment in the product
+ documentation would be appreciated, but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 3. This Copyright notice may not be removed or altered from any
+ source or altered source distribution.
+
+
+PNG Reference Library License version 1 (for libpng 0.5 through 1.6.35)
+-----------------------------------------------------------------------
+
+libpng versions 1.0.7, July 1, 2000, through 1.6.35, July 15, 2018 are
+Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson, are
+derived from libpng-1.0.6, and are distributed according to the same
+disclaimer and license as libpng-1.0.6 with the following individuals
+added to the list of Contributing Authors:
+
+ Simon-Pierre Cadieux
+ Eric S. Raymond
+ Mans Rullgard
+ Cosmin Truta
+ Gilles Vollant
+ James Yu
+ Mandar Sahastrabuddhe
+ Google Inc.
+ Vadim Barkov
+
+and with the following additions to the disclaimer:
+
+ There is no warranty against interference with your enjoyment of
+ the library or against infringement. There is no warranty that our
+ efforts or the library will fulfill any of your particular purposes
+ or needs. This library is provided with all faults, and the entire
+ risk of satisfactory quality, performance, accuracy, and effort is
+ with the user.
+
+Some files in the "contrib" directory and some configure-generated
+files that are distributed with libpng have other copyright owners, and
+are released under other open source licenses.
+
+libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are
+Copyright (c) 1998-2000 Glenn Randers-Pehrson, are derived from
+libpng-0.96, and are distributed according to the same disclaimer and
+license as libpng-0.96, with the following individuals added to the
+list of Contributing Authors:
+
+ Tom Lane
+ Glenn Randers-Pehrson
+ Willem van Schaik
+
+libpng versions 0.89, June 1996, through 0.96, May 1997, are
+Copyright (c) 1996-1997 Andreas Dilger, are derived from libpng-0.88,
+and are distributed according to the same disclaimer and license as
+libpng-0.88, with the following individuals added to the list of
+Contributing Authors:
+
+ John Bowler
+ Kevin Bracey
+ Sam Bushell
+ Magnus Holmgren
+ Greg Roelofs
+ Tom Tanner
+
+Some files in the "scripts" directory have other copyright owners,
+but are released under this license.
+
+libpng versions 0.5, May 1995, through 0.88, January 1996, are
+Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
+
+For the purposes of this copyright and license, "Contributing Authors"
+is defined as the following set of individuals:
+
+ Andreas Dilger
+ Dave Martindale
+ Guy Eric Schalnat
+ Paul Schmidt
+ Tim Wegner
+
+The PNG Reference Library is supplied "AS IS". The Contributing
+Authors and Group 42, Inc. disclaim all warranties, expressed or
+implied, including, without limitation, the warranties of
+merchantability and of fitness for any purpose. The Contributing
+Authors and Group 42, Inc. assume no liability for direct, indirect,
+incidental, special, exemplary, or consequential damages, which may
+result from the use of the PNG Reference Library, even if advised of
+the possibility of such damage.
+
+Permission is hereby granted to use, copy, modify, and distribute this
+source code, or portions hereof, for any purpose, without fee, subject
+to the following restrictions:
+
+ 1. The origin of this source code must not be misrepresented.
+
+ 2. Altered versions must be plainly marked as such and must not
+ be misrepresented as being the original source.
+
+ 3. This Copyright notice may not be removed or altered from any
+ source or altered source distribution.
+
+The Contributing Authors and Group 42, Inc. specifically permit,
+without fee, and encourage the use of this source code as a component
+to supporting the PNG file format in commercial products. If you use
+this source code in a product, acknowledgment is not required but would
+be appreciated.
+
+----
+
+RAQM
+
+The MIT License (MIT)
+
+Copyright © 2015 Information Technology Authority (ITA)
+Copyright © 2016 Khaled Hosny
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+----
+
+XAU
+
+Copyright 1988, 1993, 1994, 1998 The Open Group
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that
+the above copyright notice appear in all copies and that both that
+copyright notice and this permission notice appear in supporting
+documentation.
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of The Open Group shall not be
+used in advertising or otherwise to promote the sale, use or other dealings
+in this Software without prior written authorization from The Open Group.
+
+----
+
+XCB
+
+Copyright (C) 2001-2006 Bart Massey, Jamey Sharp, and Josh Triplett.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall
+be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the names of the authors
+or their institutions shall not be used in advertising or
+otherwise to promote the sale, use or other dealings in this
+Software without prior written authorization from the
+authors.
+
+----
+
+XDMCP
+
+Copyright 1989, 1998 The Open Group
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that
+the above copyright notice appear in all copies and that both that
+copyright notice and this permission notice appear in supporting
+documentation.
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of The Open Group shall not be
+used in advertising or otherwise to promote the sale, use or other dealings
+in this Software without prior written authorization from The Open Group.
+
+Author: Keith Packard, MIT X Consortium
+
+----
+
+ZLIB
+
+ (C) 1995-2017 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+If you use the zlib library in a product, we would appreciate *not* receiving
+lengthy legal documents to sign. The sources are provided for free but without
+warranty of any kind. The library has been entirely written by Jean-loup
+Gailly and Mark Adler; it does not include third-party code.
+
+If you redistribute modified sources, we would appreciate that you include in
+the file ChangeLog history information documenting your changes. Please read
+the FAQ for more information on the distribution of modified source versions.
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..cda43fa180b37c262b7d7f839e2952b44254fab5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/METADATA
@@ -0,0 +1,164 @@
+Metadata-Version: 2.1
+Name: Pillow
+Version: 9.1.1
+Summary: Python Imaging Library (Fork)
+Home-page: https://python-pillow.org
+Author: Alex Clark (PIL Fork Author)
+Author-email: aclark@python-pillow.org
+License: HPND
+Project-URL: Documentation, https://pillow.readthedocs.io
+Project-URL: Source, https://github.com/python-pillow/Pillow
+Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=pypi
+Project-URL: Release notes, https://pillow.readthedocs.io/en/stable/releasenotes/index.html
+Project-URL: Changelog, https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst
+Project-URL: Twitter, https://twitter.com/PythonPillow
+Keywords: Imaging
+Classifier: Development Status :: 6 - Mature
+Classifier: License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND)
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Multimedia :: Graphics
+Classifier: Topic :: Multimedia :: Graphics :: Capture :: Digital Camera
+Classifier: Topic :: Multimedia :: Graphics :: Capture :: Screen Capture
+Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion
+Classifier: Topic :: Multimedia :: Graphics :: Viewers
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: docs
+Requires-Dist: olefile ; extra == 'docs'
+Requires-Dist: sphinx (>=2.4) ; extra == 'docs'
+Requires-Dist: sphinx-copybutton ; extra == 'docs'
+Requires-Dist: sphinx-issues (>=3.0.1) ; extra == 'docs'
+Requires-Dist: sphinx-removed-in ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme (>=1.0) ; extra == 'docs'
+Requires-Dist: sphinxext-opengraph ; extra == 'docs'
+Provides-Extra: tests
+Requires-Dist: check-manifest ; extra == 'tests'
+Requires-Dist: coverage ; extra == 'tests'
+Requires-Dist: defusedxml ; extra == 'tests'
+Requires-Dist: markdown2 ; extra == 'tests'
+Requires-Dist: olefile ; extra == 'tests'
+Requires-Dist: packaging ; extra == 'tests'
+Requires-Dist: pyroma ; extra == 'tests'
+Requires-Dist: pytest ; extra == 'tests'
+Requires-Dist: pytest-cov ; extra == 'tests'
+Requires-Dist: pytest-timeout ; extra == 'tests'
+
+# Pillow
+
+## Python Imaging Library (Fork)
+
+Pillow is the friendly PIL fork by [Alex Clark and
+Contributors](https://github.com/python-pillow/Pillow/graphs/contributors).
+PIL is the Python Imaging Library by Fredrik Lundh and Contributors.
+As of 2019, Pillow development is
+[supported by Tidelift](https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=readme&utm_campaign=enterprise).
+
+
+<!-- badge table: docs | tests | package | social -->
+
+## Overview
+
+The Python Imaging Library adds image processing capabilities to your Python interpreter.
+
+This library provides extensive file format support, an efficient internal representation, and fairly powerful image processing capabilities.
+
+The core image library is designed for fast access to data stored in a few basic pixel formats. It should provide a solid foundation for a general image processing tool.
+
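+A minimal usage sketch (an illustrative addition, not part of the upstream
+README; it assumes an image file named `example.jpg` exists):
+
+```python
+from PIL import Image
+
+# Open an image, print its basic properties, and save a resized thumbnail.
+with Image.open("example.jpg") as im:
+    print(im.format, im.size, im.mode)
+    im.resize((128, 128)).save("example_thumb.png")
+```
+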
+## More Information
+
+- [Documentation](https://pillow.readthedocs.io/)
+ - [Installation](https://pillow.readthedocs.io/en/latest/installation.html)
+ - [Handbook](https://pillow.readthedocs.io/en/latest/handbook/index.html)
+- [Contribute](https://github.com/python-pillow/Pillow/blob/main/.github/CONTRIBUTING.md)
+ - [Issues](https://github.com/python-pillow/Pillow/issues)
+ - [Pull requests](https://github.com/python-pillow/Pillow/pulls)
+- [Release notes](https://pillow.readthedocs.io/en/stable/releasenotes/index.html)
+- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
+ - [Pre-fork](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst#pre-fork)
+
+## Report a Vulnerability
+
+To report a security vulnerability, please follow the procedure described in the [Tidelift security policy](https://tidelift.com/docs/security).
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..dd9c5392aa6396fd25816c0647a3006f15c0f848
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/RECORD
@@ -0,0 +1,214 @@
+PIL/BdfFontFile.py,sha256=hRnSgFZOIiTgWfJIaRHRQpU4TKVok2E31KJY6sbZPwc,2817
+PIL/BlpImagePlugin.py,sha256=SVs3I88sIWw7ibWRnAzDd9T-dIrWD6x00ZAf2HgNjh8,16143
+PIL/BmpImagePlugin.py,sha256=d9hGPxD0wjT_qhchHtiigxYLlFAGG61WcdUqEHqleTk,16252
+PIL/BufrStubImagePlugin.py,sha256=DE_t_ch4-YH_oimXYNMCefin4kcru6Uc2H_OTmwR6y4,1518
+PIL/ContainerIO.py,sha256=1U15zUXjWO8uWK-MyCp66Eh7djQEU-oUeCDoBqewNkA,2883
+PIL/CurImagePlugin.py,sha256=er_bI3V1Ezly0QfFJq0fZMlGwrD5izDutwF1FrOwiMA,1679
+PIL/DcxImagePlugin.py,sha256=bfESLTji9GerqI4oYsy5oTFyRMlr2mjSsXzpY9IuLsk,2145
+PIL/DdsImagePlugin.py,sha256=-sz60zvpuz89nyUobCPhdf-KWaT1yyeEa5PbRlxLMOw,8071
+PIL/EpsImagePlugin.py,sha256=qUxbQVsnzRyveDs9b8Co98sd0klsKr5GLCWtY3xbmB8,11949
+PIL/ExifTags.py,sha256=0YRoKyMwPabWOZZgVeLL6mlaGjbZgfF-z8WuUc6Ibb0,9446
+PIL/FitsImagePlugin.py,sha256=15BrvLXsw0F8WjBbP6-1RPHbJ4Lbd39OP4wkikcstC0,1971
+PIL/FitsStubImagePlugin.py,sha256=ETXbjvAFVMPfm51RNvPuo2B_pNRrJVuNNX-7-RmLUqw,1718
+PIL/FliImagePlugin.py,sha256=fR-Z9uY1udQu6FvzSqZJ3DAmIAaJUKsCNbO7OHN39cY,4239
+PIL/FontFile.py,sha256=LkQcbwUu1C4fokMnbg-ao9ksp2RX-saaPRie-z2rpH4,2765
+PIL/FpxImagePlugin.py,sha256=nKGioxa5C0q9X9qva3t_htRV_3jXQcFkclVxTEaSusk,6658
+PIL/FtexImagePlugin.py,sha256=TkvwTKeFRd1Qhcg6GyTBuFPI118MnegxC-JUoJMQVyY,4175
+PIL/GbrImagePlugin.py,sha256=K-olSg1M2bF2IofUeLABXfI1JLdrWsgiiU6yUTPhSWM,2795
+PIL/GdImageFile.py,sha256=JFWSUssG1z1r884GQtBbZ3T7uhPF4cDXSuW3ctgf3TU,2465
+PIL/GifImagePlugin.py,sha256=xA8QdF_rPNvr7Ceegl0I58FxsTxYaxuM4xpk9JoSZ3k,34458
+PIL/GimpGradientFile.py,sha256=G0ClRmjRHIJoU0nmG-P-tgehLHZip5i0rY4-5pjJ7bc,3353
+PIL/GimpPaletteFile.py,sha256=MGpf0WF_yTtMAXWvO_wlurgv_y80SX66EXprl6UIunM,1274
+PIL/GribStubImagePlugin.py,sha256=CocpZJIN8ckBtMQbq1VMA7NEKW5gwlzQ_mRZhHoyZho,1513
+PIL/Hdf5StubImagePlugin.py,sha256=FJ7-Vz1KY-DEOfrZg3cCMmG_wTa_qf6p41991P2Wfks,1515
+PIL/IcnsImagePlugin.py,sha256=x8JjanvXt_2BS-Qg8Jqt9XPsrCkhN2ESYGoKIoJ2WII,11755
+PIL/IcoImagePlugin.py,sha256=ZSfs8e9qJxIzcNUxuRC8S4KJgmvdH7KZOBuc70Ho9H0,11551
+PIL/ImImagePlugin.py,sha256=76DvUbRkFQ_DkEdthbApsuliNc5-FQHX3mnrYZdOkt4,10729
+PIL/Image.py,sha256=wQ34jHUxgvY6Udbz9u398kzpnEeZybBo5_ic8rQ4fZ0,125350
+PIL/ImageChops.py,sha256=HOGSnuU4EcCbdeUzEGPm54zewppHWWe12XLyOLLPgCw,7297
+PIL/ImageCms.py,sha256=MJHg18tKXzGIV0KZib3NQDIyaGI8XTJGIwzKoswfVbk,37951
+PIL/ImageColor.py,sha256=2e9xfO08S6afUzoahUIzyMN8RJcQsMz9E92rFnEhfP0,8727
+PIL/ImageDraw.py,sha256=rvMmVCjqAo_PRk41fOuOh3kkXYYTY8KinMvLkQ0RhO8,34710
+PIL/ImageDraw2.py,sha256=oBhpBTZhx3bd4D0s8E2kDjBzgThRkDU_TE_987l501k,5019
+PIL/ImageEnhance.py,sha256=CJnCouiBmxN2fE0xW7m_uMdBqcm-Fp0S3ruHhkygal4,3190
+PIL/ImageFile.py,sha256=yuEWSrfgnFRaNgHEyUV0mgorTR82n74nSDsdVPwaLXQ,22701
+PIL/ImageFilter.py,sha256=Sx99ij57imObeBdiR5w6cuhEG682SkfqtXx_vW7T_mk,16142
+PIL/ImageFont.py,sha256=TcCig_Hw5DbOnsZsWdLQeDEqplJotI2wG_Viw7P9W6o,45963
+PIL/ImageGrab.py,sha256=4W_qGYMJv7-5kWIvKnb3PzFMqdQERV32c43z-onj1CI,3823
+PIL/ImageMath.py,sha256=OsrEDBmoonjeOdcbuYQFEoU1sRT4sSCNO95EAq_CA_s,7253
+PIL/ImageMode.py,sha256=ZyTPlast0KeEp0-lbRcBoztKQzUY3FRaMAZza0Lm_mE,3006
+PIL/ImageMorph.py,sha256=KL2843wgfLyXPOWEJnTXRvySfbpRrlTqA_0M1j5xuD0,7773
+PIL/ImageOps.py,sha256=-MBNR_kztrdN6IAwTVXXHL2vvdO8ZkZjK-vMXVpmv5w,20504
+PIL/ImagePalette.py,sha256=rOpqcuH5DhJXPEvREna3Dg1N7ZK3TfnXHu5eZyltZTs,7841
+PIL/ImagePath.py,sha256=lVmH1-lCd0SyrFoqyhlstAFW2iJuC14fPcW8iewvxCQ,336
+PIL/ImageQt.py,sha256=hECe1rZpv1teaR5exrP39NbWBKwNGD7X5zoA5id_UJo,6698
+PIL/ImageSequence.py,sha256=3djA7vDH6wafTGbt4e_lPlVhy2TaKfdSrA1XQ4n-Uoc,1850
+PIL/ImageShow.py,sha256=Q_c_v9sy3wNnCnz7Ce1aM5vG1q74lFJ_ur6XvlNQXqc,12249
+PIL/ImageStat.py,sha256=Wdxu473_-bf3MeXLEj-9GrRftp6Ju_F7Sl_EKgzKd1Y,3899
+PIL/ImageTk.py,sha256=f6GGmApnpacVAHyOOVgG5PSLG6OCQInb5-2CSYfyTKg,9148
+PIL/ImageTransform.py,sha256=oO7Ir7j_5r4DeoZ-ZgqW9FO099cP2gHdE32SQdfmW_s,2883
+PIL/ImageWin.py,sha256=1MQBJS7tVrQzI9jN0nmeNeFpIaq8fXra9kQocHkiFxM,7191
+PIL/ImtImagePlugin.py,sha256=v_P09UT1Ae_HNUS-lTcMWfDTedfBDf-krhJRckDW6tg,2203
+PIL/IptcImagePlugin.py,sha256=-RZBUUodHcF5wLKanW1MxJj7cbLOpx5LvXqm0vDM22U,5714
+PIL/Jpeg2KImagePlugin.py,sha256=M8xsol1019D8hwtooNey-AGiNGaPPOqOat_0w4Tojaw,10455
+PIL/JpegImagePlugin.py,sha256=LRZGSeeoCbOyF3ISZp2VDYZGg5uL2JXLDf5AOCv3ghQ,28561
+PIL/JpegPresets.py,sha256=6nVnX_H8eA8ZO7AOVvkUx8gEN6QfI8zKnV6od16XgWE,12347
+PIL/McIdasImagePlugin.py,sha256=LrP5nA7l8IQG3WhlMI0Xs8fGXY_uf6IDmzNCERl3tGw,1754
+PIL/MicImagePlugin.py,sha256=Eh94vjTurXYkmm27hhooyNm9NkWWyVxP8Nq4thNLV6Y,2607
+PIL/MpegImagePlugin.py,sha256=n16Zgdy8Hcfke16lQwZWs53PZq4BA_OxPCMPDkW62nw,1803
+PIL/MpoImagePlugin.py,sha256=C-oosMx-C7dZT4QODBNYbX6LtfeEUxdpQ15Ychx9SuY,4478
+PIL/MspImagePlugin.py,sha256=ftTl14BpW1i3os_OUfusc7t4tRzBP4RrLxp76Sf9X4I,5527
+PIL/PSDraw.py,sha256=xmJ6GVUvDm1SC3QuUpYdeNfGu9lYBLX1ndCt96tObcc,6719
+PIL/PaletteFile.py,sha256=s3KtsDuY5S04MKDyiXK3iIbiOGzV9PvCDUpOQHI7yqc,1106
+PIL/PalmImagePlugin.py,sha256=lTVwwSPFrQ-IPFGU8_gRCMZ1Lb73cuVhQ-nkx1Q0oqc,9108
+PIL/PcdImagePlugin.py,sha256=cnBm_xKcpLGT6hZ8QKai9Up0gZERMxZwhDXl1hQtBm0,1476
+PIL/PcfFontFile.py,sha256=njhgblsjSVcITVz1DpWdEligmJgPMh5nTk_zDDWWTik,6348
+PIL/PcxImagePlugin.py,sha256=J-Pm2QBt5Hi4ObPeXDnc87X7nl1hbtTGqy4sTov6tug,5864
+PIL/PdfImagePlugin.py,sha256=f3foSWC1anwbnVBXVi-4wmtEnOR4_dbmqrbiQ--48Bk,7311
+PIL/PdfParser.py,sha256=Kxq4ZLMoayNODnpURMIcXljGJS-rX8AMBKA5iA0O29M,34561
+PIL/PixarImagePlugin.py,sha256=5MMcrrShVr511QKevK1ziKyJn0WllokWQxBhs8NWttY,1631
+PIL/PngImagePlugin.py,sha256=377uheEGeWvhlmTda0wRsQdVqAmOuboJWUMkHCl3Fs4,45016
+PIL/PpmImagePlugin.py,sha256=FclF4DGFyqWmqCOexRpzX47YuoylGNnVK1_VffYrP_s,5850
+PIL/PsdImagePlugin.py,sha256=8pYj9Sc4FYHl997QnJ6-79rAcS1flv7mIAMVR4_o1ws,7572
+PIL/PyAccess.py,sha256=SaGs2ZE4kjh-dybpAA5_Og4wuhA6d0LTPKK8t2aHffY,9607
+PIL/SgiImagePlugin.py,sha256=mqpi0G4aiKzWmJHk22WKZ0oGqsglcTNgDfp4H8S-GCM,6097
+PIL/SpiderImagePlugin.py,sha256=3weeJ7kc2t6gA-Hau9QdKgDdbXPcY8zrcTbR4cfAU-g,9554
+PIL/SunImagePlugin.py,sha256=bnjnVFRjvApCH1QC1F9HeynoCe5AZk3wa1tOhPvHzKU,4282
+PIL/TarIO.py,sha256=E_pjAxk9wHezXUuR_99liySBXfJoL2wjzdNDf0g1hTo,1440
+PIL/TgaImagePlugin.py,sha256=geeOJJJ-5Xz3u4JiDMrouyr-XFSqZ6Z48OuOaOY7_lI,6485
+PIL/TiffImagePlugin.py,sha256=uYKFj4zJivvZI_QSHRjR4uWJC_tHh4VgsegOAJPZCfY,75049
+PIL/TiffTags.py,sha256=CPaXv9s7T2oNFZFVbD-Kwz-K2V5ZcHKFkw3rT-Llkp4,15297
+PIL/WalImageFile.py,sha256=MhlGQBmSA_4OPBv6EL9bqFYe0YAf5rYtgAI_y0T920U,5520
+PIL/WebPImagePlugin.py,sha256=buw7FnrHviRmiYMcVSslJNohK3-OcwOUcnAkbZYJu-o,10924
+PIL/WmfImagePlugin.py,sha256=wvJeH9k4XJoUE2wVcf5G_8eeIuuO9BuGiV8jOZlcWrM,4625
+PIL/XVThumbImagePlugin.py,sha256=zmZ8Z4B8Kr6NOdUqSipW9_X5mKiLBLs-wxvPRRg1l0M,1940
+PIL/XbmImagePlugin.py,sha256=kuyd690rupwLFZj5r8hbGmI0Wr8sD_CceCuRew_PUew,2454
+PIL/XpmImagePlugin.py,sha256=1EBt-g678p0A0NXOkxq7sGM8dymneDMHHQmwJzAbrlw,3062
+PIL/__init__.py,sha256=3Z8lwq0danRE7WQFZxa7vMvfSjv_C4-Q73FUr_gHt4Y,1763
+PIL/__main__.py,sha256=axR7PO-HtXp-o0rBhKIxs0wark0rBfaDIhAIWqtWUo4,41
+PIL/__pycache__/BdfFontFile.cpython-38.pyc,,
+PIL/__pycache__/BlpImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/BmpImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/BufrStubImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/ContainerIO.cpython-38.pyc,,
+PIL/__pycache__/CurImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/DcxImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/DdsImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/EpsImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/ExifTags.cpython-38.pyc,,
+PIL/__pycache__/FitsImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/FitsStubImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/FliImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/FontFile.cpython-38.pyc,,
+PIL/__pycache__/FpxImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/FtexImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/GbrImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/GdImageFile.cpython-38.pyc,,
+PIL/__pycache__/GifImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/GimpGradientFile.cpython-38.pyc,,
+PIL/__pycache__/GimpPaletteFile.cpython-38.pyc,,
+PIL/__pycache__/GribStubImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/Hdf5StubImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/IcnsImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/IcoImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/ImImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/Image.cpython-38.pyc,,
+PIL/__pycache__/ImageChops.cpython-38.pyc,,
+PIL/__pycache__/ImageCms.cpython-38.pyc,,
+PIL/__pycache__/ImageColor.cpython-38.pyc,,
+PIL/__pycache__/ImageDraw.cpython-38.pyc,,
+PIL/__pycache__/ImageDraw2.cpython-38.pyc,,
+PIL/__pycache__/ImageEnhance.cpython-38.pyc,,
+PIL/__pycache__/ImageFile.cpython-38.pyc,,
+PIL/__pycache__/ImageFilter.cpython-38.pyc,,
+PIL/__pycache__/ImageFont.cpython-38.pyc,,
+PIL/__pycache__/ImageGrab.cpython-38.pyc,,
+PIL/__pycache__/ImageMath.cpython-38.pyc,,
+PIL/__pycache__/ImageMode.cpython-38.pyc,,
+PIL/__pycache__/ImageMorph.cpython-38.pyc,,
+PIL/__pycache__/ImageOps.cpython-38.pyc,,
+PIL/__pycache__/ImagePalette.cpython-38.pyc,,
+PIL/__pycache__/ImagePath.cpython-38.pyc,,
+PIL/__pycache__/ImageQt.cpython-38.pyc,,
+PIL/__pycache__/ImageSequence.cpython-38.pyc,,
+PIL/__pycache__/ImageShow.cpython-38.pyc,,
+PIL/__pycache__/ImageStat.cpython-38.pyc,,
+PIL/__pycache__/ImageTk.cpython-38.pyc,,
+PIL/__pycache__/ImageTransform.cpython-38.pyc,,
+PIL/__pycache__/ImageWin.cpython-38.pyc,,
+PIL/__pycache__/ImtImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/IptcImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/Jpeg2KImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/JpegImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/JpegPresets.cpython-38.pyc,,
+PIL/__pycache__/McIdasImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/MicImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/MpegImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/MpoImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/MspImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PSDraw.cpython-38.pyc,,
+PIL/__pycache__/PaletteFile.cpython-38.pyc,,
+PIL/__pycache__/PalmImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PcdImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PcfFontFile.cpython-38.pyc,,
+PIL/__pycache__/PcxImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PdfImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PdfParser.cpython-38.pyc,,
+PIL/__pycache__/PixarImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PngImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PpmImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PsdImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/PyAccess.cpython-38.pyc,,
+PIL/__pycache__/SgiImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/SpiderImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/SunImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/TarIO.cpython-38.pyc,,
+PIL/__pycache__/TgaImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/TiffImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/TiffTags.cpython-38.pyc,,
+PIL/__pycache__/WalImageFile.cpython-38.pyc,,
+PIL/__pycache__/WebPImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/WmfImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/XVThumbImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/XbmImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/XpmImagePlugin.cpython-38.pyc,,
+PIL/__pycache__/__init__.cpython-38.pyc,,
+PIL/__pycache__/__main__.cpython-38.pyc,,
+PIL/__pycache__/_binary.cpython-38.pyc,,
+PIL/__pycache__/_tkinter_finder.cpython-38.pyc,,
+PIL/__pycache__/_util.cpython-38.pyc,,
+PIL/__pycache__/_version.cpython-38.pyc,,
+PIL/__pycache__/features.cpython-38.pyc,,
+PIL/_binary.py,sha256=E5qhxNJ7hhbEoqu0mODOXHT8z-FDRShXG3jTJhsDdas,2043
+PIL/_imaging.cpython-38-x86_64-linux-gnu.so,sha256=JRb9OncAZJrIQQDDG7UeR5ec4BKLpmfkIfUdK3pIeGU,682160
+PIL/_imagingcms.cpython-38-x86_64-linux-gnu.so,sha256=qf9AL-U4Abv3vxruG-einjvWG463NzGDKDopB3OXhoE,47120
+PIL/_imagingft.cpython-38-x86_64-linux-gnu.so,sha256=toDOLem1KvVrRha_VlmbwrPYScCNijGtS6ltBQuka5I,68728
+PIL/_imagingmath.cpython-38-x86_64-linux-gnu.so,sha256=0KJXtj2tPNs_WL16e44Ms9XnUG9jwfbHAstX0EgSqoc,30976
+PIL/_imagingmorph.cpython-38-x86_64-linux-gnu.so,sha256=obJSPi6AGhsOV8Du7NWaPG9j3tQt12HAkdEDVed2ARQ,14656
+PIL/_imagingtk.cpython-38-x86_64-linux-gnu.so,sha256=wvuzENwFMjPjsSmP2haO4OtqbX-H_IFkrYko1n-9IcM,14656
+PIL/_tkinter_finder.py,sha256=_h4IyntUxL3ZCMnuKGxvW5VwN9k8Yiel0E4j_i41nxk,752
+PIL/_util.py,sha256=pbjX5KY1W2oZyYVC4TE9ai2PfrJZrAsO5hAnz_JMees,359
+PIL/_version.py,sha256=GGBDt0UeWTzrogKfjuxS2NAdMuW0ewTpeM55P8AdOLo,50
+PIL/_webp.cpython-38-x86_64-linux-gnu.so,sha256=LHzIXR10VGRCYgCh7vkfBg4AFPF4u7dvSvHz6Hcedqg,45816
+PIL/features.py,sha256=j2LT6v78cHWbR8z8OVaAGIbJWI-Bs62pfiB1i1fminM,9387
+Pillow-9.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Pillow-9.1.1.dist-info/LICENSE,sha256=YJxFpXSmibpABP8KRpkP5NOqcbiIMg2xx1087M2UNqA,27652
+Pillow-9.1.1.dist-info/METADATA,sha256=XWy4nfHH9qEmliF0mu4N-zaVLAgrExP0vS9qc2mFbE8,8702
+Pillow-9.1.1.dist-info/RECORD,,
+Pillow-9.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Pillow-9.1.1.dist-info/WHEEL,sha256=-ijGDuALlPxm3HbhKntps0QzHsi-DPlXqgerYTTJkFE,148
+Pillow-9.1.1.dist-info/top_level.txt,sha256=riZqrk-hyZqh5f1Z0Zwii3dKfxEsByhu9cU9IODF-NY,4
+Pillow-9.1.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+Pillow.libs/libXau-00ec42fe.so.6.0.0,sha256=d75mSMNgdE9Ubbyh6DWZcVKSq3R4m8pD6eltRC2w49o,17048
+Pillow.libs/libfreetype-27ef11b8.so.6.18.3,sha256=4795dKXnaMmYimBCSkjUKvoIBgOH0AgsGQBtLPMe6Fc,1393808
+Pillow.libs/libharfbuzz-17f91c2d.so.0.40201.0,sha256=vn61Xiqh80NriNxnnTVuWTnPrYwduJLxT-6hAROG7t4,3096464
+Pillow.libs/libjpeg-1b553ed5.so.62.3.0,sha256=J3KIMSJDBacbNjgW2MNXvMqzwyM_B0My1ckE6eaV-_8,699576
+Pillow.libs/liblcms2-1e643a89.so.2.0.13,sha256=257NDTU_KcAnn4ilrjQ9KIKzDxWmFae3ppT-cWNMk1M,494256
+Pillow.libs/liblzma-d540a118.so.5.2.5,sha256=JbIyQEIYTjuWokwsiGf-sItr6eJBCFWYtfeWpZJ9o64,220808
+Pillow.libs/libopenjp2-fca9bf24.so.2.5.0,sha256=qh0Ucjqk_1G9VGmPDNdvAowPM0Rn_zj4rULjN7acVEU,569824
+Pillow.libs/libpng16-52f22300.so.16.37.0,sha256=XMgABQkG6aNIn-YPv1f43KZJyWJ_h2ls4zafMslFfVY,277816
+Pillow.libs/libtiff-d0580107.so.5.7.0,sha256=y_SrUrLp1kt1MRII0P1s7qAYiOY2w_tHhdiECTZMMeM,681728
+Pillow.libs/libwebp-8efe125f.so.7.1.3,sha256=C5OMn2tSEFZGyXnwJTTEiiov5ZHOPIRMuDO3OFzh7XM,645808
+Pillow.libs/libwebpdemux-016472e8.so.2.0.9,sha256=TaU1aMJ_RfkUIBWim90HFgxKxmTnfDJ_pXaJHOGWxdc,29456
+Pillow.libs/libwebpmux-5c00cf3e.so.3.0.8,sha256=duMtYlNBckRPEgL7KeY2amcf3w9Cc1pVaHDYgAf4bJw,54456
+Pillow.libs/libxcb-1122e22b.so.1.1.0,sha256=Ghohd8ctbBf5_jE5i6MExypVbwyX-uv1QjLvW_ADCHQ,243216
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..3a48d3480384503bea53d4a7c55a666ace0eb5fc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: false
+Tag: cp38-cp38-manylinux_2_17_x86_64
+Tag: cp38-cp38-manylinux2014_x86_64
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b338169ce0c740c335bfe82912227ae8637bd492
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+PIL
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/zip-safe b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow-9.1.1.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/constant.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/constant.py
new file mode 100644
index 0000000000000000000000000000000000000000..c32f5cf2d63b5aa698bd80da10eaa67b1de18df4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/constant.py
@@ -0,0 +1,503 @@
+from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
+from collections import OrderedDict
+from encodings.aliases import aliases
+from re import IGNORECASE, compile as re_compile
+from typing import Dict, List, Set, Union
+
+from .assets import FREQUENCIES
+
+# Contain for each eligible encoding a list of/item bytes SIG/BOM
+ENCODING_MARKS = OrderedDict(
+ [
+ ("utf_8", BOM_UTF8),
+ (
+ "utf_7",
+ [
+ b"\x2b\x2f\x76\x38",
+ b"\x2b\x2f\x76\x39",
+ b"\x2b\x2f\x76\x2b",
+ b"\x2b\x2f\x76\x2f",
+ b"\x2b\x2f\x76\x38\x2d",
+ ],
+ ),
+ ("gb18030", b"\x84\x31\x95\x33"),
+ ("utf_32", [BOM_UTF32_BE, BOM_UTF32_LE]),
+ ("utf_16", [BOM_UTF16_BE, BOM_UTF16_LE]),
+ ]
+) # type: Dict[str, Union[bytes, List[bytes]]]
+
+TOO_SMALL_SEQUENCE = 32 # type: int
+TOO_BIG_SEQUENCE = int(10e6) # type: int
+
+UTF8_MAXIMAL_ALLOCATION = 1112064 # type: int
+
+UNICODE_RANGES_COMBINED = {
+ "Control character": range(31 + 1),
+ "Basic Latin": range(32, 127 + 1),
+ "Latin-1 Supplement": range(128, 255 + 1),
+ "Latin Extended-A": range(256, 383 + 1),
+ "Latin Extended-B": range(384, 591 + 1),
+ "IPA Extensions": range(592, 687 + 1),
+ "Spacing Modifier Letters": range(688, 767 + 1),
+ "Combining Diacritical Marks": range(768, 879 + 1),
+ "Greek and Coptic": range(880, 1023 + 1),
+ "Cyrillic": range(1024, 1279 + 1),
+ "Cyrillic Supplement": range(1280, 1327 + 1),
+ "Armenian": range(1328, 1423 + 1),
+ "Hebrew": range(1424, 1535 + 1),
+ "Arabic": range(1536, 1791 + 1),
+ "Syriac": range(1792, 1871 + 1),
+ "Arabic Supplement": range(1872, 1919 + 1),
+ "Thaana": range(1920, 1983 + 1),
+ "NKo": range(1984, 2047 + 1),
+ "Samaritan": range(2048, 2111 + 1),
+ "Mandaic": range(2112, 2143 + 1),
+ "Syriac Supplement": range(2144, 2159 + 1),
+ "Arabic Extended-A": range(2208, 2303 + 1),
+ "Devanagari": range(2304, 2431 + 1),
+ "Bengali": range(2432, 2559 + 1),
+ "Gurmukhi": range(2560, 2687 + 1),
+ "Gujarati": range(2688, 2815 + 1),
+ "Oriya": range(2816, 2943 + 1),
+ "Tamil": range(2944, 3071 + 1),
+ "Telugu": range(3072, 3199 + 1),
+ "Kannada": range(3200, 3327 + 1),
+ "Malayalam": range(3328, 3455 + 1),
+ "Sinhala": range(3456, 3583 + 1),
+ "Thai": range(3584, 3711 + 1),
+ "Lao": range(3712, 3839 + 1),
+ "Tibetan": range(3840, 4095 + 1),
+ "Myanmar": range(4096, 4255 + 1),
+ "Georgian": range(4256, 4351 + 1),
+ "Hangul Jamo": range(4352, 4607 + 1),
+ "Ethiopic": range(4608, 4991 + 1),
+ "Ethiopic Supplement": range(4992, 5023 + 1),
+ "Cherokee": range(5024, 5119 + 1),
+ "Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1),
+ "Ogham": range(5760, 5791 + 1),
+ "Runic": range(5792, 5887 + 1),
+ "Tagalog": range(5888, 5919 + 1),
+ "Hanunoo": range(5920, 5951 + 1),
+ "Buhid": range(5952, 5983 + 1),
+ "Tagbanwa": range(5984, 6015 + 1),
+ "Khmer": range(6016, 6143 + 1),
+ "Mongolian": range(6144, 6319 + 1),
+ "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1),
+ "Limbu": range(6400, 6479 + 1),
+ "Tai Le": range(6480, 6527 + 1),
+ "New Tai Lue": range(6528, 6623 + 1),
+ "Khmer Symbols": range(6624, 6655 + 1),
+ "Buginese": range(6656, 6687 + 1),
+ "Tai Tham": range(6688, 6831 + 1),
+ "Combining Diacritical Marks Extended": range(6832, 6911 + 1),
+ "Balinese": range(6912, 7039 + 1),
+ "Sundanese": range(7040, 7103 + 1),
+ "Batak": range(7104, 7167 + 1),
+ "Lepcha": range(7168, 7247 + 1),
+ "Ol Chiki": range(7248, 7295 + 1),
+ "Cyrillic Extended C": range(7296, 7311 + 1),
+ "Sundanese Supplement": range(7360, 7375 + 1),
+ "Vedic Extensions": range(7376, 7423 + 1),
+ "Phonetic Extensions": range(7424, 7551 + 1),
+ "Phonetic Extensions Supplement": range(7552, 7615 + 1),
+ "Combining Diacritical Marks Supplement": range(7616, 7679 + 1),
+ "Latin Extended Additional": range(7680, 7935 + 1),
+ "Greek Extended": range(7936, 8191 + 1),
+ "General Punctuation": range(8192, 8303 + 1),
+ "Superscripts and Subscripts": range(8304, 8351 + 1),
+ "Currency Symbols": range(8352, 8399 + 1),
+ "Combining Diacritical Marks for Symbols": range(8400, 8447 + 1),
+ "Letterlike Symbols": range(8448, 8527 + 1),
+ "Number Forms": range(8528, 8591 + 1),
+ "Arrows": range(8592, 8703 + 1),
+ "Mathematical Operators": range(8704, 8959 + 1),
+ "Miscellaneous Technical": range(8960, 9215 + 1),
+ "Control Pictures": range(9216, 9279 + 1),
+ "Optical Character Recognition": range(9280, 9311 + 1),
+ "Enclosed Alphanumerics": range(9312, 9471 + 1),
+ "Box Drawing": range(9472, 9599 + 1),
+ "Block Elements": range(9600, 9631 + 1),
+ "Geometric Shapes": range(9632, 9727 + 1),
+ "Miscellaneous Symbols": range(9728, 9983 + 1),
+ "Dingbats": range(9984, 10175 + 1),
+ "Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1),
+ "Supplemental Arrows-A": range(10224, 10239 + 1),
+ "Braille Patterns": range(10240, 10495 + 1),
+ "Supplemental Arrows-B": range(10496, 10623 + 1),
+ "Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1),
+ "Supplemental Mathematical Operators": range(10752, 11007 + 1),
+ "Miscellaneous Symbols and Arrows": range(11008, 11263 + 1),
+ "Glagolitic": range(11264, 11359 + 1),
+ "Latin Extended-C": range(11360, 11391 + 1),
+ "Coptic": range(11392, 11519 + 1),
+ "Georgian Supplement": range(11520, 11567 + 1),
+ "Tifinagh": range(11568, 11647 + 1),
+ "Ethiopic Extended": range(11648, 11743 + 1),
+ "Cyrillic Extended-A": range(11744, 11775 + 1),
+ "Supplemental Punctuation": range(11776, 11903 + 1),
+ "CJK Radicals Supplement": range(11904, 12031 + 1),
+ "Kangxi Radicals": range(12032, 12255 + 1),
+ "Ideographic Description Characters": range(12272, 12287 + 1),
+ "CJK Symbols and Punctuation": range(12288, 12351 + 1),
+ "Hiragana": range(12352, 12447 + 1),
+ "Katakana": range(12448, 12543 + 1),
+ "Bopomofo": range(12544, 12591 + 1),
+ "Hangul Compatibility Jamo": range(12592, 12687 + 1),
+ "Kanbun": range(12688, 12703 + 1),
+ "Bopomofo Extended": range(12704, 12735 + 1),
+ "CJK Strokes": range(12736, 12783 + 1),
+ "Katakana Phonetic Extensions": range(12784, 12799 + 1),
+ "Enclosed CJK Letters and Months": range(12800, 13055 + 1),
+ "CJK Compatibility": range(13056, 13311 + 1),
+ "CJK Unified Ideographs Extension A": range(13312, 19903 + 1),
+ "Yijing Hexagram Symbols": range(19904, 19967 + 1),
+ "CJK Unified Ideographs": range(19968, 40959 + 1),
+ "Yi Syllables": range(40960, 42127 + 1),
+ "Yi Radicals": range(42128, 42191 + 1),
+ "Lisu": range(42192, 42239 + 1),
+ "Vai": range(42240, 42559 + 1),
+ "Cyrillic Extended-B": range(42560, 42655 + 1),
+ "Bamum": range(42656, 42751 + 1),
+ "Modifier Tone Letters": range(42752, 42783 + 1),
+ "Latin Extended-D": range(42784, 43007 + 1),
+ "Syloti Nagri": range(43008, 43055 + 1),
+ "Common Indic Number Forms": range(43056, 43071 + 1),
+ "Phags-pa": range(43072, 43135 + 1),
+ "Saurashtra": range(43136, 43231 + 1),
+ "Devanagari Extended": range(43232, 43263 + 1),
+ "Kayah Li": range(43264, 43311 + 1),
+ "Rejang": range(43312, 43359 + 1),
+ "Hangul Jamo Extended-A": range(43360, 43391 + 1),
+ "Javanese": range(43392, 43487 + 1),
+ "Myanmar Extended-B": range(43488, 43519 + 1),
+ "Cham": range(43520, 43615 + 1),
+ "Myanmar Extended-A": range(43616, 43647 + 1),
+ "Tai Viet": range(43648, 43743 + 1),
+ "Meetei Mayek Extensions": range(43744, 43775 + 1),
+ "Ethiopic Extended-A": range(43776, 43823 + 1),
+ "Latin Extended-E": range(43824, 43887 + 1),
+ "Cherokee Supplement": range(43888, 43967 + 1),
+ "Meetei Mayek": range(43968, 44031 + 1),
+ "Hangul Syllables": range(44032, 55215 + 1),
+ "Hangul Jamo Extended-B": range(55216, 55295 + 1),
+ "High Surrogates": range(55296, 56191 + 1),
+ "High Private Use Surrogates": range(56192, 56319 + 1),
+ "Low Surrogates": range(56320, 57343 + 1),
+ "Private Use Area": range(57344, 63743 + 1),
+ "CJK Compatibility Ideographs": range(63744, 64255 + 1),
+ "Alphabetic Presentation Forms": range(64256, 64335 + 1),
+ "Arabic Presentation Forms-A": range(64336, 65023 + 1),
+ "Variation Selectors": range(65024, 65039 + 1),
+ "Vertical Forms": range(65040, 65055 + 1),
+ "Combining Half Marks": range(65056, 65071 + 1),
+ "CJK Compatibility Forms": range(65072, 65103 + 1),
+ "Small Form Variants": range(65104, 65135 + 1),
+ "Arabic Presentation Forms-B": range(65136, 65279 + 1),
+ "Halfwidth and Fullwidth Forms": range(65280, 65519 + 1),
+ "Specials": range(65520, 65535 + 1),
+ "Linear B Syllabary": range(65536, 65663 + 1),
+ "Linear B Ideograms": range(65664, 65791 + 1),
+ "Aegean Numbers": range(65792, 65855 + 1),
+ "Ancient Greek Numbers": range(65856, 65935 + 1),
+ "Ancient Symbols": range(65936, 65999 + 1),
+ "Phaistos Disc": range(66000, 66047 + 1),
+ "Lycian": range(66176, 66207 + 1),
+ "Carian": range(66208, 66271 + 1),
+ "Coptic Epact Numbers": range(66272, 66303 + 1),
+ "Old Italic": range(66304, 66351 + 1),
+ "Gothic": range(66352, 66383 + 1),
+ "Old Permic": range(66384, 66431 + 1),
+ "Ugaritic": range(66432, 66463 + 1),
+ "Old Persian": range(66464, 66527 + 1),
+ "Deseret": range(66560, 66639 + 1),
+ "Shavian": range(66640, 66687 + 1),
+ "Osmanya": range(66688, 66735 + 1),
+ "Osage": range(66736, 66815 + 1),
+ "Elbasan": range(66816, 66863 + 1),
+ "Caucasian Albanian": range(66864, 66927 + 1),
+ "Linear A": range(67072, 67455 + 1),
+ "Cypriot Syllabary": range(67584, 67647 + 1),
+ "Imperial Aramaic": range(67648, 67679 + 1),
+ "Palmyrene": range(67680, 67711 + 1),
+ "Nabataean": range(67712, 67759 + 1),
+ "Hatran": range(67808, 67839 + 1),
+ "Phoenician": range(67840, 67871 + 1),
+ "Lydian": range(67872, 67903 + 1),
+ "Meroitic Hieroglyphs": range(67968, 67999 + 1),
+ "Meroitic Cursive": range(68000, 68095 + 1),
+ "Kharoshthi": range(68096, 68191 + 1),
+ "Old South Arabian": range(68192, 68223 + 1),
+ "Old North Arabian": range(68224, 68255 + 1),
+ "Manichaean": range(68288, 68351 + 1),
+ "Avestan": range(68352, 68415 + 1),
+ "Inscriptional Parthian": range(68416, 68447 + 1),
+ "Inscriptional Pahlavi": range(68448, 68479 + 1),
+ "Psalter Pahlavi": range(68480, 68527 + 1),
+ "Old Turkic": range(68608, 68687 + 1),
+ "Old Hungarian": range(68736, 68863 + 1),
+ "Rumi Numeral Symbols": range(69216, 69247 + 1),
+ "Brahmi": range(69632, 69759 + 1),
+ "Kaithi": range(69760, 69839 + 1),
+ "Sora Sompeng": range(69840, 69887 + 1),
+ "Chakma": range(69888, 69967 + 1),
+ "Mahajani": range(69968, 70015 + 1),
+ "Sharada": range(70016, 70111 + 1),
+ "Sinhala Archaic Numbers": range(70112, 70143 + 1),
+ "Khojki": range(70144, 70223 + 1),
+ "Multani": range(70272, 70319 + 1),
+ "Khudawadi": range(70320, 70399 + 1),
+ "Grantha": range(70400, 70527 + 1),
+ "Newa": range(70656, 70783 + 1),
+ "Tirhuta": range(70784, 70879 + 1),
+ "Siddham": range(71040, 71167 + 1),
+ "Modi": range(71168, 71263 + 1),
+ "Mongolian Supplement": range(71264, 71295 + 1),
+ "Takri": range(71296, 71375 + 1),
+ "Ahom": range(71424, 71487 + 1),
+ "Warang Citi": range(71840, 71935 + 1),
+ "Zanabazar Square": range(72192, 72271 + 1),
+ "Soyombo": range(72272, 72367 + 1),
+ "Pau Cin Hau": range(72384, 72447 + 1),
+ "Bhaiksuki": range(72704, 72815 + 1),
+ "Marchen": range(72816, 72895 + 1),
+ "Masaram Gondi": range(72960, 73055 + 1),
+ "Cuneiform": range(73728, 74751 + 1),
+ "Cuneiform Numbers and Punctuation": range(74752, 74879 + 1),
+ "Early Dynastic Cuneiform": range(74880, 75087 + 1),
+ "Egyptian Hieroglyphs": range(77824, 78895 + 1),
+ "Anatolian Hieroglyphs": range(82944, 83583 + 1),
+ "Bamum Supplement": range(92160, 92735 + 1),
+ "Mro": range(92736, 92783 + 1),
+ "Bassa Vah": range(92880, 92927 + 1),
+ "Pahawh Hmong": range(92928, 93071 + 1),
+ "Miao": range(93952, 94111 + 1),
+ "Ideographic Symbols and Punctuation": range(94176, 94207 + 1),
+ "Tangut": range(94208, 100351 + 1),
+ "Tangut Components": range(100352, 101119 + 1),
+ "Kana Supplement": range(110592, 110847 + 1),
+ "Kana Extended-A": range(110848, 110895 + 1),
+ "Nushu": range(110960, 111359 + 1),
+ "Duployan": range(113664, 113823 + 1),
+ "Shorthand Format Controls": range(113824, 113839 + 1),
+ "Byzantine Musical Symbols": range(118784, 119039 + 1),
+ "Musical Symbols": range(119040, 119295 + 1),
+ "Ancient Greek Musical Notation": range(119296, 119375 + 1),
+ "Tai Xuan Jing Symbols": range(119552, 119647 + 1),
+ "Counting Rod Numerals": range(119648, 119679 + 1),
+ "Mathematical Alphanumeric Symbols": range(119808, 120831 + 1),
+ "Sutton SignWriting": range(120832, 121519 + 1),
+ "Glagolitic Supplement": range(122880, 122927 + 1),
+ "Mende Kikakui": range(124928, 125151 + 1),
+ "Adlam": range(125184, 125279 + 1),
+ "Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1),
+ "Mahjong Tiles": range(126976, 127023 + 1),
+ "Domino Tiles": range(127024, 127135 + 1),
+ "Playing Cards": range(127136, 127231 + 1),
+ "Enclosed Alphanumeric Supplement": range(127232, 127487 + 1),
+ "Enclosed Ideographic Supplement": range(127488, 127743 + 1),
+ "Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1),
+ "Emoticons range(Emoji)": range(128512, 128591 + 1),
+ "Ornamental Dingbats": range(128592, 128639 + 1),
+ "Transport and Map Symbols": range(128640, 128767 + 1),
+ "Alchemical Symbols": range(128768, 128895 + 1),
+ "Geometric Shapes Extended": range(128896, 129023 + 1),
+ "Supplemental Arrows-C": range(129024, 129279 + 1),
+ "Supplemental Symbols and Pictographs": range(129280, 129535 + 1),
+ "CJK Unified Ideographs Extension B": range(131072, 173791 + 1),
+ "CJK Unified Ideographs Extension C": range(173824, 177983 + 1),
+ "CJK Unified Ideographs Extension D": range(177984, 178207 + 1),
+ "CJK Unified Ideographs Extension E": range(178208, 183983 + 1),
+ "CJK Unified Ideographs Extension F": range(183984, 191471 + 1),
+ "CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1),
+ "Tags": range(917504, 917631 + 1),
+ "Variation Selectors Supplement": range(917760, 917999 + 1),
+} # type: Dict[str, range]
+
+
+UNICODE_SECONDARY_RANGE_KEYWORD = [
+ "Supplement",
+ "Extended",
+ "Extensions",
+ "Modifier",
+ "Marks",
+ "Punctuation",
+ "Symbols",
+ "Forms",
+ "Operators",
+ "Miscellaneous",
+ "Drawing",
+ "Block",
+ "Shapes",
+ "Supplemental",
+ "Tags",
+] # type: List[str]
+
+RE_POSSIBLE_ENCODING_INDICATION = re_compile(
+ r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
+ IGNORECASE,
+)
+
+IANA_SUPPORTED = sorted(
+ filter(
+ lambda x: x.endswith("_codec") is False
+ and x not in {"rot_13", "tactis", "mbcs"},
+ list(set(aliases.values())),
+ )
+) # type: List[str]
+
+IANA_SUPPORTED_COUNT = len(IANA_SUPPORTED) # type: int
+
+# pre-computed code page that are similar using the function cp_similarity.
+IANA_SUPPORTED_SIMILAR = {
+ "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
+ "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
+ "cp1125": ["cp866"],
+ "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
+ "cp1250": ["iso8859_2"],
+ "cp1251": ["kz1048", "ptcp154"],
+ "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
+ "cp1253": ["iso8859_7"],
+ "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
+ "cp1257": ["iso8859_13"],
+ "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
+ "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
+ "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
+ "cp850": ["cp437", "cp857", "cp858", "cp865"],
+ "cp857": ["cp850", "cp858", "cp865"],
+ "cp858": ["cp437", "cp850", "cp857", "cp865"],
+ "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
+ "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
+ "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
+ "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
+ "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
+ "cp866": ["cp1125"],
+ "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
+ "iso8859_11": ["tis_620"],
+ "iso8859_13": ["cp1257"],
+ "iso8859_14": [
+ "iso8859_10",
+ "iso8859_15",
+ "iso8859_16",
+ "iso8859_3",
+ "iso8859_9",
+ "latin_1",
+ ],
+ "iso8859_15": [
+ "cp1252",
+ "cp1254",
+ "iso8859_10",
+ "iso8859_14",
+ "iso8859_16",
+ "iso8859_3",
+ "iso8859_9",
+ "latin_1",
+ ],
+ "iso8859_16": [
+ "iso8859_14",
+ "iso8859_15",
+ "iso8859_2",
+ "iso8859_3",
+ "iso8859_9",
+ "latin_1",
+ ],
+ "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
+ "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
+ "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
+ "iso8859_7": ["cp1253"],
+ "iso8859_9": [
+ "cp1252",
+ "cp1254",
+ "cp1258",
+ "iso8859_10",
+ "iso8859_14",
+ "iso8859_15",
+ "iso8859_16",
+ "iso8859_3",
+ "iso8859_4",
+ "latin_1",
+ ],
+ "kz1048": ["cp1251", "ptcp154"],
+ "latin_1": [
+ "cp1252",
+ "cp1254",
+ "cp1258",
+ "iso8859_10",
+ "iso8859_14",
+ "iso8859_15",
+ "iso8859_16",
+ "iso8859_3",
+ "iso8859_4",
+ "iso8859_9",
+ ],
+ "mac_iceland": ["mac_roman", "mac_turkish"],
+ "mac_roman": ["mac_iceland", "mac_turkish"],
+ "mac_turkish": ["mac_iceland", "mac_roman"],
+ "ptcp154": ["cp1251", "kz1048"],
+ "tis_620": ["iso8859_11"],
+} # type: Dict[str, List[str]]
+
+
+CHARDET_CORRESPONDENCE = {
+ "iso2022_kr": "ISO-2022-KR",
+ "iso2022_jp": "ISO-2022-JP",
+ "euc_kr": "EUC-KR",
+ "tis_620": "TIS-620",
+ "utf_32": "UTF-32",
+ "euc_jp": "EUC-JP",
+ "koi8_r": "KOI8-R",
+ "iso8859_1": "ISO-8859-1",
+ "iso8859_2": "ISO-8859-2",
+ "iso8859_5": "ISO-8859-5",
+ "iso8859_6": "ISO-8859-6",
+ "iso8859_7": "ISO-8859-7",
+ "iso8859_8": "ISO-8859-8",
+ "utf_16": "UTF-16",
+ "cp855": "IBM855",
+ "mac_cyrillic": "MacCyrillic",
+ "gb2312": "GB2312",
+ "gb18030": "GB18030",
+ "cp932": "CP932",
+ "cp866": "IBM866",
+ "utf_8": "utf-8",
+ "utf_8_sig": "UTF-8-SIG",
+ "shift_jis": "SHIFT_JIS",
+ "big5": "Big5",
+ "cp1250": "windows-1250",
+ "cp1251": "windows-1251",
+ "cp1252": "Windows-1252",
+ "cp1253": "windows-1253",
+ "cp1255": "windows-1255",
+ "cp1256": "windows-1256",
+ "cp1254": "Windows-1254",
+ "cp949": "CP949",
+} # type: Dict[str, str]
+
+
+COMMON_SAFE_ASCII_CHARACTERS = {
+ "<",
+ ">",
+ "=",
+ ":",
+ "/",
+ "&",
+ ";",
+ "{",
+ "}",
+ "[",
+ "]",
+ ",",
+ "|",
+ '"',
+ "-",
+} # type: Set[str]
+
+
+KO_NAMES = {"johab", "cp949", "euc_kr"} # type: Set[str]
+ZH_NAMES = {"big5", "cp950", "big5hkscs", "hz"} # type: Set[str]
+
+NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+")
+
+LANGUAGE_SUPPORTED_COUNT = len(FREQUENCIES) # type: int
+
+# Logging LEVEL bellow DEBUG
+TRACE = 5 # type: int
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/md.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/md.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3d6505cf00f0a6a8a63b5a0a745ed45c0982c3c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/md.py
@@ -0,0 +1,559 @@
+from functools import lru_cache
+from typing import List, Optional
+
+from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD
+from .utils import (
+ is_accentuated,
+ is_ascii,
+ is_case_variable,
+ is_cjk,
+ is_emoticon,
+ is_hangul,
+ is_hiragana,
+ is_katakana,
+ is_latin,
+ is_punctuation,
+ is_separator,
+ is_symbol,
+ is_thai,
+ remove_accent,
+ unicode_range,
+)
+
+
+class MessDetectorPlugin:
+ """
+ Base abstract class used for mess detection plugins.
+ All detectors MUST extend and implement given methods.
+ """
+
+ def eligible(self, character: str) -> bool:
+ """
+ Determine if given character should be fed in.
+ """
+ raise NotImplementedError # pragma: nocover
+
+ def feed(self, character: str) -> None:
+ """
+ The main routine to be executed upon character.
+ Insert the logic in witch the text would be considered chaotic.
+ """
+ raise NotImplementedError # pragma: nocover
+
+ def reset(self) -> None: # pragma: no cover
+ """
+ Permit to reset the plugin to the initial state.
+ """
+ raise NotImplementedError
+
+ @property
+ def ratio(self) -> float:
+ """
+ Compute the chaos ratio based on what your feed() has seen.
+ Must NOT be lower than 0.; No restriction gt 0.
+ """
+ raise NotImplementedError # pragma: nocover
+
+
+class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._punctuation_count = 0 # type: int
+ self._symbol_count = 0 # type: int
+ self._character_count = 0 # type: int
+
+ self._last_printable_char = None # type: Optional[str]
+ self._frenzy_symbol_in_word = False # type: bool
+
+ def eligible(self, character: str) -> bool:
+ return character.isprintable()
+
+ def feed(self, character: str) -> None:
+ self._character_count += 1
+
+ if (
+ character != self._last_printable_char
+ and character not in COMMON_SAFE_ASCII_CHARACTERS
+ ):
+ if is_punctuation(character):
+ self._punctuation_count += 1
+ elif (
+ character.isdigit() is False
+ and is_symbol(character)
+ and is_emoticon(character) is False
+ ):
+ self._symbol_count += 2
+
+ self._last_printable_char = character
+
+ def reset(self) -> None: # pragma: no cover
+ self._punctuation_count = 0
+ self._character_count = 0
+ self._symbol_count = 0
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+
+ ratio_of_punctuation = (
+ self._punctuation_count + self._symbol_count
+ ) / self._character_count # type: float
+
+ return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
+
+
+class TooManyAccentuatedPlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._character_count = 0 # type: int
+ self._accentuated_count = 0 # type: int
+
+ def eligible(self, character: str) -> bool:
+ return character.isalpha()
+
+ def feed(self, character: str) -> None:
+ self._character_count += 1
+
+ if is_accentuated(character):
+ self._accentuated_count += 1
+
+ def reset(self) -> None: # pragma: no cover
+ self._character_count = 0
+ self._accentuated_count = 0
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+ ratio_of_accentuation = (
+ self._accentuated_count / self._character_count
+ ) # type: float
+ return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
+
+
+class UnprintablePlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._unprintable_count = 0 # type: int
+ self._character_count = 0 # type: int
+
+ def eligible(self, character: str) -> bool:
+ return True
+
+ def feed(self, character: str) -> None:
+ if (
+ character.isspace() is False # includes \n \t \r \v
+ and character.isprintable() is False
+ and character != "\x1A" # Why? Its the ASCII substitute character.
+ ):
+ self._unprintable_count += 1
+ self._character_count += 1
+
+ def reset(self) -> None: # pragma: no cover
+ self._unprintable_count = 0
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+
+ return (self._unprintable_count * 8) / self._character_count
+
+
+class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._successive_count = 0 # type: int
+ self._character_count = 0 # type: int
+
+ self._last_latin_character = None # type: Optional[str]
+
+ def eligible(self, character: str) -> bool:
+ return character.isalpha() and is_latin(character)
+
+ def feed(self, character: str) -> None:
+ self._character_count += 1
+ if (
+ self._last_latin_character is not None
+ and is_accentuated(character)
+ and is_accentuated(self._last_latin_character)
+ ):
+ if character.isupper() and self._last_latin_character.isupper():
+ self._successive_count += 1
+ # Worse if its the same char duplicated with different accent.
+ if remove_accent(character) == remove_accent(self._last_latin_character):
+ self._successive_count += 1
+ self._last_latin_character = character
+
+ def reset(self) -> None: # pragma: no cover
+ self._successive_count = 0
+ self._character_count = 0
+ self._last_latin_character = None
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+
+ return (self._successive_count * 2) / self._character_count
+
+
+class SuspiciousRange(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._suspicious_successive_range_count = 0 # type: int
+ self._character_count = 0 # type: int
+ self._last_printable_seen = None # type: Optional[str]
+
+ def eligible(self, character: str) -> bool:
+ return character.isprintable()
+
+ def feed(self, character: str) -> None:
+ self._character_count += 1
+
+ if (
+ character.isspace()
+ or is_punctuation(character)
+ or character in COMMON_SAFE_ASCII_CHARACTERS
+ ):
+ self._last_printable_seen = None
+ return
+
+ if self._last_printable_seen is None:
+ self._last_printable_seen = character
+ return
+
+ unicode_range_a = unicode_range(
+ self._last_printable_seen
+ ) # type: Optional[str]
+ unicode_range_b = unicode_range(character) # type: Optional[str]
+
+ if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
+ self._suspicious_successive_range_count += 1
+
+ self._last_printable_seen = character
+
+ def reset(self) -> None: # pragma: no cover
+ self._character_count = 0
+ self._suspicious_successive_range_count = 0
+ self._last_printable_seen = None
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+
+ ratio_of_suspicious_range_usage = (
+ self._suspicious_successive_range_count * 2
+ ) / self._character_count # type: float
+
+ if ratio_of_suspicious_range_usage < 0.1:
+ return 0.0
+
+ return ratio_of_suspicious_range_usage
+
+
+class SuperWeirdWordPlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._word_count = 0 # type: int
+ self._bad_word_count = 0 # type: int
+ self._foreign_long_count = 0 # type: int
+
+ self._is_current_word_bad = False # type: bool
+ self._foreign_long_watch = False # type: bool
+
+ self._character_count = 0 # type: int
+ self._bad_character_count = 0 # type: int
+
+ self._buffer = "" # type: str
+ self._buffer_accent_count = 0 # type: int
+
+ def eligible(self, character: str) -> bool:
+ return True
+
+ def feed(self, character: str) -> None:
+ if character.isalpha():
+ self._buffer = "".join([self._buffer, character])
+ if is_accentuated(character):
+ self._buffer_accent_count += 1
+ if (
+ self._foreign_long_watch is False
+ and (is_latin(character) is False or is_accentuated(character))
+ and is_cjk(character) is False
+ and is_hangul(character) is False
+ and is_katakana(character) is False
+ and is_hiragana(character) is False
+ and is_thai(character) is False
+ ):
+ self._foreign_long_watch = True
+ return
+ if not self._buffer:
+ return
+ if (
+ character.isspace() or is_punctuation(character) or is_separator(character)
+ ) and self._buffer:
+ self._word_count += 1
+ buffer_length = len(self._buffer) # type: int
+
+ self._character_count += buffer_length
+
+ if buffer_length >= 4:
+ if self._buffer_accent_count / buffer_length > 0.34:
+ self._is_current_word_bad = True
+ # Word/Buffer ending with a upper case accentuated letter are so rare,
+ # that we will consider them all as suspicious. Same weight as foreign_long suspicious.
+ if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper():
+ self._foreign_long_count += 1
+ self._is_current_word_bad = True
+ if buffer_length >= 24 and self._foreign_long_watch:
+ self._foreign_long_count += 1
+ self._is_current_word_bad = True
+
+ if self._is_current_word_bad:
+ self._bad_word_count += 1
+ self._bad_character_count += len(self._buffer)
+ self._is_current_word_bad = False
+
+ self._foreign_long_watch = False
+ self._buffer = ""
+ self._buffer_accent_count = 0
+ elif (
+ character not in {"<", ">", "-", "=", "~", "|", "_"}
+ and character.isdigit() is False
+ and is_symbol(character)
+ ):
+ self._is_current_word_bad = True
+ self._buffer += character
+
+ def reset(self) -> None: # pragma: no cover
+ self._buffer = ""
+ self._is_current_word_bad = False
+ self._foreign_long_watch = False
+ self._bad_word_count = 0
+ self._word_count = 0
+ self._character_count = 0
+ self._bad_character_count = 0
+ self._foreign_long_count = 0
+
+ @property
+ def ratio(self) -> float:
+ if self._word_count <= 10 and self._foreign_long_count == 0:
+ return 0.0
+
+ return self._bad_character_count / self._character_count
+
+
+class CjkInvalidStopPlugin(MessDetectorPlugin):
+ """
+ GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and
+ can be easily detected. Searching for the overuse of '丅' and '丄'.
+ """
+
+ def __init__(self) -> None:
+ self._wrong_stop_count = 0 # type: int
+ self._cjk_character_count = 0 # type: int
+
+ def eligible(self, character: str) -> bool:
+ return True
+
+ def feed(self, character: str) -> None:
+ if character in {"丅", "丄"}:
+ self._wrong_stop_count += 1
+ return
+ if is_cjk(character):
+ self._cjk_character_count += 1
+
+ def reset(self) -> None: # pragma: no cover
+ self._wrong_stop_count = 0
+ self._cjk_character_count = 0
+
+ @property
+ def ratio(self) -> float:
+ if self._cjk_character_count < 16:
+ return 0.0
+ return self._wrong_stop_count / self._cjk_character_count
+
+
+class ArchaicUpperLowerPlugin(MessDetectorPlugin):
+ def __init__(self) -> None:
+ self._buf = False # type: bool
+
+ self._character_count_since_last_sep = 0 # type: int
+
+ self._successive_upper_lower_count = 0 # type: int
+ self._successive_upper_lower_count_final = 0 # type: int
+
+ self._character_count = 0 # type: int
+
+ self._last_alpha_seen = None # type: Optional[str]
+ self._current_ascii_only = True # type: bool
+
+ def eligible(self, character: str) -> bool:
+ return True
+
+ def feed(self, character: str) -> None:
+ is_concerned = character.isalpha() and is_case_variable(character)
+ chunk_sep = is_concerned is False
+
+ if chunk_sep and self._character_count_since_last_sep > 0:
+ if (
+ self._character_count_since_last_sep <= 64
+ and character.isdigit() is False
+ and self._current_ascii_only is False
+ ):
+ self._successive_upper_lower_count_final += (
+ self._successive_upper_lower_count
+ )
+
+ self._successive_upper_lower_count = 0
+ self._character_count_since_last_sep = 0
+ self._last_alpha_seen = None
+ self._buf = False
+ self._character_count += 1
+ self._current_ascii_only = True
+
+ return
+
+ if self._current_ascii_only is True and is_ascii(character) is False:
+ self._current_ascii_only = False
+
+ if self._last_alpha_seen is not None:
+ if (character.isupper() and self._last_alpha_seen.islower()) or (
+ character.islower() and self._last_alpha_seen.isupper()
+ ):
+ if self._buf is True:
+ self._successive_upper_lower_count += 2
+ self._buf = False
+ else:
+ self._buf = True
+ else:
+ self._buf = False
+
+ self._character_count += 1
+ self._character_count_since_last_sep += 1
+ self._last_alpha_seen = character
+
+ def reset(self) -> None: # pragma: no cover
+ self._character_count = 0
+ self._character_count_since_last_sep = 0
+ self._successive_upper_lower_count = 0
+ self._successive_upper_lower_count_final = 0
+ self._last_alpha_seen = None
+ self._buf = False
+ self._current_ascii_only = True
+
+ @property
+ def ratio(self) -> float:
+ if self._character_count == 0:
+ return 0.0
+
+ return self._successive_upper_lower_count_final / self._character_count
+
+
+def is_suspiciously_successive_range(
+ unicode_range_a: Optional[str], unicode_range_b: Optional[str]
+) -> bool:
+ """
+ Determine if two Unicode range seen next to each other can be considered as suspicious.
+ """
+ if unicode_range_a is None or unicode_range_b is None:
+ return True
+
+ if unicode_range_a == unicode_range_b:
+ return False
+
+ if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
+ return False
+
+ if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
+ return False
+
+ # Latin characters can be accompanied with a combining diacritical mark
+ # eg. Vietnamese.
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
+ "Combining" in unicode_range_a or "Combining" in unicode_range_b
+ ):
+ return False
+
+ keywords_range_a, keywords_range_b = unicode_range_a.split(
+ " "
+ ), unicode_range_b.split(" ")
+
+ for el in keywords_range_a:
+ if el in UNICODE_SECONDARY_RANGE_KEYWORD:
+ continue
+ if el in keywords_range_b:
+ return False
+
+ # Japanese Exception
+ range_a_jp_chars, range_b_jp_chars = (
+ unicode_range_a
+ in (
+ "Hiragana",
+ "Katakana",
+ ),
+ unicode_range_b in ("Hiragana", "Katakana"),
+ )
+ if (range_a_jp_chars or range_b_jp_chars) and (
+ "CJK" in unicode_range_a or "CJK" in unicode_range_b
+ ):
+ return False
+ if range_a_jp_chars and range_b_jp_chars:
+ return False
+
+ if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
+ if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
+ return False
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
+ return False
+
+ # Chinese/Japanese use dedicated range for punctuation and/or separators.
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
+ unicode_range_a in ["Katakana", "Hiragana"]
+ and unicode_range_b in ["Katakana", "Hiragana"]
+ ):
+ if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
+ return False
+ if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
+ return False
+
+ return True
+
+
+@lru_cache(maxsize=2048)
+def mess_ratio(
+ decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
+) -> float:
+ """
+ Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier.
+ """
+
+ detectors = [
+ md_class() for md_class in MessDetectorPlugin.__subclasses__()
+ ] # type: List[MessDetectorPlugin]
+
+ length = len(decoded_sequence) + 1 # type: int
+
+ mean_mess_ratio = 0.0 # type: float
+
+ if length < 512:
+ intermediary_mean_mess_ratio_calc = 32 # type: int
+ elif length <= 1024:
+ intermediary_mean_mess_ratio_calc = 64
+ else:
+ intermediary_mean_mess_ratio_calc = 128
+
+ for character, index in zip(decoded_sequence + "\n", range(length)):
+ for detector in detectors:
+ if detector.eligible(character):
+ detector.feed(character)
+
+ if (
+ index > 0 and index % intermediary_mean_mess_ratio_calc == 0
+ ) or index == length - 1:
+ mean_mess_ratio = sum(dt.ratio for dt in detectors)
+
+ if mean_mess_ratio >= maximum_threshold:
+ break
+
+ if debug:
+ for dt in detectors: # pragma: nocover
+ print(dt.__class__, dt.ratio)
+
+ return round(mean_mess_ratio, 3)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7b6d30f8c859cab7602d7dbff98aad3e10c27281
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/METADATA
@@ -0,0 +1,137 @@
+Metadata-Version: 2.1
+Name: imageio
+Version: 2.19.3
+Summary: Library for reading and writing a wide range of image, video, scientific, and volumetric data formats.
+Home-page: https://github.com/imageio/imageio
+Author: imageio contributors
+Author-email: almar.klein@gmail.com
+License: BSD-2-Clause
+Download-URL: http://pypi.python.org/pypi/imageio
+Keywords: image video volume imread imwrite io animation ffmpeg
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Provides: imageio
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: pillow (>=8.3.2)
+Provides-Extra: all-plugins
+Requires-Dist: astropy ; extra == 'all-plugins'
+Requires-Dist: av ; extra == 'all-plugins'
+Requires-Dist: imageio-ffmpeg ; extra == 'all-plugins'
+Requires-Dist: opencv-python ; extra == 'all-plugins'
+Requires-Dist: psutil ; extra == 'all-plugins'
+Requires-Dist: tifffile ; extra == 'all-plugins'
+Provides-Extra: all-plugins-pypy
+Requires-Dist: av ; extra == 'all-plugins-pypy'
+Requires-Dist: imageio-ffmpeg ; extra == 'all-plugins-pypy'
+Requires-Dist: psutil ; extra == 'all-plugins-pypy'
+Requires-Dist: tifffile ; extra == 'all-plugins-pypy'
+Provides-Extra: bsdf
+Provides-Extra: build
+Requires-Dist: wheel ; extra == 'build'
+Provides-Extra: dev
+Requires-Dist: invoke ; extra == 'dev'
+Requires-Dist: pytest ; extra == 'dev'
+Requires-Dist: pytest-cov ; extra == 'dev'
+Requires-Dist: fsspec[github] ; extra == 'dev'
+Requires-Dist: black ; extra == 'dev'
+Requires-Dist: flake8 ; extra == 'dev'
+Provides-Extra: dicom
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: numpydoc ; extra == 'docs'
+Requires-Dist: pydata-sphinx-theme ; extra == 'docs'
+Provides-Extra: feisem
+Provides-Extra: ffmpeg
+Requires-Dist: imageio-ffmpeg ; extra == 'ffmpeg'
+Requires-Dist: psutil ; extra == 'ffmpeg'
+Provides-Extra: fits
+Requires-Dist: astropy ; extra == 'fits'
+Provides-Extra: freeimage
+Provides-Extra: full
+Requires-Dist: astropy ; extra == 'full'
+Requires-Dist: av ; extra == 'full'
+Requires-Dist: black ; extra == 'full'
+Requires-Dist: flake8 ; extra == 'full'
+Requires-Dist: fsspec[github] ; extra == 'full'
+Requires-Dist: gdal ; extra == 'full'
+Requires-Dist: imageio-ffmpeg ; extra == 'full'
+Requires-Dist: invoke ; extra == 'full'
+Requires-Dist: itk ; extra == 'full'
+Requires-Dist: numpydoc ; extra == 'full'
+Requires-Dist: opencv-python ; extra == 'full'
+Requires-Dist: psutil ; extra == 'full'
+Requires-Dist: pydata-sphinx-theme ; extra == 'full'
+Requires-Dist: pytest ; extra == 'full'
+Requires-Dist: pytest-cov ; extra == 'full'
+Requires-Dist: sphinx ; extra == 'full'
+Requires-Dist: tifffile ; extra == 'full'
+Requires-Dist: wheel ; extra == 'full'
+Provides-Extra: gdal
+Requires-Dist: gdal ; extra == 'gdal'
+Provides-Extra: itk
+Requires-Dist: itk ; extra == 'itk'
+Provides-Extra: linting
+Requires-Dist: black ; extra == 'linting'
+Requires-Dist: flake8 ; extra == 'linting'
+Provides-Extra: lytro
+Provides-Extra: numpy
+Provides-Extra: opencv
+Requires-Dist: opencv-python ; extra == 'opencv'
+Provides-Extra: pillow
+Provides-Extra: pyav
+Requires-Dist: av ; extra == 'pyav'
+Provides-Extra: simpleitk
+Provides-Extra: spe
+Provides-Extra: swf
+Provides-Extra: test
+Requires-Dist: invoke ; extra == 'test'
+Requires-Dist: pytest ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+Requires-Dist: fsspec[github] ; extra == 'test'
+Provides-Extra: tifffile
+Requires-Dist: tifffile ; extra == 'tifffile'
+
+
+.. image:: https://github.com/imageio/imageio/workflows/CI/badge.svg
+ :target: https://github.com/imageio/imageio/actions
+
+
+Imageio is a Python library that provides an easy interface to read and
+write a wide range of image data, including animated images, volumetric
+data, and scientific formats. It is cross-platform, runs on Python 3.5+,
+and is easy to install.
+
+Main website: https://imageio.readthedocs.io/
+
+
+Release notes: https://github.com/imageio/imageio/blob/master/CHANGELOG.md
+
+Example:
+
+.. code-block:: python
+
+ >>> import imageio
+ >>> im = imageio.imread('imageio:astronaut.png')
+ >>> im.shape # im is a numpy array
+ (512, 512, 3)
+ >>> imageio.imwrite('astronaut-gray.jpg', im[:, :, 0])
+
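+Imageio can also read multi-frame content such as animated GIFs. A minimal
+sketch using the ``mimread`` helper (the file path here is hypothetical):
+
+.. code-block:: python
+
+    >>> import imageio
+    >>> frames = imageio.mimread('path/to/animation.gif')  # hypothetical file
+    >>> # each element of ``frames`` is a numpy array, one per frame
+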
+See the API Reference and examples in the documentation
+(https://imageio.readthedocs.io/) for more information.
+
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a464e4cd37851148ffe3b7ae88921620c50cfe03
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+imageio
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..50716743e35eb117b622e5248eda1490dc973189
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/LICENSE
@@ -0,0 +1,60 @@
+License file for pydicom, a pure-python DICOM library
+
+Copyright (c) 2008-2020 Darcy Mason and pydicom contributors
+
+Except for portions outlined below, pydicom is released under an MIT license:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+Portions of pydicom (private dictionary file(s)) were generated from the
+private dictionary of the GDCM library, released under the following license:
+
+ Program: GDCM (Grassroots DICOM). A DICOM library
+ Module: http://gdcm.sourceforge.net/Copyright.html
+
+Copyright (c) 2006-2010 Mathieu Malaterre
+Copyright (c) 1993-2005 CREATIS
+(CREATIS = Centre de Recherche et d'Applications en Traitement de l'Image)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither name of Mathieu Malaterre, or CREATIS, nor the names of any
+ contributors (CNRS, INSERM, UCB, Universite Lyon I), may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..2c773d698c87361393d6ead549c791d6d5c509b5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/METADATA
@@ -0,0 +1,158 @@
+Metadata-Version: 2.1
+Name: pydicom
+Version: 2.3.0
+Summary: A pure Python package for reading and writing DICOM data
+Home-page: https://github.com/pydicom/pydicom
+Author: Darcy Mason and contributors
+Author-email: darcymason@gmail.com
+License: MIT
+Download-URL: https://github.com/pydicom/pydicom/archive/master.zip
+Keywords: dicom python medical imaging
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Healthcare Industry
+Classifier: Intended Audience :: Science/Research
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
+Classifier: Topic :: Scientific/Engineering :: Physics
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: >=3.6.1
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: docs
+Requires-Dist: numpy ; extra == 'docs'
+Requires-Dist: numpydoc ; extra == 'docs'
+Requires-Dist: matplotlib ; extra == 'docs'
+Requires-Dist: pillow ; extra == 'docs'
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme ; extra == 'docs'
+Requires-Dist: sphinx-gallery ; extra == 'docs'
+Requires-Dist: sphinxcontrib-napoleon ; extra == 'docs'
+Requires-Dist: sphinx-copybutton ; extra == 'docs'
+
+[unit-tests](https://github.com/pydicom/pydicom/actions?query=workflow%3Aunit-tests)
+[type-hints](https://github.com/pydicom/pydicom/actions?query=workflow%3Atype-hints)
+[CircleCI](https://circleci.com/gh/pydicom/pydicom/tree/master)
+[codecov](https://codecov.io/gh/pydicom/pydicom)
+[Python versions](https://img.shields.io/pypi/pyversions/pydicom.svg)
+[PyPI version](https://badge.fury.io/py/pydicom)
+[DOI](https://doi.org/10.5281/zenodo.5164413)
+[Gitter chat](https://gitter.im/pydicom/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+# *pydicom*
+
+*pydicom* is a pure Python package for working with [DICOM](https://www.dicomstandard.org/) files. It lets you read, modify and write DICOM data in an easy "pythonic" way.
+
+As a pure Python package, *pydicom* can run anywhere Python runs without any other requirements, although if you're working with *Pixel Data* then we recommend you also install [NumPy](http://www.numpy.org).
+
+If you're looking for a Python library for DICOM networking then you might be interested in another of our projects: [pynetdicom](https://github.com/pydicom/pynetdicom).
+
+## Installation
+
+Using [pip](https://pip.pypa.io/en/stable/):
+```
+pip install pydicom
+```
+Using [conda](https://docs.conda.io/en/latest/):
+```
+conda install -c conda-forge pydicom
+```
+
+For more information, including installation instructions for the development version, see the [installation guide](https://pydicom.github.io/pydicom/stable/tutorials/installation.html).
+
+
+## Documentation
+
+The *pydicom* [user guide](https://pydicom.github.io/pydicom/stable/old/pydicom_user_guide.html), [tutorials](https://pydicom.github.io/pydicom/stable/tutorials/index.html), [examples](https://pydicom.github.io/pydicom/stable/auto_examples/index.html) and [API reference](https://pydicom.github.io/pydicom/stable/reference/index.html) documentation is available for both the [current release](https://pydicom.github.io/pydicom/stable) and the [development version](https://pydicom.github.io/pydicom/dev) on GitHub Pages.
+
+## *Pixel Data*
+
+Compressed and uncompressed *Pixel Data* is always available to
+be read, changed and written as [bytes](https://docs.python.org/3/library/stdtypes.html#bytes-objects):
+```python
+>>> from pydicom import dcmread
+>>> from pydicom.data import get_testdata_file
+>>> path = get_testdata_file("CT_small.dcm")
+>>> ds = dcmread(path)
+>>> type(ds.PixelData)
+<class 'bytes'>
+>>> len(ds.PixelData)
+32768
+>>> ds.PixelData[:2]
+b'\xaf\x00'
+
+```
+
+If [NumPy](http://www.numpy.org) is installed, *Pixel Data* can be converted to an [ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) using the [Dataset.pixel_array](https://pydicom.github.io/pydicom/stable/reference/generated/pydicom.dataset.Dataset.html#pydicom.dataset.Dataset.pixel_array) property:
+
+```python
+>>> arr = ds.pixel_array
+>>> arr.shape
+(128, 128)
+>>> arr
+array([[175, 180, 166, ..., 203, 207, 216],
+ [186, 183, 157, ..., 181, 190, 239],
+ [184, 180, 171, ..., 152, 164, 235],
+ ...,
+ [906, 910, 923, ..., 922, 929, 927],
+ [914, 954, 938, ..., 942, 925, 905],
+ [959, 955, 916, ..., 911, 904, 909]], dtype=int16)
+```
+### Compressed *Pixel Data*
+#### JPEG, JPEG-LS and JPEG 2000
+Converting JPEG compressed *Pixel Data* to an ``ndarray`` requires installing one or more additional Python libraries. For information on which libraries are required, see the [pixel data handler documentation](https://pydicom.github.io/pydicom/stable/old/image_data_handlers.html#guide-compressed).
+
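+Once a suitable decoding library is installed the API is unchanged. As a
+sketch (the file path here is hypothetical):
+
+```python
+from pydicom import dcmread
+
+ds = dcmread("/path/to/jpeg2000_file.dcm")
+# decoded transparently by the first available pixel data handler
+arr = ds.pixel_array
+```
+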
+Compressing data into one of the JPEG formats is not currently supported.
+
+#### RLE
+Encoding and decoding RLE *Pixel Data* requires only NumPy; however, it can
+be quite slow. You may want to consider [installing one or more additional
+Python libraries](https://pydicom.github.io/pydicom/stable/old/image_data_compression.html) to speed up the process.
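+
+As a sketch, pydicom 2.2+ can re-encode an existing dataset to RLE in place
+via the ``Dataset.compress()`` method:
+
+```python
+from pydicom import dcmread
+from pydicom.data import get_testdata_file
+from pydicom.uid import RLELossless
+
+ds = dcmread(get_testdata_file("MR_small.dcm"))
+ds.compress(RLELossless)  # re-encodes PixelData using the RLE codec
+ds.save_as("MR_small_rle.dcm")
+```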
+
+## Examples
+More [examples](https://pydicom.github.io/pydicom/stable/auto_examples/index.html) are available in the documentation.
+
+**Change a patient's ID**
+```python
+from pydicom import dcmread
+
+ds = dcmread("/path/to/file.dcm")
+# Edit the (0010,0020) 'Patient ID' element
+ds.PatientID = "12345678"
+ds.save_as("/path/to/file_updated.dcm")
+```
+
+**Display the Pixel Data**
+
+With [NumPy](http://www.numpy.org) and [matplotlib](https://matplotlib.org/)
+```python
+import matplotlib.pyplot as plt
+from pydicom import dcmread
+from pydicom.data import get_testdata_file
+
+# The path to a pydicom test dataset
+path = get_testdata_file("CT_small.dcm")
+ds = dcmread(path)
+# `arr` is a numpy.ndarray
+arr = ds.pixel_array
+
+plt.imshow(arr, cmap="gray")
+plt.show()
+```
+
+## Contributing
+
+To contribute to *pydicom*, read our [contribution guide](https://github.com/pydicom/pydicom/blob/master/CONTRIBUTING.md).
+
+To contribute an example or extension of *pydicom* that doesn't belong with the core software, see our contribution repository:
+[contrib-pydicom](https://www.github.com/pydicom/contrib-pydicom).
+
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..baf64c6f8048e4179c33d9df7f1d168cc4b59a9b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/RECORD
@@ -0,0 +1,440 @@
+../../../bin/pydicom,sha256=0iLug-6vlqt51reI8uWnuo2koYGWeBWVDNIR6l-DJOA,233
+pydicom-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pydicom-2.3.0.dist-info/LICENSE,sha256=gf3hItdxmjiEAS5q9tZcqmIt12BzCjvp_GyU1Rtrj2c,3196
+pydicom-2.3.0.dist-info/METADATA,sha256=t_FC0ZeGOcB0tGuC6UsiOG2JP-W0M7P_ipQfjDiX85Q,7137
+pydicom-2.3.0.dist-info/RECORD,,
+pydicom-2.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom-2.3.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+pydicom-2.3.0.dist-info/entry_points.txt,sha256=PbqQYG4QiiM5MW6mvjmClqjLgomOdwvR4D3IpnhQt-8,154
+pydicom-2.3.0.dist-info/top_level.txt,sha256=M2L2xCoA4YYwHP87hpIvN2d1PnvQ_B1EEY6JAEaRP0Y,8
+pydicom/__init__.py,sha256=oiyr5X5M5QFyKWJZ3PTPQZYJMxq57WSmIxCKdToerzI,1577
+pydicom/__pycache__/__init__.cpython-38.pyc,,
+pydicom/__pycache__/_dicom_dict.cpython-38.pyc,,
+pydicom/__pycache__/_private_dict.cpython-38.pyc,,
+pydicom/__pycache__/_storage_sopclass_uids.cpython-38.pyc,,
+pydicom/__pycache__/_uid_dict.cpython-38.pyc,,
+pydicom/__pycache__/_version.cpython-38.pyc,,
+pydicom/__pycache__/charset.cpython-38.pyc,,
+pydicom/__pycache__/compat.cpython-38.pyc,,
+pydicom/__pycache__/config.cpython-38.pyc,,
+pydicom/__pycache__/datadict.cpython-38.pyc,,
+pydicom/__pycache__/dataelem.cpython-38.pyc,,
+pydicom/__pycache__/dataset.cpython-38.pyc,,
+pydicom/__pycache__/dicomdir.cpython-38.pyc,,
+pydicom/__pycache__/dicomio.cpython-38.pyc,,
+pydicom/__pycache__/encaps.cpython-38.pyc,,
+pydicom/__pycache__/env_info.cpython-38.pyc,,
+pydicom/__pycache__/errors.cpython-38.pyc,,
+pydicom/__pycache__/filebase.cpython-38.pyc,,
+pydicom/__pycache__/filereader.cpython-38.pyc,,
+pydicom/__pycache__/fileset.cpython-38.pyc,,
+pydicom/__pycache__/fileutil.cpython-38.pyc,,
+pydicom/__pycache__/filewriter.cpython-38.pyc,,
+pydicom/__pycache__/jsonrep.cpython-38.pyc,,
+pydicom/__pycache__/misc.cpython-38.pyc,,
+pydicom/__pycache__/multival.cpython-38.pyc,,
+pydicom/__pycache__/sequence.cpython-38.pyc,,
+pydicom/__pycache__/tag.cpython-38.pyc,,
+pydicom/__pycache__/uid.cpython-38.pyc,,
+pydicom/__pycache__/valuerep.cpython-38.pyc,,
+pydicom/__pycache__/values.cpython-38.pyc,,
+pydicom/_dicom_dict.py,sha256=GyeZITc314lXB5XYrDFb6oIWg3rh0HNftMuVLWZTZ8A,494937
+pydicom/_private_dict.py,sha256=OFCFhgCmtUmittdqQlFzUtnCVsIuMmS2xu_2KiK_9-c,613172
+pydicom/_storage_sopclass_uids.py,sha256=BDb5mn-b2JmId6N-0sQYz0bCK7wJjsl1gWMkPMVZwqQ,130
+pydicom/_uid_dict.py,sha256=O9pbYsNMSxiT4kmzxOsB9_xwwH47Qwb1V6VxjyLvXlM,63470
+pydicom/_version.py,sha256=UdsM1ftSBB6yJJys0VJQ4Quynk-YIcGQwjNwMLwwsUo,379
+pydicom/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom/benchmarks/__pycache__/__init__.cpython-38.pyc,,
+pydicom/benchmarks/__pycache__/bench_encaps.cpython-38.pyc,,
+pydicom/benchmarks/__pycache__/bench_handler_numpy.cpython-38.pyc,,
+pydicom/benchmarks/__pycache__/bench_pixel_util.cpython-38.pyc,,
+pydicom/benchmarks/__pycache__/bench_rle_decode.cpython-38.pyc,,
+pydicom/benchmarks/__pycache__/bench_rle_encode.cpython-38.pyc,,
+pydicom/benchmarks/bench_encaps.py,sha256=joap4CXNaWWdqwcEEHD1WXhq6BFOepOrjdOfghKhgaE,2962
+pydicom/benchmarks/bench_handler_numpy.py,sha256=uMnnoLJaUAyVhsjqGGNW6ny32dn4pOHQluVzh0tMgX8,7943
+pydicom/benchmarks/bench_pixel_util.py,sha256=lrrdjtd6Epie0-XGMAGQyEswUKOSev68v-4rMvSB3Ls,2080
+pydicom/benchmarks/bench_rle_decode.py,sha256=cPUt9Iz4onsTON0W5ec_ONdVN1Fpf6xF5zVetYx3qxQ,4160
+pydicom/benchmarks/bench_rle_encode.py,sha256=-ru1ht7bFauaSjaZPIDZ899GqDNVVP5Y9XXw6uXUP3w,3310
+pydicom/charset.py,sha256=QpTybCvJEYWuWHHHul3L_6nzIjR30nVtoq1U3IhTQk0,31107
+pydicom/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom/cli/__pycache__/__init__.cpython-38.pyc,,
+pydicom/cli/__pycache__/codify.cpython-38.pyc,,
+pydicom/cli/__pycache__/main.cpython-38.pyc,,
+pydicom/cli/__pycache__/show.cpython-38.pyc,,
+pydicom/cli/codify.py,sha256=5zs3TMNvWgVtDaTztRk7gnAZXu1NaHw4jPAkTE_JK6k,1001
+pydicom/cli/main.py,sha256=ExxSawKB2_eFhwPWBn14HLuQsF_02Y8VmZa5K6dCJ0U,7281
+pydicom/cli/show.py,sha256=XeJhDbchu0rVynyvWYOC0BDPOo-Zpv_jKLNHaCWI_sw,5323
+pydicom/compat.py,sha256=7_QfnBdg3uZ_oQ3Y2qOxpriK0sWkSJewFbSgi45GnVU,607
+pydicom/config.py,sha256=IgU-XVzCnsUU8R0M1nyfPovKHgrmkwmJPYKP4FoK38o,18383
+pydicom/data/__init__.py,sha256=2yCx61qIC4xlvXYabrlNvOzI10_TU_KXqtLm1i1CUo8,403
+pydicom/data/__pycache__/__init__.cpython-38.pyc,,
+pydicom/data/__pycache__/data_manager.cpython-38.pyc,,
+pydicom/data/__pycache__/download.cpython-38.pyc,,
+pydicom/data/__pycache__/retry.cpython-38.pyc,,
+pydicom/data/charset_files/FileInfo.txt,sha256=8bqIjlfGV1jPXzoPyVagyDrQk15zh43TyRFMtdFkb_M,1896
+pydicom/data/charset_files/chrArab.dcm,sha256=cCDs27aL3RMmTa6yn6vmNqKNeaGoPsu2Wg2ZbGhFjB4,1892
+pydicom/data/charset_files/chrFren.dcm,sha256=g2Pz0uVbRIpojteGNnW_lkXnepGUht37j6oSIn4osJc,1890
+pydicom/data/charset_files/chrFrenMulti.dcm,sha256=6gHCARG2ODJ9bZwANjGgOWFnJnv8acU4FfBVx24tr8E,1938
+pydicom/data/charset_files/chrGerm.dcm,sha256=SaKFVUpO9irpfDHByMFen8MolXCg9W9EbmplLHmdrl0,1894
+pydicom/data/charset_files/chrGreek.dcm,sha256=zb33ggZCwTwmtJ5JbVcfOQQY0weOdbLAcOXGaIYe1Js,1890
+pydicom/data/charset_files/chrH31.dcm,sha256=N7EWX8KzXL4S8LA2pDnRxpQSrbNM5aOH0jGR_C0oX0g,1950
+pydicom/data/charset_files/chrH32.dcm,sha256=3kKvcVrBHXAdSTrDSxz0d9Ph8tXQXjRBc4po1JX3cI8,1960
+pydicom/data/charset_files/chrHbrw.dcm,sha256=UGW7XI5VjsyFzEgROhsL8HcB6vLp-ht7czZpx62O-cQ,1890
+pydicom/data/charset_files/chrI2.dcm,sha256=HS7RqifAHKhe0kgtb_6XJJ0_J2Zh-mW5nBsRA7eKoMw,1934
+pydicom/data/charset_files/chrJapMulti.dcm,sha256=n2nmi6EmUbKND3YEew93Yo4wVMwWRhdGrTYi-ovUqUU,1918
+pydicom/data/charset_files/chrJapMultiExplicitIR6.dcm,sha256=vUXDBLQ-H4lJQsMCOfRjFW4OY5hvsdrUwmJbWaoy4B8,1940
+pydicom/data/charset_files/chrKoreanMulti.dcm,sha256=8H1hkzEOWT_X-Ozz4wHaAcgsR9wilziW8qZxxp1_3U0,1898
+pydicom/data/charset_files/chrRuss.dcm,sha256=6C2IVrfZ-0B6gKKCTcet-Nr3ztJlRQ1piVX3VKmvFzA,1890
+pydicom/data/charset_files/chrSQEncoding.dcm,sha256=sSSnS88vJY7oyZw1QgjrfOs5aefy6CfZ3W5BkF-sqn4,520
+pydicom/data/charset_files/chrSQEncoding1.dcm,sha256=HuYYm0XhYQcxdit6gj884ym3D5zamq0ZNpcpZqiqxmE,520
+pydicom/data/charset_files/chrX1.dcm,sha256=EzIypmZYfuiEgEyweqpL7PNvcgvFkZoHgXMs5pH13tw,1910
+pydicom/data/charset_files/chrX2.dcm,sha256=xibzEOAwEhOEVlifUWdUfFv-z3re8Tux7UOiHcsnTug,1904
+pydicom/data/data_manager.py,sha256=EF5WO899j8962lq4IW8WiwLg0s5JCC8pBpmLuCMrY4w,11895
+pydicom/data/download.py,sha256=svh0EQH7Wj67-EbaX2gdHg7s3eu05v67mbmipUt3lh4,8190
+pydicom/data/hashes.json,sha256=wyHfFQVGt6egRpOal3bP0AneQz6F3b2nYSXpL1SpIGU,6297
+pydicom/data/palettes/README.md,sha256=GbyzAaqmlnv-0NWPtCfyS7w5sgdYGt-f1u-XdLIjWhM,1725
+pydicom/data/palettes/fall.dcm,sha256=02oKSUXFYbGZSaa_mQKNQOa22_fXsHSVWXzbksoCvl8,4078
+pydicom/data/palettes/hotiron.dcm,sha256=8BfbYxPOkLPwI4bIKz-rCj6J4EmIIh529poz8boIQ3g,4754
+pydicom/data/palettes/hotmetalblue.dcm,sha256=dQMYgjybVe4HFVCvvUO3QMVqgNf7bL4Q58SsXyvbk_I,4776
+pydicom/data/palettes/pet.dcm,sha256=tAEivc9JpTmkzFFfJxQccEbdXG8Eh73e5Q27LqVDCgM,4732
+pydicom/data/palettes/pet20step.dcm,sha256=OkVr26P0pfUH4b55WxSdVMAVlJ7P38yN97FVdR4mAe4,4790
+pydicom/data/palettes/spring.dcm,sha256=FQPqx9UektgbHpX-tyEe_enonc9X-IupefsuNKEATLA,4104
+pydicom/data/palettes/summer.dcm,sha256=ib4swc-T8aGNLuedM972FlTcvGyB0te2_NdUjPD-0eM,4100
+pydicom/data/palettes/winter.dcm,sha256=MES-2NHAxIHFwkbDgsJXAj-dEtzUZs6uXYYjRf3NrF4,4112
+pydicom/data/retry.py,sha256=uoH6mTCQcFUQJuUQ2y96upL_h2RVZj62rQExHq_mJzk,2057
+pydicom/data/test_files/693_J2KI.dcm,sha256=jV1QP9RrmlnGKHYtcdc5HqGipf2NM5rILvnigaFe9l8,3590
+pydicom/data/test_files/CT_small.dcm,sha256=PdMeXMg1s_LN1GydoZgvWSUeeFGP76gWPZFGMcZkN9Y,39206
+pydicom/data/test_files/ExplVR_BigEnd.dcm,sha256=Quth6lZQ8QZOUtSAGc2HsRjlLPTfvI-ldCftLtTANuo,15412
+pydicom/data/test_files/ExplVR_BigEndNoMeta.dcm,sha256=pWvoyMUvDRz1XXws7Wq8byK3mcv1VrhLhWBVp6dWWUk,434
+pydicom/data/test_files/ExplVR_LitEndNoMeta.dcm,sha256=AI6TApddNImdibTj8ET4Y3sWrN4lJC6ws2of_ANLm0I,434
+pydicom/data/test_files/GDCMJ2K_TextGBR.dcm,sha256=afX7npLlegsq4XbIkegJwzSdWvLXH1VTIxhH5jQ0Ods,30706
+pydicom/data/test_files/J2K_pixelrep_mismatch.dcm,sha256=LfksUj02Y55NiPiS9H5PZhbEirAXpEWmJB_-OLLQe78,138518
+pydicom/data/test_files/JPEG-lossy.dcm,sha256=xCVgji_NqDMsddM_iQv-O64ycAYItxkEaz2eeJN0wpI,9844
+pydicom/data/test_files/JPEG2000-embedded-sequence-delimiter.dcm,sha256=sf2TAdnQy-A-41hDscGS0EDu59xDv5e7Hpa6OtYC2H8,3308
+pydicom/data/test_files/JPEG2000.dcm,sha256=W-U5Ak5oAwKae3PA-OcuiNAy46C8BZIsDARzRHgKqOE,3308
+pydicom/data/test_files/JPGExtended.dcm,sha256=E9IXYX_K3CLAaex7GOlHMdNGxdg3A7wVL2kod8q1CF8,9844
+pydicom/data/test_files/MR_small.dcm,sha256=PyfRwi8aZugNe7fJEehhD9C7cDJadnRqetscDd788rs,9830
+pydicom/data/test_files/MR_small_RLE.dcm,sha256=Lly2CHjcCsxJQpjM2tKPzizxTFEJbl2M7atAJI6gLmw,7790
+pydicom/data/test_files/MR_small_bigendian.dcm,sha256=PkyMn-cN5PO-FJu9Zz-lbyEcjo4v-brGP3D53DG10Qg,9708
+pydicom/data/test_files/MR_small_expb.dcm,sha256=izhGdx4du0s22vPqu9MxCQpHNdKTAXRUBFjFHsCAius,9846
+pydicom/data/test_files/MR_small_implicit.dcm,sha256=YHdELEKlb8f8x9uEEaZX3e2fwQnm0ydXZcTeNYKSspk,9702
+pydicom/data/test_files/MR_small_jp2klossless.dcm,sha256=TABJ4DVbVgyMhGU42Cevva5TEbIPxeWpOjiS4Qm7FA0,6008
+pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm,sha256=srad0q6FS_ffraZ0VwnNXYpFc-oSOHrbvcVui-YFYgY,6124
+pydicom/data/test_files/MR_small_padded.dcm,sha256=tG4y2EMPHobn_AO5VC4G_8QKWRiQo6zGRMMB1qLw5X8,9958
+pydicom/data/test_files/MR_truncated.dcm,sha256=o_JsJ53SFJUdMqFUg2LfPJP5cwE1-ok6AVUsDmMvWH8,9630
+pydicom/data/test_files/README.txt,sha256=Ga-n5Gsa_1F0lhbz8NBpGUgAdkW_CBpwjvcEbe1E6Q4,15389
+pydicom/data/test_files/SC_jpeg_no_color_transform.dcm,sha256=DJptn-pOS-8i2u3Tqxv7q-vuwYwylsfgyOw_ap9CR0s,4316
+pydicom/data/test_files/SC_jpeg_no_color_transform_2.dcm,sha256=F_GmgHA9htwzft7fRBIVcWOwQapvn1W3fHFdG09JQ0Q,4310
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cr.dcm,sha256=0WCStSbkYyiJehjLCtxcWCu_6VPW3LLRK7knDTmPbEE,3626
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cy+n1.dcm,sha256=rt_n4xSA51u2pkz5a2oQSKiyTpE1lUhy-6Sii2CUGmA,3136
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cy+n2.dcm,sha256=1aHXElfidjxjNxq3Ssrs51lpaFAHeoi89F6X4KhPcGU,3090
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cy+np.dcm,sha256=VGfC6X78rmtGL1n88dYm9PGsmcfqg8JZbObnqvIIOsY,3140
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cy+s2.dcm,sha256=RNgNpxrjvmBI7XMzobb0wQmL4jEMpJgfaHDw_tL-dO0,3094
+pydicom/data/test_files/SC_rgb_dcmtk_+eb+cy+s4.dcm,sha256=8u1AEoCebTK7VSY4IhtECAiyY9mPUVFeaBa61_zZ_Xs,3420
+pydicom/data/test_files/SC_rgb_gdcm_KY.dcm,sha256=vQ_dl4QUMd2OOnakhCz203goUlSic_eITAp6jduWItg,2998
+pydicom/data/test_files/SC_rgb_jpeg.dcm,sha256=ho7HqHgnhE9mvl_SXAHcdtOofdcraeBsRlHCiqya3l8,4464
+pydicom/data/test_files/SC_rgb_jpeg_app14_dcmd.dcm,sha256=F_GmgHA9htwzft7fRBIVcWOwQapvn1W3fHFdG09JQ0Q,4310
+pydicom/data/test_files/SC_rgb_jpeg_dcmd.dcm,sha256=HSK117x5be3HhiT3JBIa_Xdz9wkgnuFqctWJavwh1HU,197506
+pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm,sha256=ZUikWggAYmz3Cll2YUb_O3kKOT7gyfyjWfkscPNws4I,3424
+pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm,sha256=pJLtShIMUaB2EmpgIejKsaywFy2j1CxihDsqNKjd0lI,5204
+pydicom/data/test_files/SC_rgb_jpeg_lossy_gdcm.dcm,sha256=-58affvKGNOvZm70qNFeLYHnV0Wy4RjjxZD_kN4YjQs,5042
+pydicom/data/test_files/SC_rgb_rle.dcm,sha256=P5juNS51sQzNbSecowsMseNjoMnd4xiAPw7GYBETJ9Y,2006
+pydicom/data/test_files/SC_rgb_rle_16bit.dcm,sha256=hlBL6kqM6jbvT2Xmh0lc6oAOcBJUajICp5RLTwazdrU,2606
+pydicom/data/test_files/SC_rgb_rle_16bit_2frame.dcm,sha256=1xOQoK2cZgFycdTcFsOkX8KtSAilqO_fW63wEnTc9gI,3896
+pydicom/data/test_files/SC_rgb_rle_2frame.dcm,sha256=zJzQmKsJm196GMRZnyhY0vPzRxWQ_4oU1M98g0aS2fA,2696
+pydicom/data/test_files/SC_rgb_rle_32bit.dcm,sha256=IixwYL4Wl6JOZlm9l-8X_pyFlu5Bu6Kol2iFTuwA44M,3760
+pydicom/data/test_files/SC_rgb_rle_32bit_2frame.dcm,sha256=XJqmBpgurbHYqkRfvocxitL6mEYV4U2cLEa9RJfN8sM,6246
+pydicom/data/test_files/SC_rgb_small_odd.dcm,sha256=Sso2GrMw9X9g5rHjsx3Ng0pRK-6KQka74dFRARxH4DE,1444
+pydicom/data/test_files/SC_rgb_small_odd_jpeg.dcm,sha256=_7UhnKRaK0ks5eWm_HpfWtWmZ3FqLluFnroyM3at9Dk,2044
+pydicom/data/test_files/SC_ybr_full_422_uncompressed.dcm,sha256=CPb0k1riJSgthIHyl9N7HPM76MPZkCjzEKmj-eiq8oQ,21686
+pydicom/data/test_files/UN_sequence.dcm,sha256=CfAz4NQKGMSt4LCLjTGKtt62hKhPUD1OxPAJCIg91DE,674
+pydicom/data/test_files/badVR.dcm,sha256=0SWD3wz2YUYlYji0ytomaVajF8BUbHUWyzb1aYLOhbI,7618
+pydicom/data/test_files/dicomdirtests/77654033/CR1/6154,sha256=qk6ezZr2UuXhfkRSUn8Hm44Fx3bFgLgY3AmQgnE791A,2300
+pydicom/data/test_files/dicomdirtests/77654033/CR2/6247,sha256=6u-Vi708OCmPNJMM8AE8D1B5zAUty-gu8m8U-LETumk,2298
+pydicom/data/test_files/dicomdirtests/77654033/CR3/6278,sha256=wER_c9jA1ytYqDthkssPgfjPgMw6FXHQL_j5cFWc2aw,2298
+pydicom/data/test_files/dicomdirtests/77654033/CT2/17106,sha256=Z433IEEeht9nAxwo4WGSzTHiBio_CQ1le58WzobbPx0,3810
+pydicom/data/test_files/dicomdirtests/77654033/CT2/17136,sha256=Wy0HNL_Th-k66nXPXNtzpWcM9ZFJN95jb_ae9dxyBXM,3812
+pydicom/data/test_files/dicomdirtests/77654033/CT2/17166,sha256=BJsiHJsu6aS92EPd31Ydzh25rRkk8bg9h7w8bnTMmrc,3812
+pydicom/data/test_files/dicomdirtests/77654033/CT2/17196,sha256=5HluUUgKjHPtcXwzANc4TsktY-YyLt0IDTNmWrNzZfA,3812
+pydicom/data/test_files/dicomdirtests/98892001/CT2N/6293,sha256=3ilw2gWJypSPuoY78Ok_TBihaVvT7C_o-nOQW1OsXmc,3920
+pydicom/data/test_files/dicomdirtests/98892001/CT2N/6924,sha256=HcNAKOB3B3UNHBncF7lJf0RZFYCCfMet8rjGefX587E,3908
+pydicom/data/test_files/dicomdirtests/98892001/CT5N/2062,sha256=J_gSZIVjSoF_CUHMcnCU6M-ssYNB-8MX0XX_NzgEEYs,3936
+pydicom/data/test_files/dicomdirtests/98892001/CT5N/2392,sha256=ArCvIO6ulfbUJVTFOmMLq6pe0lGWVyCBN9PHQYtqy4E,3936
+pydicom/data/test_files/dicomdirtests/98892001/CT5N/2693,sha256=YCD9_9UTAX57LV3FJDWyu_JyCaq-4YOjAgehSdzOyy4,3936
+pydicom/data/test_files/dicomdirtests/98892001/CT5N/3023,sha256=T39PKzx5rN1eD8I_3Z9rrm5tqdzPrdH5FHt7_1N1ZlM,3936
+pydicom/data/test_files/dicomdirtests/98892001/CT5N/3353,sha256=XqpmIRi42HYBoUIxiTy37PFzekNMlRIM-0B2z85mexg,3938
+pydicom/data/test_files/dicomdirtests/98892003/MR1/15820,sha256=YZaYwKFfmWPQxNJx9tEa5oWoPsHrfQZUehbkirvHCCE,2336
+pydicom/data/test_files/dicomdirtests/98892003/MR1/4919,sha256=Gg_C7GF2I67sz0SSvGBazk77M8685_5N1UvORyschjU,2336
+pydicom/data/test_files/dicomdirtests/98892003/MR1/5641,sha256=-4CehnrpihyZXUHw1Fj7eqLPEXuLczFVm9ATRlPJhOg,2330
+pydicom/data/test_files/dicomdirtests/98892003/MR2/15970,sha256=ngpnSCqWqkuGS-tcuAUUHoD1ZMsxaRuAsoBLmsp5Lvk,2336
+pydicom/data/test_files/dicomdirtests/98892003/MR2/4950,sha256=e0pPqPFPVBYdpisxN2tse_JyKzvidNcSf3fGAh2EYeo,2356
+pydicom/data/test_files/dicomdirtests/98892003/MR2/4981,sha256=AURSQGtFTnejN4gbql7SFsz0FLZ7yVc4-HzXSSGAFLU,2354
+pydicom/data/test_files/dicomdirtests/98892003/MR2/5011,sha256=tKQP10aHO5-mMIQSzF59u4HHNzgTLBhHxFDHzRwcf3w,2354
+pydicom/data/test_files/dicomdirtests/98892003/MR2/6273,sha256=ivSQvSlna_ARs7PO-Mg8uRzSjpJ_wrOxCf0r-OzZRRA,2348
+pydicom/data/test_files/dicomdirtests/98892003/MR2/6605,sha256=Td1cP4kBvZYNICRyqzG8iwQ5St8FVkYe0O2tc-4S98Q,2348
+pydicom/data/test_files/dicomdirtests/98892003/MR2/6935,sha256=SpQ4pOYwsAQ2e2Ku-tmwYKOx9yotZvSOkRYR4BWOwnE,2350
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4467,sha256=MYE4LWCI9R6Oce6LqmiVEd_wC58OZ5k64fr98oIBH7U,2350
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4528,sha256=Y3SlmnGZlmkJHvITE8xUoRWGgHb2w2aX9t3y-Aj4KYE,2348
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4558,sha256=9m1WKSK5GMkTE-YVyLTtG1-Va_Ea7Cch-1Su-ZFf_60,2348
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4588,sha256=H650bBIYzIx8KxQ0QUcEjXtGMbY2MrB2CO7Dk-Vo-sA,2350
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4618,sha256=gy1CsHNhkfxSrjygg4hJwG5EVmEdhLGcTJLj4nGwQIY,2350
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4648,sha256=8BkImUJFXR8xahHQycRUyErcHAQYR9O5_z9nCyHlr_8,2350
+pydicom/data/test_files/dicomdirtests/98892003/MR700/4678,sha256=N0nWXRQiMYXDEFhJWI-YrSqWKqsbFCSIsg59RR2oXuY,2350
+pydicom/data/test_files/dicomdirtests/DICOMDIR,sha256=ub9jG7IPknYRi6-rCUKRuuNyG8zSW_S-8YR0quXWBJg,11116
+pydicom/data/test_files/dicomdirtests/DICOMDIR-bigEnd,sha256=AZcSW2sv7XtE0c_EYHOI3dxwulS0snimcvY9gxPy2Dw,11116
+pydicom/data/test_files/dicomdirtests/DICOMDIR-empty.dcm,sha256=BQMRb80U1nDu7AN_KI1S0oxqHwyUrmzdSsbYDbcjNhM,396
+pydicom/data/test_files/dicomdirtests/DICOMDIR-implicit,sha256=ACO8IkLM0Sf93cOZ-QcQlkNmC2xMLXH9X1YTzmWtKpQ,11110
+pydicom/data/test_files/dicomdirtests/DICOMDIR-nooffset,sha256=KlUFZW0UrKKagZCDsYO3nqYD3iCqhasAU0-Ps5-4g3c,11092
+pydicom/data/test_files/dicomdirtests/DICOMDIR-nopatient,sha256=NaWZFjzvVHADMOXcIxvRBdLRXVncfmXXOoaDqKevkLQ,11116
+pydicom/data/test_files/dicomdirtests/DICOMDIR-reordered,sha256=aoV_z4TsxSefVhagM52ZOcYi_RTQV-__LAdH5x3pPHs,11116
+pydicom/data/test_files/dicomdirtests/README.txt,sha256=q74ZyKoE_HZ9cuK1mUizg-tgL-ZEDnQ5fp5KkxLFzP0,719
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/DICOMDIR,sha256=YgEFEzvPoTlneB3T5pzqYaGlp7JBFjYKHVDoBBVLvFQ,13066
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000000,sha256=zPLwJruec5wzx3X6Er38B5y_ydJMRZftXey4eXlD21M,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000001,sha256=JHbRsOrg58qAVRV7TM3EoJLrJaz_G0LhvwgaOVHIQno,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000002,sha256=1Ne1Vk_UlSosSeI65mlBxC7eg5hSeciVrKz7jx-aFP8,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000003,sha256=L5aEE7d39gfbpcvyIRkGD3H8m-FhOK40Z59rbAuFx74,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000004,sha256=J32Abshoc_exA8UVXhBtpUmoevgaIpmI7nRN54Dr_8k,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000005,sha256=IAKkOSkMWJCladCGR3hMCEQeKQhBQ_pKrgcDPLdJ9Ls,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000006,sha256=XhyWmb4G_CHGSy6VQFnS9p1J5WN_GcQLnyBOG8WB5jw,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000007,sha256=Ogo3CMlFrhZ9hzO7UJqBraihDhcH_Qh5hhReYNmGWuE,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000008,sha256=9LH6UnTXJ3hQOuO7N9w6i8m_DRHyToLMOGTzypieWZI,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000009,sha256=ItYa8aAC6H1CWmVHlxTkoxRMYnDndnteeLCzvvlPCrI,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000A,sha256=UcB4Le4pOTayuoKSYlIHV9spAeGlqpqyb9urKAKzFsc,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000B,sha256=e2YxdBJ4lR9WssIS6iNURrt9rKJM6V7Ualdg7MiQc_I,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000C,sha256=knJlF4zJacMyOpjIqUE6wdbAxQcTEqkweyg8fsArvwc,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000D,sha256=n2wISMr5iugObCndS7DSvh605pdKdUfuQnJhi8yQmGA,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000E,sha256=mw7wiXtegS3LzPS6HRi75aYbpepQrrCl-Tpc3nBSrDw,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000F,sha256=nOScB7GcUb3kVlFrK90mLINL4adLUuG1YRyUnTOlPnc,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000G,sha256=56Z8X6WShc3QD_8w9E0OHY1t0hSvWtkcReUvxzBoDfs,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000H,sha256=BzHEkOGKZhEaNE7KUtd5VBaxiRuugdeAIuRLU2h-R90,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000I,sha256=3kSaH1V8lsNHkdZBtxn7SVnyY2GrY6nwbGE8dthFPmY,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000J,sha256=Le8qsclEmkq0Ztgwo36D-BkTr-IO_2llgRyQZk2cL80,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000K,sha256=AtAGTeuIl0B847441qSV17VagYTAxqja80HYkpQLV08,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000L,sha256=juRJmCVg_C6w9jFeY-e7vNbLVnwduG9QqvqBlppB4rs,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000M,sha256=Efk5nWmK6HimLeI45OtAK-0ljQdKCYelHGvvJCIOMXM,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000N,sha256=NXL_mHjfjQiNz_ggl7eCs2LM0KvijAUCZyPuT8lwrOs,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000O,sha256=9RL9NK6yoHK4UcBVeoVmk__QmEc3KA0NtPSY9Jye6RA,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000P,sha256=CQV-tDTBaVAWQJAUGe9kG9wC7IvskO3TEjB3CWDEddk,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000Q,sha256=kluBuCiPHjvlUJDpHD55LmHUD-RUeiC9MWGg0H_NEPM,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000R,sha256=6Mv83BA3v3cUqHhDuVHRkCu2oyHUjcCO-XHhroal9ds,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000S,sha256=mM-YX7yMymIlCKvtmlJKyZp7zCyWjZywsPYZQ33_N64,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000T,sha256=YxcfWM19WSAQLs09ZqcDOX2YerpHiDTRjnlzvaaezUE,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000U,sha256=EPNmSb9Lk4vBZYy7ooPpAn7H5F5s1nChhEMpEQ5VJ3o,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000V,sha256=2cau16C-ucneAxEiNt3Ba2HYGUkT6jBMbEBJsFpC76M,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000W,sha256=paquQBoYcer7aGmyfYyCvbiCwTWX2wT0wxMOxSV-qxg,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000X,sha256=BjpxPFxcVb6NRudHBUlmcow37Nzr-rcNVhDZf1-7FW4,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000Y,sha256=pq-O934Ub19S-VKg69fR9Xa9ueijTthpzJGZceypx-4,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00000Z,sha256=3Eiid2Y9hYfM3eyY13h1M9FqUpZI0FjBd-uoHv-kf5o,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000010,sha256=RCHpAHlzhX7_U_1VEQqxTtJmTcutjmoJ7afVo2lnBg0,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000011,sha256=NxwurTwH3uai_0qp3P5BHb2nuNmxsFOl1iB0sQNeuM0,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000012,sha256=9GF8dH0OpVyT2gpkO9GCQFl1IQqreb2xU0yN-k13NRc,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000013,sha256=rnexSgeI45ojWdzzeQpJYDdZGcouyXVfpTnqpqZ_MiI,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000014,sha256=Gq0cFJaNLtBUrRs4nQD7Q00MecIuH9Y32kSHRxKAhUM,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000015,sha256=ztMXY_JAUbq1eIZSg1ACHKolNUEV5Ei-rJ6g5NGaJ70,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000016,sha256=Wli7OFPlW8DE0mKeDOSqFBhomVCz1a24RpY4O-Uz3KM,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000017,sha256=UWC_9cDcdzx5fUHzfP_w0lyzJiND4vIxAe1L2bjeupw,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000018,sha256=LXSLFPSlsQi_2gx9joVtgdbfC91quhIgmfCmkcFF7jI,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM000019,sha256=kRIYk2aIiH4BykkV8PMQmboMIl1RQBjLw8u8ZWwJoQY,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00001A,sha256=sz316xvWmwo_EsQZk3DDtq8BL2HytEbHH2o5PnXOHZ4,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00001B,sha256=b_HpWbjrvoUfweoEGL2IfbIE2x4gpPhOrTQpGgq0wI0,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00001C,sha256=JpKlrbGCiEydAByu6H2jfu-uNrWEOWP1u59EKLcmXJg,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/PT000000/ST000000/SE000000/IM00001D,sha256=bnhThxOiA0BsVCG3OOt_pWftWnr7vXdJ-QDydoL9Q5Q,740
+pydicom/data/test_files/dicomdirtests/TINY_ALPHA/README,sha256=x0ALEevpqr4zTykO4sr5MNXb2tx0EOEaAKP4PPBc70w,1206
+pydicom/data/test_files/empty_charset_LEI.dcm,sha256=f9IIKnbpqXyxMG8do4m6_jLsL4dCYqnGx4t8R1rP-00,276
+pydicom/data/test_files/image_dfl.dcm,sha256=ACnru6F-fG8IFAjUM80otdHP7g7rTP9Qm02XL_qdryc,4637
+pydicom/data/test_files/liver_1frame.dcm,sha256=isNUYYXQwYwZNDi0exbE7zI_Dr4Oj9Bx7h5tQ-3vGXg,37084
+pydicom/data/test_files/liver_expb_1frame.dcm,sha256=JCkljewPnERLadnXMmtEK9J8ZqK6HW9ogEAF0n32rxM,36532
+pydicom/data/test_files/meta_missing_tsyntax.dcm,sha256=B13FFIOVG7ZR3a_XfoAxAfzuIatlbKh9blMerq60QPI,317
+pydicom/data/test_files/nested_priv_SQ.dcm,sha256=XMaUlkwQywK1AfAlhZCaf070ZWwHqJ5gPRgzYpkZxcY,343
+pydicom/data/test_files/no_meta.dcm,sha256=UpErmVD0V6x2GO-q0M3ZG1I1Tgf7wlq-6JW9hr7r-bw,38871
+pydicom/data/test_files/no_meta_group_length.dcm,sha256=dsavgrQkYoX25bU_FQr0S5t-Kv564l4bYExUm6PRZzM,408
+pydicom/data/test_files/priv_SQ.dcm,sha256=Al6nkc2WW3l3o_E5TWS9NhfnPbx5ZifJib3LsGQScKI,546
+pydicom/data/test_files/reportsi.dcm,sha256=WcpfT79SS9VCqQf48pAovlEOnZByOdvi8cgv_FCIU4s,2968
+pydicom/data/test_files/reportsi_with_empty_number_tags.dcm,sha256=_YqKXcPqoFPQiXSYH3DOSDf0oWdiiLnwWdZg98RwGpI,2700
+pydicom/data/test_files/rtdose.dcm,sha256=HWzAkhRtCT4IamvMzvTrt9CXlBND9c07Y5XRV7ZON-Q,7568
+pydicom/data/test_files/rtdose_1frame.dcm,sha256=ZoUnPhZhVi842-KxxihLnJUMfbzwgKccFDBdYj0LYJA,1958
+pydicom/data/test_files/rtdose_expb.dcm,sha256=_kDuftDNY9Hna1G0LU5ot2S9X4qa1Zzp-rlIcVjFULg,7618
+pydicom/data/test_files/rtdose_expb_1frame.dcm,sha256=qWz9PI1Mpw8qjRuFvCK0fXu9YJ19Ieov6CExSgzTZ0Y,2008
+pydicom/data/test_files/rtdose_rle.dcm,sha256=L4Pjou8N41VXDDiGCyM_wvpsN2Jsga0IDYZhwDpBNSI,6816
+pydicom/data/test_files/rtdose_rle_1frame.dcm,sha256=9Oejt66zhsoaLXRgsMSXcdkj9v2Ku6CMHUx7xcJc_Cc,2122
+pydicom/data/test_files/rtplan.dcm,sha256=GFhdu9b3xdG350nWl21yJRgCrYnWW8zTHAMAb5WquJs,2672
+pydicom/data/test_files/rtplan.dump,sha256=Tl-mZrdz24m7EsBKZ3I98ru_4_Mg4tGyxU4eC7_zcvA,14474
+pydicom/data/test_files/rtplan_truncated.dcm,sha256=FQCex3E9xTuVrf1OGmkohSQN3TSg8Y9SwDJ6BcrL_VM,2129
+pydicom/data/test_files/rtstruct.dcm,sha256=QMQb34cf2FUzlrAkdqZgJO0jwEknwNxT_RDs00cs0NM,2534
+pydicom/data/test_files/rtstruct.dump,sha256=y8DRC1oY93nYBzKIs8KQsUSGn2jJZ9JY24uskAgpENE,7935
+pydicom/data/test_files/test-SR.dcm,sha256=7r8Ao36XUDtaZQIvnC-J226NrEzGMmgqo0Vq7htsF34,6796
+pydicom/data/test_files/test1.json,sha256=vdM-wJdbfVQGnp9w5-0MJDOHJm30Mu4zm1kyHW3y22A,20965
+pydicom/data/test_files/test_PN.json,sha256=H0lJhdcqv1N2S5xxictHuLMywvSCUBvOI35PgdSKkUE,1841
+pydicom/data/test_files/waveform_ecg.dcm,sha256=cvHLDmXoAjMhrNqlQlxEElzVB_WqoUj3_hBRbh0uaIo,291088
+pydicom/data/test_files/zipMR.gz,sha256=dSos0EgKuUcBnF1ejSnKKXpoBmgcmyHVoddNgfIV7es,6958
+pydicom/data/urls.json,sha256=LOcuTSvmtBTmHEekUpfLVVAGXPwoqPet4iCBWbnrfaE,9463
+pydicom/datadict.py,sha256=dwa9wZrn3-R53pfYA_g1ZnUnFPT_mrI-TNyAbbi8uOw,18384
+pydicom/dataelem.py,sha256=K9wiMZ12VkQHRLfTC2KqlPccHNrokgDmJKbrBvPzjGU,31095
+pydicom/dataset.py,sha256=IO_ZWaU_f6Wp60M2nA1YqYv_tPeUHf0SsXaz-V_Soyk,106270
+pydicom/dicomdir.py,sha256=Qmju1GVDtbzzAx_obzSQcyKooxfHe0oAh-IvKKq7Q-Y,5676
+pydicom/dicomio.py,sha256=fdg4bwY9XN4RVxsECLbmCbk2Pk2l4BzkORVMnQLHel4,403
+pydicom/encaps.py,sha256=hcMoi0owHZ-eE5yRzj33ua8V--iUxQbnNG2_A5VSkUk,26824
+pydicom/encoders/__init__.py,sha256=s2MrW4WAb8U_8FBI7FRXV0hsEm2cP8LMv_bBh8V4NMI,66
+pydicom/encoders/__pycache__/__init__.cpython-38.pyc,,
+pydicom/encoders/__pycache__/base.cpython-38.pyc,,
+pydicom/encoders/__pycache__/gdcm.cpython-38.pyc,,
+pydicom/encoders/__pycache__/native.cpython-38.pyc,,
+pydicom/encoders/__pycache__/pylibjpeg.cpython-38.pyc,,
+pydicom/encoders/base.py,sha256=uZCVh51rSOVYfKSmLsH41wtsNngNqavIFDMJkQG1lvQ,36135
+pydicom/encoders/gdcm.py,sha256=EmREte0s_PLdic2VJxhVHK44Gm3ikuknPh40nheqL48,5901
+pydicom/encoders/native.py,sha256=ypHlHvYhE8OXCPE1IncItSAG5YlRdsbFDmDJ-QpmxW0,5374
+pydicom/encoders/pylibjpeg.py,sha256=BygTU3hqNbr8KMjOmTzwdbpupfTWoFZ1FmRPB_-0dWc,1125
+pydicom/env_info.py,sha256=9ar42-m4f5YZ8xnalvMX7FKcKXdPz0sLUJ7J54MlUj0,1714
+pydicom/errors.py,sha256=a95wGR7yEzYS7swRAc7NHZm_Ttxsu6Ft_Yr88NryqTk,771
+pydicom/filebase.py,sha256=C2azQGrbMEH-foxUqHB9mpKbtIc69S5mY-p3It8aEDU,7794
+pydicom/filereader.py,sha256=T0lQ_vqGpBgKQBtwluiqvaNt3Y2gdIubLTZBuyufTqQ,44269
+pydicom/fileset.py,sha256=DpkyQBxDbsnPbnJwB5NcVnXjMun4nuozFx9BsSjDuNM,102357
+pydicom/fileutil.py,sha256=mDbr0HFOeoQEeOi6CyalvvtLDBzf41-mP5yX-Tjd5fE,14625
+pydicom/filewriter.py,sha256=LDQHtDPW4Htef8IpASHQpkQBvzEm_stO3nXlSWNT-YY,44257
+pydicom/jsonrep.py,sha256=1znLXxMnvjX1XEKFEwrW0dF0_s-IeA2fzCwQrIAU88g,12252
+pydicom/misc.py,sha256=un7Sim6bEJ8tgMF0KAgomDYycQQoangg5QBxYK_drmo,1644
+pydicom/multival.py,sha256=EJ8g9E2l11bN1Z4qRqF4R01vYmYzL8Nt6NaiZXgBygI,5096
+pydicom/overlay_data_handlers/__init__.py,sha256=wLwH6ICid72V8ndmTuUUZJoY4C9iv_Vl1XQss86fNCo,108
+pydicom/overlay_data_handlers/__pycache__/__init__.cpython-38.pyc,,
+pydicom/overlays/__init__.py,sha256=mNmGZN8uGz417oSmHkFSw8fnVdCFR3syY3ZFCH8Dxyo,61
+pydicom/overlays/__pycache__/__init__.cpython-38.pyc,,
+pydicom/overlays/__pycache__/numpy_handler.cpython-38.pyc,,
+pydicom/overlays/numpy_handler.py,sha256=dBZ5N0ey5VOpuzr-fgqAJUSrA_ypbp_C5zvruD-nToQ,9951
+pydicom/pixel_data_handlers/__init__.py,sha256=Pw3m-6eFTnBB9yw0nRa16Nk_qT7RDR-22Spi-eWUDis,262
+pydicom/pixel_data_handlers/__pycache__/__init__.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/gdcm_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/jpeg_ls_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/numpy_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/pillow_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/pylibjpeg_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/rle_handler.cpython-38.pyc,,
+pydicom/pixel_data_handlers/__pycache__/util.cpython-38.pyc,,
+pydicom/pixel_data_handlers/gdcm_handler.py,sha256=kWe91nFDFFBEpiXE3qoJ5yrh-yoEukKGY9dTLMoeHxA,9761
+pydicom/pixel_data_handlers/jpeg_ls_handler.py,sha256=1MKBotm-X70E9YXqUaw99ib2w-g143RViXVjjOL_1Oo,3404
+pydicom/pixel_data_handlers/numpy_handler.py,sha256=bsH3Il6fBP6iJkNRnrdymaPIHdtIQXdvSOIthBW6s2o,11645
+pydicom/pixel_data_handlers/pillow_handler.py,sha256=iF-cj7Rx6SuwgTz1jXABUySqf6jB82Xb30sppHZcmT8,8695
+pydicom/pixel_data_handlers/pylibjpeg_handler.py,sha256=QmB8Ugi0hG92jPQbyPYkHheJESFqH1MjCer7HBZ_T2k,11053
+pydicom/pixel_data_handlers/rle_handler.py,sha256=pMNW5Cf0HyXvM6jq-PaPmm708-UKDIXTkOTut-q-ZG8,16581
+pydicom/pixel_data_handlers/util.py,sha256=cqi6-YF5KhOggPH-OOXuamLSuhD_jZD8O9I2JfmjGkQ,54704
+pydicom/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom/sequence.py,sha256=qALmYmOL7mzqWMIPOdE_JAZv2jt18n5fs4mm95GeusI,5980
+pydicom/sr/__init__.py,sha256=z6OpIwX12LlTOD2fk_avCkpT9Fcn0-AnZCoBNi5AykI,38
+pydicom/sr/__pycache__/__init__.cpython-38.pyc,,
+pydicom/sr/__pycache__/_cid_dict.cpython-38.pyc,,
+pydicom/sr/__pycache__/_concepts_dict.cpython-38.pyc,,
+pydicom/sr/__pycache__/_snomed_dict.cpython-38.pyc,,
+pydicom/sr/__pycache__/codedict.cpython-38.pyc,,
+pydicom/sr/__pycache__/coding.cpython-38.pyc,,
+pydicom/sr/_cid_dict.py,sha256=irBc5MWaCuNzgEx-6LFHJidLcmKuTkQHDYyTJe6YbhU,788696
+pydicom/sr/_concepts_dict.py,sha256=NunKry35rvgjHijaWM5mGEywA7n0b_8X80Th7PwmaqY,2725654
+pydicom/sr/_snomed_dict.py,sha256=-9zmakqUYLFYNlQM3FVpTHDtpTlNGO6VfLF3rXeVT6c,424514
+pydicom/sr/codedict.py,sha256=JWgCduobhCXiC4bZhTug8zsqrK7CAEiXzpp65n0eHOY,11035
+pydicom/sr/coding.py,sha256=xteL3hz_2WhenfZ7YU1dRvpfEduXVyk-N0DfeFoUT-U,2026
+pydicom/tag.py,sha256=XKz_m-jEcfWwaDMlvc6saRCtByFXLRe7mTRj02Uphfo,8083
+pydicom/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom/tests/__pycache__/__init__.cpython-38.pyc,,
+pydicom/tests/__pycache__/_write_stds.cpython-38.pyc,,
+pydicom/tests/__pycache__/conftest.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_JPEG_LS_transfer_syntax.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_charset.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_cli.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_codes.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_config.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_data_manager.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_dataelem.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_dataset.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_dicomdir.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_dictionary.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_encaps.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_encoders.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_encoders_gdcm.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_encoders_pydicom.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_env_info.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_errors.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_filebase.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_filereader.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_fileset.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_fileutil.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_filewriter.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_gdcm_pixel_data.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_handler_util.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_jpeg_ls_pixel_data.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_json.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_misc.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_multival.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_numpy_pixel_data.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_overlay_np.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_pillow_pixel_data.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_pylibjpeg.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_rawread.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_rle_pixel_data.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_sequence.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_tag.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_uid.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_unicode.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_util.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_valuerep.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_values.cpython-38.pyc,,
+pydicom/tests/__pycache__/test_waveform.cpython-38.pyc,,
+pydicom/tests/_write_stds.py,sha256=3VHj9g6NJdmBGQLm7C-x9NrF1i9D0BCTOLSA7QxPx30,3006
+pydicom/tests/conftest.py,sha256=vnKkkAdFUB7Yd7RssZe8siWutdK4jQa7lZIuPkxD6Q4,2548
+pydicom/tests/test_JPEG_LS_transfer_syntax.py,sha256=ZQAZJrRpfUJHFFaFW_ERosRGfJmvBrJimLkVUXtcT5c,7024
+pydicom/tests/test_charset.py,sha256=UeX2601G11VuxTJg1HnZn1cu1wjhOa3J5hie9DDv5Xw,23452
+pydicom/tests/test_cli.py,sha256=uXBAEC1KVOViUQKUTIOhIhjRr_PiSSVxoJfS4z1qmYQ,7264
+pydicom/tests/test_codes.py,sha256=7sCMMefkN3KxFVexfZpex3U3bqdXCESiWiVc8XUROd0,12639
+pydicom/tests/test_config.py,sha256=mQqXhXiOE9ZcfUt3wN0pBApenJ3FpC_BIQa8QHvMmWM,7289
+pydicom/tests/test_data_manager.py,sha256=JrAuuj5_ibu4yNZvQY8K-xK-EEaiC4ZOPyMweer9yTA,11331
+pydicom/tests/test_dataelem.py,sha256=P_f0KyaIOE36LqwmqyGeergN8j0nTXRJCmdAUZDhP44,37245
+pydicom/tests/test_dataset.py,sha256=W7xApZXAf404ZxL-qgiMhKdS2LoxKcDlQ3uCI7N5YkA,83588
+pydicom/tests/test_dicomdir.py,sha256=PMIJVTl56DUspNPt8yWXrs7ho6kZz6myLEU4xa29Mlw,3522
+pydicom/tests/test_dictionary.py,sha256=XakZncAC9AFNoNxTRWq8HxdoTH7gLq4aA4JVWmzN9PQ,6640
+pydicom/tests/test_encaps.py,sha256=lscQvzuEyQ0A6DbIuwvZQ_OXDVy8FqZpzdyvf8zIW0A,51853
+pydicom/tests/test_encoders.py,sha256=OjXU25TjE0h2wg7-B4PcepXaxCeqliT0MJ6ocTSl_uA,40830
+pydicom/tests/test_encoders_gdcm.py,sha256=9OqFBXTvHjYHSzfJ0VKtiEfO5VL1jGw2n-Shgc4y8is,9907
+pydicom/tests/test_encoders_pydicom.py,sha256=tGEV40s3PiBWBnacb0CoSoYFW2xM1uSubS9H8MOa1Mw,16304
+pydicom/tests/test_env_info.py,sha256=02_Yd39wNjpN0cdkwn_UScrYw0I--6HfkC08pj8LfoQ,1077
+pydicom/tests/test_errors.py,sha256=NCiRzI_Og4DMIquwOyAFoW6M4NSDuV3w9ayMvYcaaJ8,658
+pydicom/tests/test_filebase.py,sha256=XsuMQIFgQF9J3cp6X2n0_KBMbphj1rwDf6TWR7ReoUA,9238
+pydicom/tests/test_filereader.py,sha256=8Wwx2q09OiGVrgzifZaUIcvPBoO93cZ-rMqfVBwZesc,64476
+pydicom/tests/test_fileset.py,sha256=8mj9hYq2uawKg2mGm5KDzQqFDZnTpl-u-OsNmeEUGMk,103236
+pydicom/tests/test_fileutil.py,sha256=24VHPKUQRNy5UjpJ25s7-mReaVpX5YucJ21QrjJdZ_4,956
+pydicom/tests/test_filewriter.py,sha256=EJNsjeb8qBM3vDU-DMaIM127odAhQRoA8RVvGmlQTtg,108880
+pydicom/tests/test_gdcm_pixel_data.py,sha256=i3tTyC7xd9VUQwapWl1Ln7B08V1-fJypIEP-3LkReoU,22694
+pydicom/tests/test_handler_util.py,sha256=9Ioa_weMCNLst3pPNHS1bB1lu0xxi2VzFfZRuipipEs,91897
+pydicom/tests/test_jpeg_ls_pixel_data.py,sha256=7rPXAanTBYjF9Sb3WC1yyOQmII0NISqcoRM7GNkbF_8,10049
+pydicom/tests/test_json.py,sha256=0fNFeiZ_rWL68BxJrdwhtJqISkUplnatLVrBpj__-AQ,19207
+pydicom/tests/test_misc.py,sha256=lFzhQ7MMJwvtfgsvkyhqB75lvZziZpICXavNLnsBTys,1839
+pydicom/tests/test_multival.py,sha256=dxU2pOIt5ABmiui7Ofy9XBtOnn7JOzeTM9jrmE8kAGw,5701
+pydicom/tests/test_numpy_pixel_data.py,sha256=sTZfdRPtZgPspDOiLgDgKcW6PzC4rhNAQlEsXpO8rz8,51663
+pydicom/tests/test_overlay_np.py,sha256=IdWtN6X-4WonHBgaSO0rnieugRSclvcSKhcJ-4n6T8U,15710
+pydicom/tests/test_pillow_pixel_data.py,sha256=ba6pModcmz0qDDQCsJea56ZZd_c3svQPlGWP0q9M5es,23921
+pydicom/tests/test_pylibjpeg.py,sha256=Wjx0pGKmfYBvtrQnQMy2LvPn_gR-uN9nilDE7klMe9s,30823
+pydicom/tests/test_rawread.py,sha256=8F2WUtWcwWTcgWCX6pkFaU84T8JHnRCwcyRARAsl9Is,23320
+pydicom/tests/test_rle_pixel_data.py,sha256=Y0k51rYiDrMsSPWX6NAb_pyMWyEHvb0Z0rbMTt53Cz8,44734
+pydicom/tests/test_sequence.py,sha256=9CGUsOwuNwVIrhTMl8_QmpSGFmr1JJYZ-BeM_8drN9s,5323
+pydicom/tests/test_tag.py,sha256=wratpg7COwmjdGaAyfzirpT_eHLtg1SZKmRWEhmYXu8,15696
+pydicom/tests/test_uid.py,sha256=ecZcKL9ZAFdmJM4akdR9C7qDL6YVNrKS5R0FFXcsNGY,16663
+pydicom/tests/test_unicode.py,sha256=GPbyzFRWYOszIaLhNJomkgSMnoLPtdZu59L95LjkTCU,845
+pydicom/tests/test_util.py,sha256=ox66zdx0xRbO_ROOdIul6oyOK8TbG4JRxY2_TC1rrVM,18477
+pydicom/tests/test_valuerep.py,sha256=NLDRPNSTBN0zRACodgz0buOp3FBM911b-14H7VAoGhc,59725
+pydicom/tests/test_values.py,sha256=YO4c6UL7uGUDvu0SVkilwF63jgU9ndWPjvVxRsWdGfo,8633
+pydicom/tests/test_waveform.py,sha256=61FMakZPstKtbeWZt_ra_P-InDfRmeDnAGD4d0hL6g0,5250
+pydicom/uid.py,sha256=sHJvX6q5w6UNSM43Hcy061JBVxrdR0lLZgRt-_-W9To,34984
+pydicom/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydicom/util/__pycache__/__init__.cpython-38.pyc,,
+pydicom/util/__pycache__/codify.cpython-38.pyc,,
+pydicom/util/__pycache__/dump.cpython-38.pyc,,
+pydicom/util/__pycache__/fixer.cpython-38.pyc,,
+pydicom/util/__pycache__/hexutil.cpython-38.pyc,,
+pydicom/util/__pycache__/leanread.cpython-38.pyc,,
+pydicom/util/codify.py,sha256=sBjnHSJlFh-Nl4IERoDpfgNPboHYGcau4QwGSdmW-zg,15674
+pydicom/util/dump.py,sha256=iZb9QUlAPqu1H5YXPfaNIi9XlN4QeYh1bJNcBPdAR1c,3956
+pydicom/util/fixer.py,sha256=yAGcMrTi-upZPVhBhEc-OTp-3zAPJ5Vp9bpzdVZwn88,4259
+pydicom/util/hexutil.py,sha256=wqPzEjSDz2vvLdX5U2bSMievC3nd9kdnVgFwXFKdqY8,1576
+pydicom/util/leanread.py,sha256=aIW87i53P9-mJKJUc2LsPhoMcYSCP1_QtrWCs7jCVak,6448
+pydicom/valuerep.py,sha256=PTkO-FP5eugsfWHmJujY8B0UEdTGaeML9BHkShPTRy8,66992
+pydicom/values.py,sha256=7YtmNbOFovy2zqoNHPvmk34SSh4m7q7NF1vw4mnl5C0,25894
+pydicom/waveforms/__init__.py,sha256=80SVM1o317li1UAL-g0lAR_G3OW1SgUNZcDFk1b32LM,88
+pydicom/waveforms/__pycache__/__init__.cpython-38.pyc,,
+pydicom/waveforms/__pycache__/numpy_handler.cpython-38.pyc,,
+pydicom/waveforms/numpy_handler.py,sha256=FTBCbZH9OXxBNi-8UHeM_h2nHMLVj1ZC9BUn65FTEY4,8584
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/entry_points.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..44af99566e48de731fe41a98fcb449689761c570
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/entry_points.txt
@@ -0,0 +1,7 @@
+[console_scripts]
+pydicom = pydicom.cli.main:main
+
+[pydicom_subcommands]
+codify = pydicom.cli.codify:add_subparser
+show = pydicom.cli.show:add_subparser
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..435c1f7f28a6f319a12f9f11281a0de22ef42001
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pydicom-2.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pydicom
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8fa5d78994f65a134620a11971afeb855b0c9e1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2017-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+"""Python interface to the Zstandard (zstd) compression library."""
+
+from __future__ import absolute_import, unicode_literals
+
+# This module serves 2 roles:
+#
+# 1) Export the C or CFFI "backend" through a central module.
+# 2) Implement additional functionality built on top of C or CFFI backend.
+
+import builtins
+import io
+import os
+import platform
+
+from typing import ByteString
+
+# Some Python implementations don't support C extensions. That's why we have
+# a CFFI implementation in the first place. The code here imports one of our
+# "backends" then re-exports its symbols from this module. For convenience,
+# we support falling back to the CFFI backend if the C extension can't be
+# imported. But for performance reasons, we only do this on unknown Python
+# implementations. Notably, for CPython we require the C extension by default.
+# Because someone will inevitably want special behavior, the behavior is
+# configurable via an environment variable. A potentially better way to handle
+# this is to import a special ``__importpolicy__`` module or something
+# defining a variable, and `setup.py` could write the file with whatever
+# policy was specified at build time. Until someone needs it, we go with
+# the hacky but simple environment variable approach.
+_module_policy = os.environ.get("PYTHON_ZSTANDARD_IMPORT_POLICY", "default")
+
+if _module_policy == "default":
+ if platform.python_implementation() in ("CPython",):
+ from .backend_c import * # type: ignore
+
+ backend = "cext"
+ elif platform.python_implementation() in ("PyPy",):
+ from .backend_cffi import * # type: ignore
+
+ backend = "cffi"
+ else:
+ try:
+ from .backend_c import *
+
+ backend = "cext"
+ except ImportError:
+ from .backend_cffi import *
+
+ backend = "cffi"
+elif _module_policy == "cffi_fallback":
+ try:
+ from .backend_c import *
+
+ backend = "cext"
+ except ImportError:
+ from .backend_cffi import *
+
+ backend = "cffi"
+elif _module_policy == "rust":
+ from .backend_rust import * # type: ignore
+
+ backend = "rust"
+elif _module_policy == "cext":
+ from .backend_c import *
+
+ backend = "cext"
+elif _module_policy == "cffi":
+ from .backend_cffi import *
+
+ backend = "cffi"
+else:
+ raise ImportError(
+ "unknown module import policy: %s; use default, cffi_fallback, "
+ "cext, or cffi" % _module_policy
+ )
+
+# Keep this in sync with python-zstandard.h, rust-ext/src/lib.rs, and debian/changelog.
+__version__ = "0.19.0"
+
+_MODE_CLOSED = 0
+_MODE_READ = 1
+_MODE_WRITE = 2
+
+
+def open(
+ filename,
+ mode="rb",
+ cctx=None,
+ dctx=None,
+ encoding=None,
+ errors=None,
+ newline=None,
+ closefd=None,
+):
+ """Create a file object with zstd (de)compression.
+
+ The object returned from this function will be a
+ :py:class:`ZstdDecompressionReader` if opened for reading in binary mode,
+ a :py:class:`ZstdCompressionWriter` if opened for writing in binary mode,
+ or an ``io.TextIOWrapper`` if opened for reading or writing in text mode.
+
+ :param filename:
+ ``bytes``, ``str``, or ``os.PathLike`` defining a file to open or a
+ file object (with a ``read()`` or ``write()`` method).
+ :param mode:
+ ``str`` File open mode. Accepts any of the open modes recognized by
+ ``open()``.
+ :param cctx:
+ ``ZstdCompressor`` to use for compression. If not specified and file
+ is opened for writing, the default ``ZstdCompressor`` will be used.
+ :param dctx:
+ ``ZstdDecompressor`` to use for decompression. If not specified and file
+ is opened for reading, the default ``ZstdDecompressor`` will be used.
+ :param encoding:
+ ``str`` that defines text encoding to use when file is opened in text
+ mode.
+ :param errors:
+ ``str`` defining text encoding error handling mode.
+ :param newline:
+ ``str`` defining newline to use in text mode.
+ :param closefd:
+ ``bool`` whether to close the file when the returned object is closed.
+ Only used if a file object is passed. If a filename is specified, the
+ opened file is always closed when the returned object is closed.
+ """
+ normalized_mode = mode.replace("t", "")
+
+ if normalized_mode in ("r", "rb"):
+ dctx = dctx or ZstdDecompressor()
+ open_mode = "r"
+ raw_open_mode = "rb"
+ elif normalized_mode in ("w", "wb", "a", "ab", "x", "xb"):
+ cctx = cctx or ZstdCompressor()
+ open_mode = "w"
+ raw_open_mode = normalized_mode
+ if not raw_open_mode.endswith("b"):
+ raw_open_mode = raw_open_mode + "b"
+ else:
+ raise ValueError("Invalid mode: {!r}".format(mode))
+
+ if hasattr(os, "PathLike"):
+ types = (str, bytes, os.PathLike)
+ else:
+ types = (str, bytes)
+
+ if isinstance(filename, types): # type: ignore
+ inner_fh = builtins.open(filename, raw_open_mode)
+ closefd = True
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
+ inner_fh = filename
+ closefd = bool(closefd)
+ else:
+ raise TypeError(
+ "filename must be a str, bytes, file or PathLike object"
+ )
+
+ if open_mode == "r":
+ fh = dctx.stream_reader(inner_fh, closefd=closefd)
+ elif open_mode == "w":
+ fh = cctx.stream_writer(inner_fh, closefd=closefd)
+ else:
+ raise RuntimeError("logic error in zstandard.open() handling open mode")
+
+ if "b" not in normalized_mode:
+ return io.TextIOWrapper(
+ fh, encoding=encoding, errors=errors, newline=newline
+ )
+ else:
+ return fh
+
+
+def compress(data: ByteString, level: int = 3) -> bytes:
+ """Compress source data using the zstd compression format.
+
+ This performs one-shot compression using basic/default compression
+ settings.
+
+ This method is provided for convenience and is equivalent to calling
+ ``ZstdCompressor(level=level).compress(data)``.
+
+ If you find yourself calling this function in a tight loop,
+ performance will be greater if you construct a single ``ZstdCompressor``
+ and repeatedly call ``compress()`` on it.
+ """
+ cctx = ZstdCompressor(level=level)
+
+ return cctx.compress(data)
+
+
+def decompress(data: ByteString, max_output_size: int = 0) -> bytes:
+ """Decompress a zstd frame into its original data.
+
+ This performs one-shot decompression using basic/default compression
+ settings.
+
+ This method is provided for convenience and is equivalent to calling
+ ``ZstdDecompressor().decompress(data, max_output_size=max_output_size)``.
+
+ If you find yourself calling this function in a tight loop, performance
+ will be greater if you construct a single ``ZstdDecompressor`` and
+ repeatedly call ``decompress()`` on it.
+ """
+ dctx = ZstdDecompressor()
+
+ return dctx.decompress(data, max_output_size=max_output_size)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.pyi b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..795e0c6f27ad90c1d42bff492f7d740018d4696e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/__init__.pyi
@@ -0,0 +1,478 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import os
+
+from typing import (
+ BinaryIO,
+ ByteString,
+ Generator,
+ IO,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
+
+FLUSH_BLOCK: int
+FLUSH_FRAME: int
+
+COMPRESSOBJ_FLUSH_FINISH: int
+COMPRESSOBJ_FLUSH_BLOCK: int
+
+CONTENTSIZE_UNKNOWN: int
+CONTENTSIZE_ERROR: int
+
+MAX_COMPRESSION_LEVEL: int
+
+COMPRESSION_RECOMMENDED_INPUT_SIZE: int
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE: int
+
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE: int
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE: int
+
+BLOCKSIZELOG_MAX: int
+BLOCKSIZE_MAX: int
+
+WINDOWLOG_MIN: int
+WINDOWLOG_MAX: int
+
+CHAINLOG_MIN: int
+CHAINLOG_MAX: int
+HASHLOG_MIN: int
+HASHLOG_MAX: int
+MINMATCH_MIN: int
+MINMATCH_MAX: int
+SEARCHLOG_MIN: int
+SEARCHLOG_MAX: int
+SEARCHLENGTH_MIN: int
+SEARCHLENGTH_MAX: int
+TARGETLENGTH_MIN: int
+TARGETLENGTH_MAX: int
+LDM_MINMATCH_MIN: int
+LDM_MINMATCH_MAX: int
+LDM_BUCKETSIZELOG_MAX: int
+
+STRATEGY_FAST: int
+STRATEGY_DFAST: int
+STRATEGY_GREEDY: int
+STRATEGY_LAZY: int
+STRATEGY_LAZY2: int
+STRATEGY_BTLAZY2: int
+STRATEGY_BTOPT: int
+STRATEGY_BTULTRA: int
+STRATEGY_BTULTRA2: int
+
+DICT_TYPE_AUTO: int
+DICT_TYPE_RAWCONTENT: int
+DICT_TYPE_FULLDICT: int
+
+FORMAT_ZSTD1: int
+FORMAT_ZSTD1_MAGICLESS: int
+
+ZSTD_VERSION: Tuple[int, int, int]
+FRAME_HEADER: bytes
+MAGIC_NUMBER: int
+
+backend: str
+backend_features: Set[str]
+__version__: str
+
+class ZstdError(Exception): ...
+
+class BufferSegment(object):
+ offset: int
+ def __len__(self) -> int: ...
+ def tobytes(self) -> bytes: ...
+
+class BufferSegments(object):
+ def __len__(self) -> int: ...
+ def __getitem__(self, i: int) -> BufferSegment: ...
+
+class BufferWithSegments(object):
+ size: int
+ def __init__(self, data: ByteString, segments: ByteString): ...
+ def __len__(self) -> int: ...
+ def __getitem__(self, i: int) -> BufferSegment: ...
+ def segments(self): ...
+ def tobytes(self) -> bytes: ...
+
+class BufferWithSegmentsCollection(object):
+ def __init__(self, *args): ...
+ def __len__(self) -> int: ...
+ def __getitem__(self, i: int) -> BufferSegment: ...
+ def size(self) -> int: ...
+
+class ZstdCompressionParameters(object):
+ @staticmethod
+ def from_level(
+ level: int, source_size: int = ..., dict_size: int = ..., **kwargs
+ ) -> "ZstdCompressionParameters": ...
+ def __init__(
+ self,
+ format: int = ...,
+ compression_level: int = ...,
+ window_log: int = ...,
+ hash_log: int = ...,
+ chain_log: int = ...,
+ search_log: int = ...,
+ min_match: int = ...,
+ target_length: int = ...,
+ strategy: int = ...,
+ write_content_size: int = ...,
+ write_checksum: int = ...,
+ write_dict_id: int = ...,
+ job_size: int = ...,
+ overlap_log: int = ...,
+ force_max_window: int = ...,
+ enable_ldm: int = ...,
+ ldm_hash_log: int = ...,
+ ldm_min_match: int = ...,
+ ldm_bucket_size_log: int = ...,
+ ldm_hash_rate_log: int = ...,
+ threads: int = ...,
+ ): ...
+ @property
+ def format(self) -> int: ...
+ @property
+ def compression_level(self) -> int: ...
+ @property
+ def window_log(self) -> int: ...
+ @property
+ def hash_log(self) -> int: ...
+ @property
+ def chain_log(self) -> int: ...
+ @property
+ def search_log(self) -> int: ...
+ @property
+ def min_match(self) -> int: ...
+ @property
+ def target_length(self) -> int: ...
+ @property
+ def strategy(self) -> int: ...
+ @property
+ def write_content_size(self) -> int: ...
+ @property
+ def write_checksum(self) -> int: ...
+ @property
+ def write_dict_id(self) -> int: ...
+ @property
+ def job_size(self) -> int: ...
+ @property
+ def overlap_log(self) -> int: ...
+ @property
+ def force_max_window(self) -> int: ...
+ @property
+ def enable_ldm(self) -> int: ...
+ @property
+ def ldm_hash_log(self) -> int: ...
+ @property
+ def ldm_min_match(self) -> int: ...
+ @property
+ def ldm_bucket_size_log(self) -> int: ...
+ @property
+ def ldm_hash_rate_log(self) -> int: ...
+ @property
+ def threads(self) -> int: ...
+ def estimated_compression_context_size(self) -> int: ...
+
+class CompressionParameters(ZstdCompressionParameters): ...
+
+class ZstdCompressionDict(object):
+ k: int
+ d: int
+ def __init__(
+ self,
+ data: ByteString,
+ dict_type: int = ...,
+ k: int = ...,
+ d: int = ...,
+ ): ...
+ def __len__(self) -> int: ...
+ def dict_id(self) -> int: ...
+ def as_bytes(self) -> bytes: ...
+ def precompute_compress(
+ self,
+ level: int = ...,
+ compression_params: ZstdCompressionParameters = ...,
+ ): ...
+
+class ZstdCompressionObj(object):
+ def compress(self, data: ByteString) -> bytes: ...
+ def flush(self, flush_mode: int = ...) -> bytes: ...
+
+class ZstdCompressionChunker(object):
+ def compress(self, data: ByteString): ...
+ def flush(self): ...
+ def finish(self): ...
+
+class ZstdCompressionReader(BinaryIO):
+ def __enter__(self) -> "ZstdCompressionReader": ...
+ def __exit__(self, exc_type, exc_value, exc_tb): ...
+ def readable(self) -> bool: ...
+ def writable(self) -> bool: ...
+ def seekable(self) -> bool: ...
+ def readline(self, limit: int = ...) -> bytes: ...
+ def readlines(self, hint: int = ...) -> List[bytes]: ...
+ def write(self, data: ByteString): ...
+ def writelines(self, data: Iterable[bytes]): ...
+ def isatty(self) -> bool: ...
+ def flush(self): ...
+ def close(self): ...
+ @property
+ def closed(self) -> bool: ...
+ def tell(self) -> int: ...
+ def readall(self) -> bytes: ...
+ def __iter__(self): ...
+ def __next__(self): ...
+ def next(self): ...
+ def read(self, size: int = ...) -> bytes: ...
+ def read1(self, size: int = ...) -> bytes: ...
+ def readinto(self, b) -> int: ...
+ def readinto1(self, b) -> int: ...
+
+class ZstdCompressionWriter(BinaryIO):
+ def __enter__(self) -> "ZstdCompressionWriter": ...
+ def __exit__(self, exc_type, exc_value, exc_tb): ...
+ def memory_size(self) -> int: ...
+ def fileno(self) -> int: ...
+ def close(self): ...
+ @property
+ def closed(self) -> bool: ...
+ def isatty(self) -> bool: ...
+ def readable(self) -> bool: ...
+ def readline(self, size: int = ...) -> bytes: ...
+ def readlines(self, hint: int = ...) -> List[bytes]: ...
+ def seek(self, offset: int, whence: int = ...): ...
+ def seekable(self) -> bool: ...
+ def truncate(self, size: int = ...): ...
+ def writable(self) -> bool: ...
+ def writelines(self, lines: Iterable[bytes]): ...
+ def read(self, size: int = ...) -> bytes: ...
+ def readall(self) -> bytes: ...
+ def readinto(self, b): ...
+ def write(self, data: ByteString) -> int: ...
+ def flush(self, flush_mode: int = ...) -> int: ...
+ def tell(self) -> int: ...
+
+class ZstdCompressor(object):
+ def __init__(
+ self,
+ level: int = ...,
+ dict_data: Optional[ZstdCompressionDict] = ...,
+ compression_params: Optional[ZstdCompressionParameters] = ...,
+ write_checksum: Optional[bool] = ...,
+ write_content_size: Optional[bool] = ...,
+ write_dict_id: Optional[bool] = ...,
+ threads: int = ...,
+ ): ...
+ def memory_size(self) -> int: ...
+ def compress(self, data: ByteString) -> bytes: ...
+ def compressobj(self, size: int = ...) -> ZstdCompressionObj: ...
+ def chunker(
+ self, size: int = ..., chunk_size: int = ...
+ ) -> ZstdCompressionChunker: ...
+ def copy_stream(
+ self,
+ ifh: IO[bytes],
+ ofh: IO[bytes],
+ size: int = ...,
+ read_size: int = ...,
+ write_size: int = ...,
+ ) -> Tuple[int, int]: ...
+ def stream_reader(
+ self,
+ source: Union[IO[bytes], ByteString],
+ size: int = ...,
+ read_size: int = ...,
+ *,
+ closefd: bool = ...,
+ ) -> ZstdCompressionReader: ...
+ def stream_writer(
+ self,
+ writer: IO[bytes],
+ size: int = ...,
+ write_size: int = ...,
+ write_return_read: bool = ...,
+ *,
+ closefd: bool = ...,
+ ) -> ZstdCompressionWriter: ...
+ def read_to_iter(
+ self,
+ reader: Union[IO[bytes], ByteString],
+ size: int = ...,
+ read_size: int = ...,
+ write_size: int = ...,
+ ) -> Generator[bytes, None, None]: ...
+ def frame_progression(self) -> Tuple[int, int, int]: ...
+ def multi_compress_to_buffer(
+ self,
+ data: Union[
+ BufferWithSegments,
+ BufferWithSegmentsCollection,
+ List[ByteString],
+ ],
+ threads: int = ...,
+ ) -> BufferWithSegmentsCollection: ...
+
+class ZstdDecompressionObj(object):
+ def decompress(self, data: ByteString) -> bytes: ...
+ def flush(self, length: int = ...) -> bytes: ...
+ @property
+ def unused_data(self) -> bytes: ...
+ @property
+ def unconsumed_tail(self) -> bytes: ...
+ @property
+ def eof(self) -> bool: ...
+
+class ZstdDecompressionReader(BinaryIO):
+ def __enter__(self) -> "ZstdDecompressionReader": ...
+ def __exit__(self, exc_type, exc_value, exc_tb): ...
+ def readable(self) -> bool: ...
+ def writable(self) -> bool: ...
+ def seekable(self) -> bool: ...
+ def readline(self, size: int = ...): ...
+ def readlines(self, hint: int = ...): ...
+ def write(self, data: ByteString): ...
+ def writelines(self, lines: Iterable[bytes]): ...
+ def isatty(self) -> bool: ...
+ def flush(self): ...
+ def close(self): ...
+ @property
+ def closed(self) -> bool: ...
+ def tell(self) -> int: ...
+ def readall(self) -> bytes: ...
+ def __iter__(self): ...
+ def __next__(self): ...
+ def next(self): ...
+ def read(self, size: int = ...) -> bytes: ...
+ def readinto(self, b) -> int: ...
+ def read1(self, size: int = ...) -> bytes: ...
+ def readinto1(self, b) -> int: ...
+ def seek(self, pos: int, whence: int = ...) -> int: ...
+
+class ZstdDecompressionWriter(BinaryIO):
+ def __enter__(self) -> "ZstdDecompressionWriter": ...
+ def __exit__(self, exc_type, exc_value, exc_tb): ...
+ def memory_size(self) -> int: ...
+ def close(self): ...
+ @property
+ def closed(self) -> bool: ...
+ def fileno(self) -> int: ...
+ def flush(self): ...
+ def isatty(self) -> bool: ...
+ def readable(self) -> bool: ...
+ def readline(self, size: int = ...): ...
+ def readlines(self, hint: int = ...): ...
+ def seek(self, offset: int, whence: int = ...): ...
+ def seekable(self) -> bool: ...
+ def tell(self): ...
+ def truncate(self, size: int = ...): ...
+ def writable(self) -> bool: ...
+ def writelines(self, lines: Iterable[bytes]): ...
+ def read(self, size: int = ...): ...
+ def readall(self): ...
+ def readinto(self, b): ...
+ def write(self, data: ByteString) -> int: ...
+
+class ZstdDecompressor(object):
+ def __init__(
+ self,
+ dict_data: Optional[ZstdCompressionDict] = ...,
+ max_window_size: int = ...,
+ format: int = ...,
+ ): ...
+ def memory_size(self) -> int: ...
+ def decompress(
+ self,
+ data: ByteString,
+ max_output_size: int = ...,
+ read_across_frames: bool = ...,
+ allow_extra_data: bool = ...,
+ ) -> bytes: ...
+ def stream_reader(
+ self,
+ source: Union[IO[bytes], ByteString],
+ read_size: int = ...,
+ read_across_frames: bool = ...,
+ *,
+ closefd=False,
+ ) -> ZstdDecompressionReader: ...
+ def decompressobj(self, write_size: int = ...) -> ZstdDecompressionObj: ...
+ def read_to_iter(
+ self,
+ reader: Union[IO[bytes], ByteString],
+ read_size: int = ...,
+ write_size: int = ...,
+ skip_bytes: int = ...,
+ ) -> Generator[bytes, None, None]: ...
+ def stream_writer(
+ self,
+ writer: IO[bytes],
+ write_size: int = ...,
+ write_return_read: bool = ...,
+ *,
+ closefd: bool = ...,
+ ) -> ZstdDecompressionWriter: ...
+ def copy_stream(
+ self,
+ ifh: IO[bytes],
+ ofh: IO[bytes],
+ read_size: int = ...,
+ write_size: int = ...,
+ ) -> Tuple[int, int]: ...
+ def decompress_content_dict_chain(
+ self, frames: list[ByteString]
+ ) -> bytes: ...
+ def multi_decompress_to_buffer(
+ self,
+ frames: Union[
+ BufferWithSegments,
+ BufferWithSegmentsCollection,
+ List[ByteString],
+ ],
+ decompressed_sizes: ByteString = ...,
+ threads: int = ...,
+ ) -> BufferWithSegmentsCollection: ...
+
+class FrameParameters(object):
+ content_size: int
+ window_size: int
+ dict_id: int
+ has_checksum: bool
+
+def estimate_decompression_context_size() -> int: ...
+def frame_content_size(data: ByteString) -> int: ...
+def frame_header_size(data: ByteString) -> int: ...
+def get_frame_parameters(data: ByteString) -> FrameParameters: ...
+def train_dictionary(
+ dict_size: int,
+ samples: list[ByteString],
+ k: int = ...,
+ d: int = ...,
+ f: int = ...,
+ split_point: float = ...,
+ accel: int = ...,
+ notifications: int = ...,
+ dict_id: int = ...,
+ level: int = ...,
+ steps: int = ...,
+ threads: int = ...,
+) -> ZstdCompressionDict: ...
+def open(
+ filename: Union[bytes, str, os.PathLike, BinaryIO],
+ mode: str = ...,
+ cctx: Optional[ZstdCompressor] = ...,
+ dctx: Optional[ZstdDecompressor] = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ newline: Optional[str] = ...,
+ closefd: bool = ...,
+): ...
+def compress(data: ByteString, level: int = ...) -> bytes: ...
+def decompress(data: ByteString, max_output_size: int = ...) -> bytes: ...
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/backend_cffi.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/backend_cffi.py
new file mode 100644
index 0000000000000000000000000000000000000000..39b49192f252f912997c47dcb6d60f2152365a1e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/backend_cffi.py
@@ -0,0 +1,4457 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+"""Python interface to the Zstandard (zstd) compression library."""
+
+from __future__ import absolute_import, unicode_literals
+
+# This should match what the C extension exports.
+__all__ = [
+ "BufferSegment",
+ "BufferSegments",
+ "BufferWithSegments",
+ "BufferWithSegmentsCollection",
+ "ZstdCompressionChunker",
+ "ZstdCompressionDict",
+ "ZstdCompressionObj",
+ "ZstdCompressionParameters",
+ "ZstdCompressionReader",
+ "ZstdCompressionWriter",
+ "ZstdCompressor",
+ "ZstdDecompressionObj",
+ "ZstdDecompressionReader",
+ "ZstdDecompressionWriter",
+ "ZstdDecompressor",
+ "ZstdError",
+ "FrameParameters",
+ "backend_features",
+ "estimate_decompression_context_size",
+ "frame_content_size",
+ "frame_header_size",
+ "get_frame_parameters",
+ "train_dictionary",
+ # Constants.
+ "FLUSH_BLOCK",
+ "FLUSH_FRAME",
+ "COMPRESSOBJ_FLUSH_FINISH",
+ "COMPRESSOBJ_FLUSH_BLOCK",
+ "ZSTD_VERSION",
+ "FRAME_HEADER",
+ "CONTENTSIZE_UNKNOWN",
+ "CONTENTSIZE_ERROR",
+ "MAX_COMPRESSION_LEVEL",
+ "COMPRESSION_RECOMMENDED_INPUT_SIZE",
+ "COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+ "DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
+ "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+ "MAGIC_NUMBER",
+ "BLOCKSIZELOG_MAX",
+ "BLOCKSIZE_MAX",
+ "WINDOWLOG_MIN",
+ "WINDOWLOG_MAX",
+ "CHAINLOG_MIN",
+ "CHAINLOG_MAX",
+ "HASHLOG_MIN",
+ "HASHLOG_MAX",
+ "MINMATCH_MIN",
+ "MINMATCH_MAX",
+ "SEARCHLOG_MIN",
+ "SEARCHLOG_MAX",
+ "SEARCHLENGTH_MIN",
+ "SEARCHLENGTH_MAX",
+ "TARGETLENGTH_MIN",
+ "TARGETLENGTH_MAX",
+ "LDM_MINMATCH_MIN",
+ "LDM_MINMATCH_MAX",
+ "LDM_BUCKETSIZELOG_MAX",
+ "STRATEGY_FAST",
+ "STRATEGY_DFAST",
+ "STRATEGY_GREEDY",
+ "STRATEGY_LAZY",
+ "STRATEGY_LAZY2",
+ "STRATEGY_BTLAZY2",
+ "STRATEGY_BTOPT",
+ "STRATEGY_BTULTRA",
+ "STRATEGY_BTULTRA2",
+ "DICT_TYPE_AUTO",
+ "DICT_TYPE_RAWCONTENT",
+ "DICT_TYPE_FULLDICT",
+ "FORMAT_ZSTD1",
+ "FORMAT_ZSTD1_MAGICLESS",
+]
+
+import io
+import os
+
+from ._cffi import ( # type: ignore
+ ffi,
+ lib,
+)
+
+
+backend_features = set() # type: ignore
+
+COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize()
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize()
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize()
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize()
+
+new_nonzero = ffi.new_allocator(should_clear_after_alloc=False)
+
+
+MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel()
+MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER
+FRAME_HEADER = b"\x28\xb5\x2f\xfd"
+CONTENTSIZE_UNKNOWN = lib.ZSTD_CONTENTSIZE_UNKNOWN
+CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR
+ZSTD_VERSION = (
+ lib.ZSTD_VERSION_MAJOR,
+ lib.ZSTD_VERSION_MINOR,
+ lib.ZSTD_VERSION_RELEASE,
+)
+
+BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX
+BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX
+WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN
+WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX
+CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN
+CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX
+HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN
+HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX
+MINMATCH_MIN = lib.ZSTD_MINMATCH_MIN
+MINMATCH_MAX = lib.ZSTD_MINMATCH_MAX
+SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN
+SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX
+SEARCHLENGTH_MIN = lib.ZSTD_MINMATCH_MIN
+SEARCHLENGTH_MAX = lib.ZSTD_MINMATCH_MAX
+TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN
+TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX
+LDM_MINMATCH_MIN = lib.ZSTD_LDM_MINMATCH_MIN
+LDM_MINMATCH_MAX = lib.ZSTD_LDM_MINMATCH_MAX
+LDM_BUCKETSIZELOG_MAX = lib.ZSTD_LDM_BUCKETSIZELOG_MAX
+
+STRATEGY_FAST = lib.ZSTD_fast
+STRATEGY_DFAST = lib.ZSTD_dfast
+STRATEGY_GREEDY = lib.ZSTD_greedy
+STRATEGY_LAZY = lib.ZSTD_lazy
+STRATEGY_LAZY2 = lib.ZSTD_lazy2
+STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2
+STRATEGY_BTOPT = lib.ZSTD_btopt
+STRATEGY_BTULTRA = lib.ZSTD_btultra
+STRATEGY_BTULTRA2 = lib.ZSTD_btultra2
+
+DICT_TYPE_AUTO = lib.ZSTD_dct_auto
+DICT_TYPE_RAWCONTENT = lib.ZSTD_dct_rawContent
+DICT_TYPE_FULLDICT = lib.ZSTD_dct_fullDict
+
+FORMAT_ZSTD1 = lib.ZSTD_f_zstd1
+FORMAT_ZSTD1_MAGICLESS = lib.ZSTD_f_zstd1_magicless
+
+FLUSH_BLOCK = 0
+FLUSH_FRAME = 1
+
+COMPRESSOBJ_FLUSH_FINISH = 0
+COMPRESSOBJ_FLUSH_BLOCK = 1
+
+
+def _cpu_count():
+ # os.cpu_count() was introducd in Python 3.4.
+ try:
+ return os.cpu_count() or 0
+ except AttributeError:
+ pass
+
+ # Linux.
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except (AttributeError, ValueError):
+ pass
+
+ # TODO implement on other platforms.
+ return 0
+
+
+class BufferSegment:
+ """Represents a segment within a ``BufferWithSegments``.
+
+ This type is essentially a reference to N bytes within a
+ ``BufferWithSegments``.
+
+ The object conforms to the buffer protocol.
+ """
+
+ @property
+ def offset(self):
+ """The byte offset of this segment within its parent buffer."""
+ raise NotImplementedError()
+
+ def __len__(self):
+ """Obtain the length of the segment, in bytes."""
+ raise NotImplementedError()
+
+ def tobytes(self):
+ """Obtain bytes copy of this segment."""
+ raise NotImplementedError()
+
+
+class BufferSegments:
+ """Represents an array of ``(offset, length)`` integers.
+
+ This type is effectively an index used by :py:class:`BufferWithSegments`.
+
+ The array members are 64-bit unsigned integers using host/native bit order.
+
+ Instances conform to the buffer protocol.
+ """
+
+
+class BufferWithSegments:
+ """A memory buffer containing N discrete items of known lengths.
+
+ This type is essentially a fixed size memory address and an array
+ of 2-tuples of ``(offset, length)`` 64-bit unsigned native-endian
+ integers defining the byte offset and length of each segment within
+ the buffer.
+
+ Instances behave like containers.
+
+ Instances also conform to the buffer protocol. So a reference to the
+ backing bytes can be obtained via ``memoryview(o)``. A *copy* of the
+ backing bytes can be obtained via ``.tobytes()``.
+
+ This type exists to facilitate operations against N>1 items without
+ the overhead of Python object creation and management. Used with
+ APIs like :py:meth:`ZstdDecompressor.multi_decompress_to_buffer`, it
+ is possible to decompress many objects in parallel without the GIL
+ held, leading to even better performance.
+ """
+
+ @property
+ def size(self):
+ """Total sizein bytes of the backing buffer."""
+ raise NotImplementedError()
+
+ def __len__(self):
+ raise NotImplementedError()
+
+ def __getitem__(self, i):
+ """Obtains a segment within the buffer.
+
+ The returned object references memory within this buffer.
+
+ :param i:
+ Integer index of segment to retrieve.
+ :return:
+ :py:class:`BufferSegment`
+ """
+ raise NotImplementedError()
+
+ def segments(self):
+ """Obtain the array of ``(offset, length)`` segments in the buffer.
+
+ :return:
+ :py:class:`BufferSegments`
+ """
+ raise NotImplementedError()
+
+ def tobytes(self):
+ """Obtain bytes copy of this instance."""
+ raise NotImplementedError()
+
+
+class BufferWithSegmentsCollection:
+ """A virtual spanning view over multiple BufferWithSegments.
+
+ Instances are constructed from 1 or more :py:class:`BufferWithSegments`
+ instances. The resulting object behaves like an ordered sequence whose
+ members are the segments within each ``BufferWithSegments``.
+
+ If the object is composed of 2 ``BufferWithSegments`` instances with the
+ first having 2 segments and the second have 3 segments, then ``b[0]``
+ and ``b[1]`` access segments in the first object and ``b[2]``, ``b[3]``,
+ and ``b[4]`` access segments from the second.
+ """
+
+ def __len__(self):
+ """The number of segments within all ``BufferWithSegments``."""
+ raise NotImplementedError()
+
+ def __getitem__(self, i):
+ """Obtain the ``BufferSegment`` at an offset."""
+ raise NotImplementedError()
+
+
+class ZstdError(Exception):
+ pass
+
+
+def _zstd_error(zresult):
+ # Resolves to bytes on Python 2 and 3. We use the string for formatting
+ # into error messages, which will be literal unicode. So convert it to
+ # unicode.
+ return ffi.string(lib.ZSTD_getErrorName(zresult)).decode("utf-8")
+
+
+def _make_cctx_params(params):
+ res = lib.ZSTD_createCCtxParams()
+ if res == ffi.NULL:
+ raise MemoryError()
+
+ res = ffi.gc(res, lib.ZSTD_freeCCtxParams)
+
+ attrs = [
+ (lib.ZSTD_c_format, params.format),
+ (lib.ZSTD_c_compressionLevel, params.compression_level),
+ (lib.ZSTD_c_windowLog, params.window_log),
+ (lib.ZSTD_c_hashLog, params.hash_log),
+ (lib.ZSTD_c_chainLog, params.chain_log),
+ (lib.ZSTD_c_searchLog, params.search_log),
+ (lib.ZSTD_c_minMatch, params.min_match),
+ (lib.ZSTD_c_targetLength, params.target_length),
+ (lib.ZSTD_c_strategy, params.strategy),
+ (lib.ZSTD_c_contentSizeFlag, params.write_content_size),
+ (lib.ZSTD_c_checksumFlag, params.write_checksum),
+ (lib.ZSTD_c_dictIDFlag, params.write_dict_id),
+ (lib.ZSTD_c_nbWorkers, params.threads),
+ (lib.ZSTD_c_jobSize, params.job_size),
+ (lib.ZSTD_c_overlapLog, params.overlap_log),
+ (lib.ZSTD_c_forceMaxWindow, params.force_max_window),
+ (lib.ZSTD_c_enableLongDistanceMatching, params.enable_ldm),
+ (lib.ZSTD_c_ldmHashLog, params.ldm_hash_log),
+ (lib.ZSTD_c_ldmMinMatch, params.ldm_min_match),
+ (lib.ZSTD_c_ldmBucketSizeLog, params.ldm_bucket_size_log),
+ (lib.ZSTD_c_ldmHashRateLog, params.ldm_hash_rate_log),
+ ]
+
+ for param, value in attrs:
+ _set_compression_parameter(res, param, value)
+
+ return res
+
+
+class ZstdCompressionParameters(object):
+ """Low-level zstd compression parameters.
+
+ This type represents a collection of parameters to control how zstd
+ compression is performed.
+
+ Instances can be constructed from raw parameters or derived from a
+ base set of defaults specified from a compression level (recommended)
+ via :py:meth:`ZstdCompressionParameters.from_level`.
+
+ >>> # Derive compression settings for compression level 7.
+ >>> params = zstandard.ZstdCompressionParameters.from_level(7)
+
+ >>> # With an input size of 1MB
+ >>> params = zstandard.ZstdCompressionParameters.from_level(7, source_size=1048576)
+
+ Using ``from_level()``, it is also possible to override individual compression
+ parameters or to define additional settings that aren't automatically derived.
+ e.g.:
+
+ >>> params = zstandard.ZstdCompressionParameters.from_level(4, window_log=10)
+ >>> params = zstandard.ZstdCompressionParameters.from_level(5, threads=4)
+
+ Or you can define low-level compression settings directly:
+
+ >>> params = zstandard.ZstdCompressionParameters(window_log=12, enable_ldm=True)
+
+ Once a ``ZstdCompressionParameters`` instance is obtained, it can be used to
+ configure a compressor:
+
+ >>> cctx = zstandard.ZstdCompressor(compression_params=params)
+
+ Some of these are very low-level settings. It may help to consult the official
+ zstandard documentation for their behavior. Look for the ``ZSTD_p_*`` constants
+ in ``zstd.h`` (https://github.com/facebook/zstd/blob/dev/lib/zstd.h).
+ """
+
+ @staticmethod
+ def from_level(level, source_size=0, dict_size=0, **kwargs):
+ """Create compression parameters from a compression level.
+
+ :param level:
+ Integer compression level.
+ :param source_size:
+ Integer size in bytes of source to be compressed.
+ :param dict_size:
+ Integer size in bytes of compression dictionary to use.
+ :return:
+ :py:class:`ZstdCompressionParameters`
+ """
+ params = lib.ZSTD_getCParams(level, source_size, dict_size)
+
+ args = {
+ "window_log": "windowLog",
+ "chain_log": "chainLog",
+ "hash_log": "hashLog",
+ "search_log": "searchLog",
+ "min_match": "minMatch",
+ "target_length": "targetLength",
+ "strategy": "strategy",
+ }
+
+ for arg, attr in args.items():
+ if arg not in kwargs:
+ kwargs[arg] = getattr(params, attr)
+
+ return ZstdCompressionParameters(**kwargs)
+
+ def __init__(
+ self,
+ format=0,
+ compression_level=0,
+ window_log=0,
+ hash_log=0,
+ chain_log=0,
+ search_log=0,
+ min_match=0,
+ target_length=0,
+ strategy=-1,
+ write_content_size=1,
+ write_checksum=0,
+ write_dict_id=0,
+ job_size=0,
+ overlap_log=-1,
+ force_max_window=0,
+ enable_ldm=0,
+ ldm_hash_log=0,
+ ldm_min_match=0,
+ ldm_bucket_size_log=0,
+ ldm_hash_rate_log=-1,
+ threads=0,
+ ):
+
+ params = lib.ZSTD_createCCtxParams()
+ if params == ffi.NULL:
+ raise MemoryError()
+
+ params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
+
+ self._params = params
+
+ if threads < 0:
+ threads = _cpu_count()
+
+ # We need to set ZSTD_c_nbWorkers before ZSTD_c_jobSize and ZSTD_c_overlapLog
+ # because setting ZSTD_c_nbWorkers resets the other parameters.
+ _set_compression_parameter(params, lib.ZSTD_c_nbWorkers, threads)
+
+ _set_compression_parameter(params, lib.ZSTD_c_format, format)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_compressionLevel, compression_level
+ )
+ _set_compression_parameter(params, lib.ZSTD_c_windowLog, window_log)
+ _set_compression_parameter(params, lib.ZSTD_c_hashLog, hash_log)
+ _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
+ _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
+ _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_targetLength, target_length
+ )
+
+ if strategy == -1:
+ strategy = 0
+
+ _set_compression_parameter(params, lib.ZSTD_c_strategy, strategy)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_contentSizeFlag, write_content_size
+ )
+ _set_compression_parameter(
+ params, lib.ZSTD_c_checksumFlag, write_checksum
+ )
+ _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
+ _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
+
+ if overlap_log == -1:
+ overlap_log = 0
+
+ _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_forceMaxWindow, force_max_window
+ )
+ _set_compression_parameter(
+ params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
+ )
+ _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+ )
+ _set_compression_parameter(
+ params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
+ )
+
+ if ldm_hash_rate_log == -1:
+ ldm_hash_rate_log = 0
+
+ _set_compression_parameter(
+ params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+ )
+
+ @property
+ def format(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_format)
+
+ @property
+ def compression_level(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_compressionLevel
+ )
+
+ @property
+ def window_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_windowLog)
+
+ @property
+ def hash_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_hashLog)
+
+ @property
+ def chain_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_chainLog)
+
+ @property
+ def search_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_searchLog)
+
+ @property
+ def min_match(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_minMatch)
+
+ @property
+ def target_length(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_targetLength)
+
+ @property
+ def strategy(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_strategy)
+
+ @property
+ def write_content_size(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_contentSizeFlag
+ )
+
+ @property
+ def write_checksum(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_checksumFlag)
+
+ @property
+ def write_dict_id(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_dictIDFlag)
+
+ @property
+ def job_size(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_jobSize)
+
+ @property
+ def overlap_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_overlapLog)
+
+ @property
+ def force_max_window(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_forceMaxWindow
+ )
+
+ @property
+ def enable_ldm(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_enableLongDistanceMatching
+ )
+
+ @property
+ def ldm_hash_log(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashLog)
+
+ @property
+ def ldm_min_match(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_ldmMinMatch)
+
+ @property
+ def ldm_bucket_size_log(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_ldmBucketSizeLog
+ )
+
+ @property
+ def ldm_hash_rate_log(self):
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_ldmHashRateLog
+ )
+
+ @property
+ def threads(self):
+ return _get_compression_parameter(self._params, lib.ZSTD_c_nbWorkers)
+
+ def estimated_compression_context_size(self):
+ """Estimated size in bytes needed to compress with these parameters."""
+ return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params)
+
+
+def estimate_decompression_context_size():
+ """Estimate the memory size requirements for a decompressor instance.
+
+ :return:
+ Integer number of bytes.
+ """
+ return lib.ZSTD_estimateDCtxSize()
+
+
+def _set_compression_parameter(params, param, value):
+ zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "unable to set compression context parameter: %s"
+ % _zstd_error(zresult)
+ )
+
+
+def _get_compression_parameter(params, param):
+ result = ffi.new("int *")
+
+ zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "unable to get compression context parameter: %s"
+ % _zstd_error(zresult)
+ )
+
+ return result[0]
+
+
+class ZstdCompressionWriter(object):
+ """Writable compressing stream wrapper.
+
+ ``ZstdCompressionWriter`` is a write-only stream interface for writing
+ compressed data to another stream.
+
+ This type conforms to the ``io.RawIOBase`` interface and should be usable
+ by any type that operates against a *file-object* (``typing.BinaryIO``
+ in Python type hinting speak). Only methods that involve writing will do
+ useful things.
+
+ As data is written to this stream (e.g. via ``write()``), that data
+ is sent to the compressor. As compressed data becomes available from
+ the compressor, it is sent to the underlying stream by calling its
+ ``write()`` method.
+
+ Both ``write()`` and ``flush()`` return the number of bytes written to the
+ object's ``write()``. In many cases, small inputs do not accumulate enough
+ data to cause a write and ``write()`` will return ``0``.
+
+ Calling ``close()`` will mark the stream as closed and subsequent I/O
+ operations will raise ``ValueError`` (per the documented behavior of
+ ``io.RawIOBase``). ``close()`` will also call ``close()`` on the underlying
+ stream if such a method exists and the instance was constructed with
+ ``closefd=True``
+
+ Instances are obtained by calling :py:meth:`ZstdCompressor.stream_writer`.
+
+ Typically usage is as follows:
+
+ >>> cctx = zstandard.ZstdCompressor(level=10)
+ >>> compressor = cctx.stream_writer(fh)
+ >>> compressor.write(b"chunk 0\\n")
+ >>> compressor.write(b"chunk 1\\n")
+ >>> compressor.flush()
+ >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\n`` at this point.
+ >>> # Receiver is also expecting more data in the zstd *frame*.
+ >>>
+ >>> compressor.write(b"chunk 2\\n")
+ >>> compressor.flush(zstandard.FLUSH_FRAME)
+ >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\nchunk 2``.
+ >>> # Receiver is expecting no more data, as the zstd frame is closed.
+ >>> # Any future calls to ``write()`` at this point will construct a new
+ >>> # zstd frame.
+
+ Instances can be used as context managers. Exiting the context manager is
+ the equivalent of calling ``close()``, which is equivalent to calling
+ ``flush(zstandard.FLUSH_FRAME)``:
+
+ >>> cctx = zstandard.ZstdCompressor(level=10)
+ >>> with cctx.stream_writer(fh) as compressor:
+ ... compressor.write(b'chunk 0')
+ ... compressor.write(b'chunk 1')
+ ... ...
+
+ .. important::
+
+ If ``flush(FLUSH_FRAME)`` is not called, emitted data doesn't
+ constitute a full zstd *frame* and consumers of this data may complain
+ about malformed input. It is recommended to use instances as a context
+ manager to ensure *frames* are properly finished.
+
+ If the size of the data being fed to this streaming compressor is known,
+ you can declare it before compression begins:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> with cctx.stream_writer(fh, size=data_len) as compressor:
+ ... compressor.write(chunk0)
+ ... compressor.write(chunk1)
+ ... ...
+
+ Declaring the size of the source data allows compression parameters to
+ be tuned. And if ``write_content_size`` is used, it also results in the
+ content size being written into the frame header of the output data.
+
+ The size of chunks being ``write()`` to the destination can be specified:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> with cctx.stream_writer(fh, write_size=32768) as compressor:
+ ... ...
+
+ To see how much memory is being used by the streaming compressor:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> with cctx.stream_writer(fh) as compressor:
+ ... ...
+ ... byte_size = compressor.memory_size()
+
+ Thte total number of bytes written so far are exposed via ``tell()``:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> with cctx.stream_writer(fh) as compressor:
+ ... ...
+ ... total_written = compressor.tell()
+
+ ``stream_writer()`` accepts a ``write_return_read`` boolean argument to
+ control the return value of ``write()``. When ``False`` (the default),
+ ``write()`` returns the number of bytes that were ``write()``'en to the
+ underlying object. When ``True``, ``write()`` returns the number of bytes
+ read from the input that were subsequently written to the compressor.
+ ``True`` is the *proper* behavior for ``write()`` as specified by the
+ ``io.RawIOBase`` interface and will become the default value in a future
+ release.
+ """
+
+ def __init__(
+ self,
+ compressor,
+ writer,
+ source_size,
+ write_size,
+ write_return_read,
+ closefd=True,
+ ):
+ self._compressor = compressor
+ self._writer = writer
+ self._write_size = write_size
+ self._write_return_read = bool(write_return_read)
+ self._closefd = bool(closefd)
+ self._entered = False
+ self._closing = False
+ self._closed = False
+ self._bytes_compressed = 0
+
+ self._dst_buffer = ffi.new("char[]", write_size)
+ self._out_buffer = ffi.new("ZSTD_outBuffer *")
+ self._out_buffer.dst = self._dst_buffer
+ self._out_buffer.size = len(self._dst_buffer)
+ self._out_buffer.pos = 0
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ def __enter__(self):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._entered:
+ raise ZstdError("cannot __enter__ multiple times")
+
+ self._entered = True
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self._entered = False
+ self.close()
+ self._compressor = None
+
+ return False
+
+ def __iter__(self):
+ raise io.UnsupportedOperation()
+
+ def __next__(self):
+ raise io.UnsupportedOperation()
+
+ def memory_size(self):
+ return lib.ZSTD_sizeof_CCtx(self._compressor._cctx)
+
+ def fileno(self):
+ f = getattr(self._writer, "fileno", None)
+ if f:
+ return f()
+ else:
+ raise OSError("fileno not available on underlying writer")
+
+ def close(self):
+ if self._closed:
+ return
+
+ try:
+ self._closing = True
+ self.flush(FLUSH_FRAME)
+ finally:
+ self._closing = False
+ self._closed = True
+
+ # Call close() on underlying stream as well.
+ f = getattr(self._writer, "close", None)
+ if self._closefd and f:
+ f()
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return False
+
+ def readline(self, size=-1):
+ raise io.UnsupportedOperation()
+
+ def readlines(self, hint=-1):
+ raise io.UnsupportedOperation()
+
+ def seek(self, offset, whence=None):
+ raise io.UnsupportedOperation()
+
+ def seekable(self):
+ return False
+
+ def truncate(self, size=None):
+ raise io.UnsupportedOperation()
+
+ def writable(self):
+ return True
+
+ def writelines(self, lines):
+ raise NotImplementedError("writelines() is not yet implemented")
+
+ def read(self, size=-1):
+ raise io.UnsupportedOperation()
+
+ def readall(self):
+ raise io.UnsupportedOperation()
+
+ def readinto(self, b):
+ raise io.UnsupportedOperation()
+
+ def write(self, data):
+ """Send data to the compressor and possibly to the inner stream."""
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ total_write = 0
+
+ data_buffer = ffi.from_buffer(data)
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ out_buffer = self._out_buffer
+ out_buffer.pos = 0
+
+ while in_buffer.pos < in_buffer.size:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx,
+ out_buffer,
+ in_buffer,
+ lib.ZSTD_e_continue,
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
+ total_write += out_buffer.pos
+ self._bytes_compressed += out_buffer.pos
+ out_buffer.pos = 0
+
+ if self._write_return_read:
+ return in_buffer.pos
+ else:
+ return total_write
+
+ def flush(self, flush_mode=FLUSH_BLOCK):
+ """Evict data from compressor's internal state and write it to inner stream.
+
+ Calling this method may result in 0 or more ``write()`` calls to the
+ inner stream.
+
+ This method will also call ``flush()`` on the inner stream, if such a
+ method exists.
+
+ :param flush_mode:
+ How to flush the zstd compressor.
+
+ ``zstandard.FLUSH_BLOCK`` will flush data already sent to the
+ compressor but not emitted to the inner stream. The stream is still
+ writable after calling this. This is the default behavior.
+
+ See documentation for other ``zstandard.FLUSH_*`` constants for more
+ flushing options.
+ :return:
+ Integer number of bytes written to the inner stream.
+ """
+
+ if flush_mode == FLUSH_BLOCK:
+ flush = lib.ZSTD_e_flush
+ elif flush_mode == FLUSH_FRAME:
+ flush = lib.ZSTD_e_end
+ else:
+ raise ValueError("unknown flush_mode: %r" % flush_mode)
+
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ total_write = 0
+
+ out_buffer = self._out_buffer
+ out_buffer.pos = 0
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ in_buffer.src = ffi.NULL
+ in_buffer.size = 0
+ in_buffer.pos = 0
+
+ while True:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, out_buffer, in_buffer, flush
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
+ total_write += out_buffer.pos
+ self._bytes_compressed += out_buffer.pos
+ out_buffer.pos = 0
+
+ if not zresult:
+ break
+
+ f = getattr(self._writer, "flush", None)
+ if f and not self._closing:
+ f()
+
+ return total_write
+
+ def tell(self):
+ return self._bytes_compressed
+
+
+class ZstdCompressionObj(object):
+ """A compressor conforming to the API in Python's standard library.
+
+ This type implements an API similar to compression types in Python's
+ standard library such as ``zlib.compressobj`` and ``bz2.BZ2Compressor``.
+ This enables existing code targeting the standard library API to swap
+ in this type to achieve zstd compression.
+
+ .. important::
+
+ The design of this API is not ideal for optimal performance.
+
+ The reason performance is not optimal is because the API is limited to
+ returning a single buffer holding compressed data. When compressing
+ data, we don't know how much data will be emitted. So in order to
+ capture all this data in a single buffer, we need to perform buffer
+ reallocations and/or extra memory copies. This can add significant
+ overhead depending on the size or nature of the compressed data how
+ much your application calls this type.
+
+ If performance is critical, consider an API like
+ :py:meth:`ZstdCompressor.stream_reader`,
+ :py:meth:`ZstdCompressor.stream_writer`,
+ :py:meth:`ZstdCompressor.chunker`, or
+ :py:meth:`ZstdCompressor.read_to_iter`, which result in less overhead
+ managing buffers.
+
+ Instances are obtained by calling :py:meth:`ZstdCompressor.compressobj`.
+
+ Here is how this API should be used:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> cobj = cctx.compressobj()
+ >>> data = cobj.compress(b"raw input 0")
+ >>> data = cobj.compress(b"raw input 1")
+ >>> data = cobj.flush()
+
+ Or to flush blocks:
+
+ >>> cctx.zstandard.ZstdCompressor()
+ >>> cobj = cctx.compressobj()
+ >>> data = cobj.compress(b"chunk in first block")
+ >>> data = cobj.flush(zstandard.COMPRESSOBJ_FLUSH_BLOCK)
+ >>> data = cobj.compress(b"chunk in second block")
+ >>> data = cobj.flush()
+
+ For best performance results, keep input chunks under 256KB. This avoids
+ extra allocations for a large output object.
+
+ It is possible to declare the input size of the data that will be fed
+ into the compressor:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> cobj = cctx.compressobj(size=6)
+ >>> data = cobj.compress(b"foobar")
+ >>> data = cobj.flush()
+ """
+
+ def compress(self, data):
+ """Send data to the compressor.
+
+ This method receives bytes to feed to the compressor and returns
+ bytes constituting zstd compressed data.
+
+ The zstd compressor accumulates bytes and the returned bytes may be
+ substantially smaller or larger than the size of the input data on
+ any given call. The returned value may be the empty byte string
+ (``b""``).
+
+ :param data:
+ Data to write to the compressor.
+ :return:
+ Compressed data.
+ """
+ if self._finished:
+ raise ZstdError("cannot call compress() after compressor finished")
+
+ data_buffer = ffi.from_buffer(data)
+ source = ffi.new("ZSTD_inBuffer *")
+ source.src = data_buffer
+ source.size = len(data_buffer)
+ source.pos = 0
+
+ chunks = []
+
+ while source.pos < len(data):
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if self._out.pos:
+ chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+ self._out.pos = 0
+
+ return b"".join(chunks)
+
+ def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
+ """Emit data accumulated in the compressor that hasn't been outputted yet.
+
+ The ``flush_mode`` argument controls how to end the stream.
+
+ ``zstandard.COMPRESSOBJ_FLUSH_FINISH`` (the default) ends the
+ compression stream and finishes a zstd frame. Once this type of flush
+ is performed, ``compress()`` and ``flush()`` can no longer be called.
+ This type of flush **must** be called to end the compression context. If
+ not called, the emitted data may be incomplete and may not be readable
+ by a decompressor.
+
+ ``zstandard.COMPRESSOBJ_FLUSH_BLOCK`` will flush a zstd block. This
+ ensures that all data fed to this instance will have been omitted and
+ can be decoded by a decompressor. Flushes of this type can be performed
+ multiple times. The next call to ``compress()`` will begin a new zstd
+ block.
+
+ :param flush_mode:
+ How to flush the zstd compressor.
+ :return:
+ Compressed data.
+ """
+ if flush_mode not in (
+ COMPRESSOBJ_FLUSH_FINISH,
+ COMPRESSOBJ_FLUSH_BLOCK,
+ ):
+ raise ValueError("flush mode not recognized")
+
+ if self._finished:
+ raise ZstdError("compressor object already finished")
+
+ if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+ z_flush_mode = lib.ZSTD_e_flush
+ elif flush_mode == COMPRESSOBJ_FLUSH_FINISH:
+ z_flush_mode = lib.ZSTD_e_end
+ self._finished = True
+ else:
+ raise ZstdError("unhandled flush mode")
+
+ assert self._out.pos == 0
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ in_buffer.src = ffi.NULL
+ in_buffer.size = 0
+ in_buffer.pos = 0
+
+ chunks = []
+
+ while True:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, self._out, in_buffer, z_flush_mode
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s" % _zstd_error(zresult)
+ )
+
+ if self._out.pos:
+ chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+ self._out.pos = 0
+
+ if not zresult:
+ break
+
+ return b"".join(chunks)
+
+
+class ZstdCompressionChunker(object):
+ """Compress data to uniformly sized chunks.
+
+ This type allows you to iteratively feed chunks of data into a compressor
+ and produce output chunks of uniform size.
+
+ ``compress()``, ``flush()``, and ``finish()`` all return an iterator of
+ ``bytes`` instances holding compressed data. The iterator may be empty.
+ Callers MUST iterate through all elements of the returned iterator before
+ performing another operation on the object or else the compressor's
+ internal state may become confused. This can result in an exception being
+ raised or malformed data being emitted.
+
+ All chunks emitted by ``compress()`` will have a length of the configured
+ chunk size.
+
+ ``flush()`` and ``finish()`` may return a final chunk smaller than
+ the configured chunk size.
+
+ Instances are obtained by calling :py:meth:`ZstdCompressor.chunker`.
+
+ Here is how the API should be used:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> chunker = cctx.chunker(chunk_size=32768)
+ >>>
+ >>> with open(path, 'rb') as fh:
+ ... while True:
+ ... in_chunk = fh.read(32768)
+ ... if not in_chunk:
+ ... break
+ ...
+ ... for out_chunk in chunker.compress(in_chunk):
+ ... # Do something with output chunk of size 32768.
+ ...
+ ... for out_chunk in chunker.finish():
+ ... # Do something with output chunks that finalize the zstd frame.
+
+ This compressor type is often a better alternative to
+ :py:class:`ZstdCompressor.compressobj` because it has better performance
+ properties.
+
+ ``compressobj()`` will emit output data as it is available. This results
+ in a *stream* of output chunks of varying sizes. The consistency of the
+ output chunk size with ``chunker()`` is more appropriate for many usages,
+ such as sending compressed data to a socket.
+
+ ``compressobj()`` may also perform extra memory reallocations in order
+ to dynamically adjust the sizes of the output chunks. Since ``chunker()``
+ output chunks are all the same size (except for flushed or final chunks),
+ there is less memory allocation/copying overhead.
+ """
+
+ def __init__(self, compressor, chunk_size):
+ self._compressor = compressor
+ self._out = ffi.new("ZSTD_outBuffer *")
+ self._dst_buffer = ffi.new("char[]", chunk_size)
+ self._out.dst = self._dst_buffer
+ self._out.size = chunk_size
+ self._out.pos = 0
+
+ self._in = ffi.new("ZSTD_inBuffer *")
+ self._in.src = ffi.NULL
+ self._in.size = 0
+ self._in.pos = 0
+ self._finished = False
+
+ def compress(self, data):
+ """Feed new input data into the compressor.
+
+ :param data:
+ Data to feed to compressor.
+ :return:
+ Iterator of ``bytes`` representing chunks of compressed data.
+ """
+ if self._finished:
+ raise ZstdError("cannot call compress() after compression finished")
+
+ if self._in.src != ffi.NULL:
+ raise ZstdError(
+ "cannot perform operation before consuming output "
+ "from previous operation"
+ )
+
+ data_buffer = ffi.from_buffer(data)
+
+ if not len(data_buffer):
+ return
+
+ self._in.src = data_buffer
+ self._in.size = len(data_buffer)
+ self._in.pos = 0
+
+ while self._in.pos < self._in.size:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, self._out, self._in, lib.ZSTD_e_continue
+ )
+
+ if self._in.pos == self._in.size:
+ self._in.src = ffi.NULL
+ self._in.size = 0
+ self._in.pos = 0
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if self._out.pos == self._out.size:
+ yield ffi.buffer(self._out.dst, self._out.pos)[:]
+ self._out.pos = 0
+
+ def flush(self):
+ """Flushes all data currently in the compressor.
+
+ :return:
+ Iterator of ``bytes`` of compressed data.
+ """
+ if self._finished:
+ raise ZstdError("cannot call flush() after compression finished")
+
+ if self._in.src != ffi.NULL:
+ raise ZstdError(
+ "cannot call flush() before consuming output from "
+ "previous operation"
+ )
+
+ while True:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if self._out.pos:
+ yield ffi.buffer(self._out.dst, self._out.pos)[:]
+ self._out.pos = 0
+
+ if not zresult:
+ return
+
+ def finish(self):
+ """Signals the end of input data.
+
+ No new data can be compressed after this method is called.
+
+ This method will flush buffered data and finish the zstd frame.
+
+ :return:
+ Iterator of ``bytes`` of compressed data.
+ """
+ if self._finished:
+ raise ZstdError("cannot call finish() after compression finished")
+
+ if self._in.src != ffi.NULL:
+ raise ZstdError(
+ "cannot call finish() before consuming output from "
+ "previous operation"
+ )
+
+ while True:
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if self._out.pos:
+ yield ffi.buffer(self._out.dst, self._out.pos)[:]
+ self._out.pos = 0
+
+ if not zresult:
+ self._finished = True
+ return
+
+
+class ZstdCompressionReader(object):
+ """Readable compressing stream wrapper.
+
+ ``ZstdCompressionReader`` is a read-only stream interface for obtaining
+ compressed data from a source.
+
+ This type conforms to the ``io.RawIOBase`` interface and should be usable
+ by any type that operates against a *file-object* (``typing.BinaryIO``
+ in Python type hinting speak).
+
+ Instances are neither writable nor seekable (even if the underlying
+ source is seekable). ``readline()`` and ``readlines()`` are not implemented
+ because they don't make sense for compressed data. ``tell()`` returns the
+ number of compressed bytes emitted so far.
+
+ Instances are obtained by calling :py:meth:`ZstdCompressor.stream_reader`.
+
+ In this example, we open a file for reading and then wrap that file
+ handle with a stream from which compressed data can be ``read()``.
+
+ >>> with open(path, 'rb') as fh:
+ ... cctx = zstandard.ZstdCompressor()
+ ... reader = cctx.stream_reader(fh)
+ ... while True:
+ ... chunk = reader.read(16384)
+ ... if not chunk:
+ ... break
+ ...
+ ... # Do something with compressed chunk.
+
+ Instances can also be used as context managers:
+
+ >>> with open(path, 'rb') as fh:
+ ... cctx = zstandard.ZstdCompressor()
+ ... with cctx.stream_reader(fh) as reader:
+ ... while True:
+ ... chunk = reader.read(16384)
+ ... if not chunk:
+ ... break
+ ...
+ ... # Do something with compressed chunk.
+
+ When the context manager exits or ``close()`` is called, the stream is
+ closed, underlying resources are released, and future operations against
+ the compression stream will fail.
+
+ ``stream_reader()`` accepts a ``size`` argument specifying how large the
+ input stream is. This is used to adjust compression parameters so they are
+ tailored to the source size. e.g.
+
+ >>> with open(path, 'rb') as fh:
+ ... cctx = zstandard.ZstdCompressor()
+ ... with cctx.stream_reader(fh, size=os.stat(path).st_size) as reader:
+ ... ...
+
+ If the ``source`` is a stream, you can specify how large ``read()``
+ requests to that stream should be via the ``read_size`` argument.
+ It defaults to ``zstandard.COMPRESSION_RECOMMENDED_INPUT_SIZE``. e.g.
+
+ >>> with open(path, 'rb') as fh:
+ ... cctx = zstandard.ZstdCompressor()
+ ... # Will perform fh.read(8192) when obtaining data to feed into the
+ ... # compressor.
+ ... with cctx.stream_reader(fh, read_size=8192) as reader:
+ ... ...
+ """
+
+ def __init__(self, compressor, source, read_size, closefd=True):
+ self._compressor = compressor
+ self._source = source
+ self._read_size = read_size
+ self._closefd = closefd
+ self._entered = False
+ self._closed = False
+ self._bytes_compressed = 0
+ self._finished_input = False
+ self._finished_output = False
+
+ self._in_buffer = ffi.new("ZSTD_inBuffer *")
+ # Holds a ref so backing bytes in self._in_buffer stay alive.
+ self._source_buffer = None
+
+ def __enter__(self):
+ if self._entered:
+ raise ValueError("cannot __enter__ multiple times")
+
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ self._entered = True
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self._entered = False
+ self._compressor = None
+ self.close()
+ self._source = None
+
+ return False
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return False
+
+ def seekable(self):
+ return False
+
+ def readline(self):
+ raise io.UnsupportedOperation()
+
+ def readlines(self):
+ raise io.UnsupportedOperation()
+
+ def write(self, data):
+ raise OSError("stream is not writable")
+
+ def writelines(self, ignored):
+ raise OSError("stream is not writable")
+
+ def isatty(self):
+ return False
+
+ def flush(self):
+ return None
+
+ def close(self):
+ if self._closed:
+ return
+
+ self._closed = True
+
+ f = getattr(self._source, "close", None)
+ if self._closefd and f:
+ f()
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def tell(self):
+ return self._bytes_compressed
+
+ def readall(self):
+ chunks = []
+
+ while True:
+ chunk = self.read(1048576)
+ if not chunk:
+ break
+
+ chunks.append(chunk)
+
+ return b"".join(chunks)
+
+ def __iter__(self):
+ raise io.UnsupportedOperation()
+
+ def __next__(self):
+ raise io.UnsupportedOperation()
+
+ next = __next__
+
+ def _read_input(self):
+ if self._finished_input:
+ return
+
+ if hasattr(self._source, "read"):
+ data = self._source.read(self._read_size)
+
+ if not data:
+ self._finished_input = True
+ return
+
+ self._source_buffer = ffi.from_buffer(data)
+ self._in_buffer.src = self._source_buffer
+ self._in_buffer.size = len(self._source_buffer)
+ self._in_buffer.pos = 0
+ else:
+ self._source_buffer = ffi.from_buffer(self._source)
+ self._in_buffer.src = self._source_buffer
+ self._in_buffer.size = len(self._source_buffer)
+ self._in_buffer.pos = 0
+
+ def _compress_into_buffer(self, out_buffer):
+ if self._in_buffer.pos >= self._in_buffer.size:
+ return
+
+ old_pos = out_buffer.pos
+
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx,
+ out_buffer,
+ self._in_buffer,
+ lib.ZSTD_e_continue,
+ )
+
+ self._bytes_compressed += out_buffer.pos - old_pos
+
+ if self._in_buffer.pos == self._in_buffer.size:
+ self._in_buffer.src = ffi.NULL
+ self._in_buffer.pos = 0
+ self._in_buffer.size = 0
+ self._source_buffer = None
+
+ if not hasattr(self._source, "read"):
+ self._finished_input = True
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError("zstd compress error: %s", _zstd_error(zresult))
+
+ return out_buffer.pos and out_buffer.pos == out_buffer.size
+
+ def read(self, size=-1):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if size < -1:
+ raise ValueError("cannot read negative amounts less than -1")
+
+ if size == -1:
+ return self.readall()
+
+ if self._finished_output or size == 0:
+ return b""
+
+ # Need a dedicated ref to dest buffer otherwise it gets collected.
+ dst_buffer = ffi.new("char[]", size)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dst_buffer
+ out_buffer.size = size
+ out_buffer.pos = 0
+
+ if self._compress_into_buffer(out_buffer):
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ while not self._finished_input:
+ self._read_input()
+
+ if self._compress_into_buffer(out_buffer):
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ # EOF
+ old_pos = out_buffer.pos
+
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+ )
+
+ self._bytes_compressed += out_buffer.pos - old_pos
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s", _zstd_error(zresult)
+ )
+
+ if zresult == 0:
+ self._finished_output = True
+
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ def read1(self, size=-1):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if size < -1:
+ raise ValueError("cannot read negative amounts less than -1")
+
+ if self._finished_output or size == 0:
+ return b""
+
+ # -1 returns arbitrary number of bytes.
+ if size == -1:
+ size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+
+ dst_buffer = ffi.new("char[]", size)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dst_buffer
+ out_buffer.size = size
+ out_buffer.pos = 0
+
+ # read1() dictates that we can perform at most 1 call to the
+ # underlying stream to get input. However, we can't satisfy this
+ # restriction with compression because not all input generates output.
+ # It is possible to perform a block flush in order to ensure output.
+ # But this may not be desirable behavior. So we allow multiple read()
+ # to the underlying stream. But unlike read(), we stop once we have
+ # any output.
+
+ self._compress_into_buffer(out_buffer)
+ if out_buffer.pos:
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ while not self._finished_input:
+ self._read_input()
+
+ # If we've filled the output buffer, return immediately.
+ if self._compress_into_buffer(out_buffer):
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ # If we've populated the output buffer and we're not at EOF,
+ # also return, as we've satisfied the read1() limits.
+ if out_buffer.pos and not self._finished_input:
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ # Else if we're at EOS and we have room left in the buffer,
+ # fall through to below and try to add more data to the output.
+
+ # EOF.
+ old_pos = out_buffer.pos
+
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+ )
+
+ self._bytes_compressed += out_buffer.pos - old_pos
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s" % _zstd_error(zresult)
+ )
+
+ if zresult == 0:
+ self._finished_output = True
+
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ def readinto(self, b):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._finished_output:
+ return 0
+
+ # TODO use writable=True once we require CFFI >= 1.12.
+ dest_buffer = ffi.from_buffer(b)
+ ffi.memmove(b, b"", 0)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dest_buffer
+ out_buffer.size = len(dest_buffer)
+ out_buffer.pos = 0
+
+ if self._compress_into_buffer(out_buffer):
+ return out_buffer.pos
+
+ while not self._finished_input:
+ self._read_input()
+ if self._compress_into_buffer(out_buffer):
+ return out_buffer.pos
+
+ # EOF.
+ old_pos = out_buffer.pos
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+ )
+
+ self._bytes_compressed += out_buffer.pos - old_pos
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s", _zstd_error(zresult)
+ )
+
+ if zresult == 0:
+ self._finished_output = True
+
+ return out_buffer.pos
+
+ def readinto1(self, b):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._finished_output:
+ return 0
+
+ # TODO use writable=True once we require CFFI >= 1.12.
+ dest_buffer = ffi.from_buffer(b)
+ ffi.memmove(b, b"", 0)
+
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dest_buffer
+ out_buffer.size = len(dest_buffer)
+ out_buffer.pos = 0
+
+ self._compress_into_buffer(out_buffer)
+ if out_buffer.pos:
+ return out_buffer.pos
+
+ while not self._finished_input:
+ self._read_input()
+
+ if self._compress_into_buffer(out_buffer):
+ return out_buffer.pos
+
+ if out_buffer.pos and not self._finished_input:
+ return out_buffer.pos
+
+ # EOF.
+ old_pos = out_buffer.pos
+
+ zresult = lib.ZSTD_compressStream2(
+ self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+ )
+
+ self._bytes_compressed += out_buffer.pos - old_pos
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s" % _zstd_error(zresult)
+ )
+
+ if zresult == 0:
+ self._finished_output = True
+
+ return out_buffer.pos
+
+
+class ZstdCompressor(object):
+ """
+ Create an object used to perform Zstandard compression.
+
+ Each instance is essentially a wrapper around a ``ZSTD_CCtx`` from
+ zstd's C API.
+
+ An instance can compress data various ways. Instances can be used
+ multiple times. Each compression operation will use the compression
+ parameters defined at construction time.
+
+ .. note:
+
+ When using a compression dictionary and multiple compression
+ operations are performed, the ``ZstdCompressionParameters`` derived
+ from an integer compression ``level`` and the first compressed data's
+ size will be reused for all subsequent operations. This may not be
+ desirable if source data sizes vary significantly.
+
+ ``compression_params`` is mutually exclusive with ``level``,
+ ``write_checksum``, ``write_content_size``, ``write_dict_id``, and
+ ``threads``.
+
+ Assume that each ``ZstdCompressor`` instance can only handle a single
+ logical compression operation at the same time. i.e. if you call a method
+ like ``stream_reader()`` to obtain multiple objects derived from the same
+ ``ZstdCompressor`` instance and attempt to use them simultaneously, errors
+ will likely occur.
+
+ If you need to perform multiple logical compression operations and you
+ can't guarantee those operations are temporally non-overlapping, you need
+ to obtain multiple ``ZstdCompressor`` instances.
+
+ Unless specified otherwise, assume that no two methods of
+ ``ZstdCompressor`` instances can be called from multiple Python
+ threads simultaneously. In other words, assume instances are not thread safe
+ unless stated otherwise.
+
+ :param level:
+ Integer compression level. Valid values are all negative integers
+ through 22. Lower values generally yield faster operations with lower
+ compression ratios. Higher values are generally slower but compress
+ better. The default is 3, which is what the ``zstd`` CLI uses. Negative
+ levels effectively engage ``--fast`` mode from the ``zstd`` CLI.
+ :param dict_data:
+ A ``ZstdCompressionDict`` to be used to compress with dictionary
+ data.
+ :param compression_params:
+ A ``ZstdCompressionParameters`` instance defining low-level compression
+ parameters. If defined, this will overwrite the ``level`` argument.
+ :param write_checksum:
+ If True, a 4 byte content checksum will be written with the compressed
+ data, allowing the decompressor to perform content verification.
+ :param write_content_size:
+ If True (the default), the decompressed content size will be included
+ in the header of the compressed data. This data will only be written if
+ the compressor knows the size of the input data.
+ :param write_dict_id:
+ Determines whether the dictionary ID will be written into the compressed
+ data. Defaults to True. Only adds content to the compressed data if
+ a dictionary is being used.
+ :param threads:
+ Number of threads to use to compress data concurrently. When set,
+ compression operations are performed on multiple threads. The default
+ value (0) disables multi-threaded compression. A value of ``-1`` means
+ to set the number of threads to the number of detected logical CPUs.
+ """
+
+ def __init__(
+ self,
+ level=3,
+ dict_data=None,
+ compression_params=None,
+ write_checksum=None,
+ write_content_size=None,
+ write_dict_id=None,
+ threads=0,
+ ):
+ if level > lib.ZSTD_maxCLevel():
+ raise ValueError(
+ "level must be less than %d" % lib.ZSTD_maxCLevel()
+ )
+
+ if threads < 0:
+ threads = _cpu_count()
+
+ if compression_params and write_checksum is not None:
+ raise ValueError(
+ "cannot define compression_params and " "write_checksum"
+ )
+
+ if compression_params and write_content_size is not None:
+ raise ValueError(
+ "cannot define compression_params and " "write_content_size"
+ )
+
+ if compression_params and write_dict_id is not None:
+ raise ValueError(
+ "cannot define compression_params and " "write_dict_id"
+ )
+
+ if compression_params and threads:
+ raise ValueError("cannot define compression_params and threads")
+
+ if compression_params:
+ self._params = _make_cctx_params(compression_params)
+ else:
+ if write_dict_id is None:
+ write_dict_id = True
+
+ params = lib.ZSTD_createCCtxParams()
+ if params == ffi.NULL:
+ raise MemoryError()
+
+ self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
+
+ _set_compression_parameter(
+ self._params, lib.ZSTD_c_compressionLevel, level
+ )
+
+ _set_compression_parameter(
+ self._params,
+ lib.ZSTD_c_contentSizeFlag,
+ write_content_size if write_content_size is not None else 1,
+ )
+
+ _set_compression_parameter(
+ self._params,
+ lib.ZSTD_c_checksumFlag,
+ 1 if write_checksum else 0,
+ )
+
+ _set_compression_parameter(
+ self._params, lib.ZSTD_c_dictIDFlag, 1 if write_dict_id else 0
+ )
+
+ if threads:
+ _set_compression_parameter(
+ self._params, lib.ZSTD_c_nbWorkers, threads
+ )
+
+ cctx = lib.ZSTD_createCCtx()
+ if cctx == ffi.NULL:
+ raise MemoryError()
+
+ self._cctx = cctx
+ self._dict_data = dict_data
+
+ # We defer setting up garbage collection until after calling
+ # _setup_cctx() to ensure the memory size estimate is more accurate.
+ try:
+ self._setup_cctx()
+ finally:
+ self._cctx = ffi.gc(
+ cctx, lib.ZSTD_freeCCtx, size=lib.ZSTD_sizeof_CCtx(cctx)
+ )
+
+ def _setup_cctx(self):
+ zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+ self._cctx, self._params
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "could not set compression parameters: %s"
+ % _zstd_error(zresult)
+ )
+
+ dict_data = self._dict_data
+
+ if dict_data:
+ if dict_data._cdict:
+ zresult = lib.ZSTD_CCtx_refCDict(self._cctx, dict_data._cdict)
+ else:
+ zresult = lib.ZSTD_CCtx_loadDictionary_advanced(
+ self._cctx,
+ dict_data.as_bytes(),
+ len(dict_data),
+ lib.ZSTD_dlm_byRef,
+ dict_data._dict_type,
+ )
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "could not load compression dictionary: %s"
+ % _zstd_error(zresult)
+ )
+
+ def memory_size(self):
+ """Obtain the memory usage of this compressor, in bytes.
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> memory = cctx.memory_size()
+ """
+ return lib.ZSTD_sizeof_CCtx(self._cctx)
+
+ def compress(self, data):
+ """
+ Compress data in a single operation.
+
+ This is the simplest mechanism to perform compression: simply pass in a
+ value and get a compressed value back. It is almost the most prone to
+ abuse.
+
+ The input and output values must fit in memory, so passing in very large
+ values can result in excessive memory usage. For this reason, one of the
+ streaming based APIs is preferred for larger values.
+
+ :param data:
+ Source data to compress
+ :return:
+ Compressed data
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> compressed = cctx.compress(b"data to compress")
+ """
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ data_buffer = ffi.from_buffer(data)
+
+ dest_size = lib.ZSTD_compressBound(len(data_buffer))
+ out = new_nonzero("char[]", dest_size)
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+
+ out_buffer.dst = out
+ out_buffer.size = dest_size
+ out_buffer.pos = 0
+
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ zresult = lib.ZSTD_compressStream2(
+ self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+ )
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError("cannot compress: %s" % _zstd_error(zresult))
+ elif zresult:
+ raise ZstdError("unexpected partial frame flush")
+
+ return ffi.buffer(out, out_buffer.pos)[:]
+
+ def compressobj(self, size=-1):
+ """
+ Obtain a compressor exposing the Python standard library compression API.
+
+ See :py:class:`ZstdCompressionObj` for the full documentation.
+
+ :param size:
+ Size in bytes of data that will be compressed.
+ :return:
+ :py:class:`ZstdCompressionObj`
+ """
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ cobj = ZstdCompressionObj()
+ cobj._out = ffi.new("ZSTD_outBuffer *")
+ cobj._dst_buffer = ffi.new(
+ "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ )
+ cobj._out.dst = cobj._dst_buffer
+ cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ cobj._out.pos = 0
+ cobj._compressor = self
+ cobj._finished = False
+
+ return cobj
+
+ def chunker(self, size=-1, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+ """
+ Create an object for iterative compressing to same-sized chunks.
+
+ This API is similar to :py:meth:`ZstdCompressor.compressobj` but has
+ better performance properties.
+
+ :param size:
+ Size in bytes of data that will be compressed.
+ :param chunk_size:
+ Size of compressed chunks.
+ :return:
+ :py:class:`ZstdCompressionChunker`
+ """
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ return ZstdCompressionChunker(self, chunk_size=chunk_size)
+
+ def copy_stream(
+ self,
+ ifh,
+ ofh,
+ size=-1,
+ read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+ write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ ):
+ """
+ Copy data between 2 streams while compressing it.
+
+ Data will be read from ``ifh``, compressed, and written to ``ofh``.
+ ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a
+ ``write(data)``
+ method.
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
+ ... cctx.copy_stream(ifh, ofh)
+
+ It is also possible to declare the size of the source stream:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> cctx.copy_stream(ifh, ofh, size=len_of_input)
+
+ You can also specify how large the chunks that are ``read()``
+ and ``write()`` from and to the streams:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> cctx.copy_stream(ifh, ofh, read_size=32768, write_size=16384)
+
+ The stream copier returns a 2-tuple of bytes read and written:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> read_count, write_count = cctx.copy_stream(ifh, ofh)
+
+ :param ifh:
+ Source stream to read from
+ :param ofh:
+ Destination stream to write to
+ :param size:
+ Size in bytes of the source stream. If defined, compression
+ parameters will be tuned for this size.
+ :param read_size:
+ Chunk sizes that source stream should be ``read()`` from.
+ :param write_size:
+ Chunk sizes that destination stream should be ``write()`` to.
+ :return:
+ 2-tuple of ints of bytes read and written, respectively.
+ """
+
+ if not hasattr(ifh, "read"):
+ raise ValueError("first argument must have a read() method")
+ if not hasattr(ofh, "write"):
+ raise ValueError("second argument must have a write() method")
+
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ dst_buffer = ffi.new("char[]", write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = write_size
+ out_buffer.pos = 0
+
+ total_read, total_write = 0, 0
+
+ while True:
+ data = ifh.read(read_size)
+ if not data:
+ break
+
+ data_buffer = ffi.from_buffer(data)
+ total_read += len(data_buffer)
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ while in_buffer.pos < in_buffer.size:
+ zresult = lib.ZSTD_compressStream2(
+ self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+ total_write += out_buffer.pos
+ out_buffer.pos = 0
+
+ # We've finished reading. Flush the compressor.
+ while True:
+ zresult = lib.ZSTD_compressStream2(
+ self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+ total_write += out_buffer.pos
+ out_buffer.pos = 0
+
+ if zresult == 0:
+ break
+
+ return total_read, total_write
+
+ def stream_reader(
+ self,
+ source,
+ size=-1,
+ read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+ closefd=True,
+ ):
+ """
+ Wrap a readable source with a stream that can read compressed data.
+
+ This will produce an object conforming to the ``io.RawIOBase``
+ interface which can be ``read()`` from to retrieve compressed data
+ from a source.
+
+ The source object can be any object with a ``read(size)`` method
+ or an object that conforms to the buffer protocol.
+
+ See :py:class:`ZstdCompressionReader` for type documentation and usage
+ examples.
+
+ :param source:
+ Object to read source data from
+ :param size:
+ Size in bytes of source object.
+ :param read_size:
+ How many bytes to request when ``read()``'ing from the source.
+ :param closefd:
+ Whether to close the source stream when the returned stream is
+ closed.
+ :return:
+ :py:class:`ZstdCompressionReader`
+ """
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ try:
+ size = len(source)
+ except Exception:
+ pass
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ return ZstdCompressionReader(self, source, read_size, closefd=closefd)
+
+ def stream_writer(
+ self,
+ writer,
+ size=-1,
+ write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ write_return_read=True,
+ closefd=True,
+ ):
+ """
+ Create a stream that will write compressed data into another stream.
+
+ The argument to ``stream_writer()`` must have a ``write(data)`` method.
+ As compressed data is available, ``write()`` will be called with the
+ compressed data as its argument. Many common Python types implement
+ ``write()``, including open file handles and ``io.BytesIO``.
+
+ See :py:class:`ZstdCompressionWriter` for more documentation, including
+ usage examples.
+
+ :param writer:
+ Stream to write compressed data to.
+ :param size:
+ Size in bytes of data to be compressed. If set, it will be used
+ to influence compression parameter tuning and could result in the
+ size being written into the header of the compressed data.
+ :param write_size:
+ How much data to ``write()`` to ``writer`` at a time.
+ :param write_return_read:
+ Whether ``write()`` should return the number of bytes that were
+ consumed from the input.
+ :param closefd:
+ Whether to ``close`` the ``writer`` when this stream is closed.
+ :return:
+ :py:class:`ZstdCompressionWriter`
+ """
+ if not hasattr(writer, "write"):
+ raise ValueError("must pass an object with a write() method")
+
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ return ZstdCompressionWriter(
+ self, writer, size, write_size, write_return_read, closefd=closefd
+ )
+
+ def read_to_iter(
+ self,
+ reader,
+ size=-1,
+ read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+ write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ ):
+ """
+ Read uncompressed data from a reader and return an iterator
+
+ Returns an iterator of compressed data produced from reading from
+ ``reader``.
+
+ This method provides a mechanism to stream compressed data out of a
+ source as an iterator of data chunks.
+
+ Uncompressed data will be obtained from ``reader`` by calling the
+ ``read(size)`` method of it or by reading a slice (if ``reader``
+ conforms to the *buffer protocol*). The source data will be streamed
+ into a compressor. As compressed data is available, it will be exposed
+ to the iterator.
+
+ Data is read from the source in chunks of ``read_size``. Compressed
+ chunks are at most ``write_size`` bytes. Both values default to the
+ zstd input and and output defaults, respectively.
+
+ If reading from the source via ``read()``, ``read()`` will be called
+ until it raises or returns an empty bytes (``b""``). It is perfectly
+ valid for the source to deliver fewer bytes than were what requested
+ by ``read(size)``.
+
+ The caller is partially in control of how fast data is fed into the
+ compressor by how it consumes the returned iterator. The compressor
+ will not consume from the reader unless the caller consumes from the
+ iterator.
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> for chunk in cctx.read_to_iter(fh):
+ ... # Do something with emitted data.
+
+ ``read_to_iter()`` accepts a ``size`` argument declaring the size of
+ the input stream:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> for chunk in cctx.read_to_iter(fh, size=some_int):
+ >>> pass
+
+ You can also control the size that data is ``read()`` from the source
+ and the ideal size of output chunks:
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> for chunk in cctx.read_to_iter(fh, read_size=16384, write_size=8192):
+ >>> pass
+
+ ``read_to_iter()`` does not give direct control over the sizes of chunks
+ fed into the compressor. Instead, chunk sizes will be whatever the object
+ being read from delivers. These will often be of a uniform size.
+
+ :param reader:
+ Stream providing data to be compressed.
+ :param size:
+ Size in bytes of input data.
+ :param read_size:
+ Controls how many bytes are ``read()`` from the source.
+ :param write_size:
+ Controls the output size of emitted chunks.
+ :return:
+ Iterator of ``bytes``.
+ """
+
+ if hasattr(reader, "read"):
+ have_read = True
+ elif hasattr(reader, "__getitem__"):
+ have_read = False
+ buffer_offset = 0
+ size = len(reader)
+ else:
+ raise ValueError(
+ "must pass an object with a read() method or "
+ "conforms to buffer protocol"
+ )
+
+ lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+ if size < 0:
+ size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+ zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ in_buffer.src = ffi.NULL
+ in_buffer.size = 0
+ in_buffer.pos = 0
+
+ dst_buffer = ffi.new("char[]", write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = write_size
+ out_buffer.pos = 0
+
+ while True:
+ # We should never have output data sitting around after a previous
+ # iteration.
+ assert out_buffer.pos == 0
+
+ # Collect input data.
+ if have_read:
+ read_result = reader.read(read_size)
+ else:
+ remaining = len(reader) - buffer_offset
+ slice_size = min(remaining, read_size)
+ read_result = reader[buffer_offset : buffer_offset + slice_size]
+ buffer_offset += slice_size
+
+ # No new input data. Break out of the read loop.
+ if not read_result:
+ break
+
+ # Feed all read data into the compressor and emit output until
+ # exhausted.
+ read_buffer = ffi.from_buffer(read_result)
+ in_buffer.src = read_buffer
+ in_buffer.size = len(read_buffer)
+ in_buffer.pos = 0
+
+ while in_buffer.pos < in_buffer.size:
+ zresult = lib.ZSTD_compressStream2(
+ self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ out_buffer.pos = 0
+ yield data
+
+ assert out_buffer.pos == 0
+
+ # And repeat the loop to collect more data.
+ continue
+
+ # If we get here, input is exhausted. End the stream and emit what
+ # remains.
+ while True:
+ assert out_buffer.pos == 0
+ zresult = lib.ZSTD_compressStream2(
+ self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "error ending compression stream: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ out_buffer.pos = 0
+ yield data
+
+ if zresult == 0:
+ break
+
+ def multi_compress_to_buffer(self, data, threads=-1):
+ """
+ Compress multiple pieces of data as a single function call.
+
+ (Experimental. Not yet supported by CFFI backend.)
+
+ This function is optimized to perform multiple compression operations
+ as as possible with as little overhead as possible.
+
+ Data to be compressed can be passed as a ``BufferWithSegmentsCollection``,
+ a ``BufferWithSegments``, or a list containing byte like objects. Each
+ element of the container will be compressed individually using the
+ configured parameters on the ``ZstdCompressor`` instance.
+
+ The ``threads`` argument controls how many threads to use for
+ compression. The default is ``0`` which means to use a single thread.
+ Negative values use the number of logical CPUs in the machine.
+
+ The function returns a ``BufferWithSegmentsCollection``. This type
+ represents N discrete memory allocations, each holding 1 or more
+ compressed frames.
+
+ Output data is written to shared memory buffers. This means that unlike
+ regular Python objects, a reference to *any* object within the collection
+ keeps the shared buffer and therefore memory backing it alive. This can
+ have undesirable effects on process memory usage.
+
+ The API and behavior of this function is experimental and will likely
+ change. Known deficiencies include:
+
+ * If asked to use multiple threads, it will always spawn that many
+ threads, even if the input is too small to use them. It should
+ automatically lower the thread count when the extra threads would
+ just add overhead.
+ * The buffer allocation strategy is fixed. There is room to make it
+ dynamic, perhaps even to allow one output buffer per input,
+ facilitating a variation of the API to return a list without the
+ adverse effects of shared memory buffers.
+
+ :param data:
+ Source to read discrete pieces of data to compress.
+
+ Can be a ``BufferWithSegmentsCollection``, a ``BufferWithSegments``,
+ or a ``list[bytes]``.
+ :return:
+ BufferWithSegmentsCollection holding compressed data.
+ """
+ raise NotImplementedError()
+
+ def frame_progression(self):
+ """
+ Return information on how much work the compressor has done.
+
+ Returns a 3-tuple of (ingested, consumed, produced).
+
+ >>> cctx = zstandard.ZstdCompressor()
+ >>> (ingested, consumed, produced) = cctx.frame_progression()
+ """
+ progression = lib.ZSTD_getFrameProgression(self._cctx)
+
+ return progression.ingested, progression.consumed, progression.produced
+
+
+class FrameParameters(object):
+ """Information about a zstd frame.
+
+ Instances have the following attributes:
+
+ ``content_size``
+ Integer size of original, uncompressed content. This will be ``0`` if the
+ original content size isn't written to the frame (controlled with the
+ ``write_content_size`` argument to ``ZstdCompressor``) or if the input
+ content size was ``0``.
+
+ ``window_size``
+ Integer size of maximum back-reference distance in compressed data.
+
+ ``dict_id``
+ Integer of dictionary ID used for compression. ``0`` if no dictionary
+ ID was used or if the dictionary ID was ``0``.
+
+ ``has_checksum``
+ Bool indicating whether a 4 byte content checksum is stored at the end
+ of the frame.
+ """
+
+ def __init__(self, fparams):
+ self.content_size = fparams.frameContentSize
+ self.window_size = fparams.windowSize
+ self.dict_id = fparams.dictID
+ self.has_checksum = bool(fparams.checksumFlag)
+
+
+def frame_content_size(data):
+ """Obtain the decompressed size of a frame.
+
+ The returned value is usually accurate. But strictly speaking it should
+ not be trusted.
+
+ :return:
+ ``-1`` if size unknown and a non-negative integer otherwise.
+ """
+ data_buffer = ffi.from_buffer(data)
+
+ size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+
+ if size == lib.ZSTD_CONTENTSIZE_ERROR:
+ raise ZstdError("error when determining content size")
+ elif size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+ return -1
+ else:
+ return size
+
+
+def frame_header_size(data):
+ """Obtain the size of a frame header.
+
+ :return:
+ Integer size in bytes.
+ """
+ data_buffer = ffi.from_buffer(data)
+
+ zresult = lib.ZSTD_frameHeaderSize(data_buffer, len(data_buffer))
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "could not determine frame header size: %s" % _zstd_error(zresult)
+ )
+
+ return zresult
+
+
+def get_frame_parameters(data):
+ """
+ Parse a zstd frame header into frame parameters.
+
+ Depending on which fields are present in the frame and their values, the
+ length of the frame parameters varies. If insufficient bytes are passed
+ in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure
+ frame parameters can be parsed, pass in at least 18 bytes.
+
+ :param data:
+ Data from which to read frame parameters.
+ :return:
+ :py:class:`FrameParameters`
+ """
+ params = ffi.new("ZSTD_frameHeader *")
+
+ data_buffer = ffi.from_buffer(data)
+ zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "cannot get frame parameters: %s" % _zstd_error(zresult)
+ )
+
+ if zresult:
+ raise ZstdError(
+ "not enough data for frame parameters; need %d bytes" % zresult
+ )
+
+ return FrameParameters(params[0])
+
+
+class ZstdCompressionDict(object):
+ """Represents a computed compression dictionary.
+
+ Instances are obtained by calling :py:func:`train_dictionary` or by
+ passing bytes obtained from another source into the constructor.
+
+ Instances can be constructed from bytes:
+
+ >>> dict_data = zstandard.ZstdCompressionDict(data)
+
+ It is possible to construct a dictionary from *any* data. If the data
+ doesn't begin with a magic header, it will be treated as a *prefix*
+ dictionary. *Prefix* dictionaries allow compression operations to
+ reference raw data within the dictionary.
+
+ It is possible to force the use of *prefix* dictionaries or to require
+ a dictionary header:
+
+ >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_RAWCONTENT)
+ >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_FULLDICT)
+
+ You can see how many bytes are in the dictionary by calling ``len()``:
+
+ >>> dict_data = zstandard.train_dictionary(size, samples)
+ >>> dict_size = len(dict_data) # will not be larger than ``size``
+
+ Once you have a dictionary, you can pass it to the objects performing
+ compression and decompression:
+
+ >>> dict_data = zstandard.train_dictionary(131072, samples)
+ >>> cctx = zstandard.ZstdCompressor(dict_data=dict_data)
+ >>> for source_data in input_data:
+ ... compressed = cctx.compress(source_data)
+ ... # Do something with compressed data.
+ ...
+ >>> dctx = zstandard.ZstdDecompressor(dict_data=dict_data)
+ >>> for compressed_data in input_data:
+ ... buffer = io.BytesIO()
+ ... with dctx.stream_writer(buffer) as decompressor:
+ ... decompressor.write(compressed_data)
+ ... # Do something with raw data in ``buffer``.
+
+ Dictionaries have unique integer IDs. You can retrieve this ID via:
+
+ >>> dict_id = zstandard.dictionary_id(dict_data)
+
+ You can obtain the raw data in the dict (useful for persisting and constructing
+ a ``ZstdCompressionDict`` later) via ``as_bytes()``:
+
+ >>> dict_data = zstandard.train_dictionary(size, samples)
+ >>> raw_data = dict_data.as_bytes()
+
+ By default, when a ``ZstdCompressionDict`` is *attached* to a
+ ``ZstdCompressor``, each ``ZstdCompressor`` performs work to prepare the
+ dictionary for use. This is fine if only 1 compression operation is being
+ performed or if the ``ZstdCompressor`` is being reused for multiple operations.
+ But if multiple ``ZstdCompressor`` instances are being used with the dictionary,
+ this can add overhead.
+
+ It is possible to *precompute* the dictionary so it can readily be consumed
+ by multiple ``ZstdCompressor`` instances:
+
+ >>> d = zstandard.ZstdCompressionDict(data)
+ >>> # Precompute for compression level 3.
+ >>> d.precompute_compress(level=3)
+ >>> # Precompute with specific compression parameters.
+ >>> params = zstandard.ZstdCompressionParameters(...)
+ >>> d.precompute_compress(compression_params=params)
+
+ .. note::
+
+ When a dictionary is precomputed, the compression parameters used to
+ precompute the dictionary overwrite some of the compression parameters
+ specified to ``ZstdCompressor``.
+
+ :param data:
+ Dictionary data.
+ :param dict_type:
+ Type of dictionary. One of the ``DICT_TYPE_*`` constants.
+ """
+
+ def __init__(self, data, dict_type=DICT_TYPE_AUTO, k=0, d=0):
+ assert isinstance(data, bytes)
+ self._data = data
+ self.k = k
+ self.d = d
+
+ if dict_type not in (
+ DICT_TYPE_AUTO,
+ DICT_TYPE_RAWCONTENT,
+ DICT_TYPE_FULLDICT,
+ ):
+ raise ValueError(
+ "invalid dictionary load mode: %d; must use "
+ "DICT_TYPE_* constants"
+ )
+
+ self._dict_type = dict_type
+ self._cdict = None
+
+ def __len__(self):
+ return len(self._data)
+
+ def dict_id(self):
+ """Obtain the integer ID of the dictionary."""
+ return int(lib.ZDICT_getDictID(self._data, len(self._data)))
+
+ def as_bytes(self):
+ """Obtain the ``bytes`` representation of the dictionary."""
+ return self._data
+
+ def precompute_compress(self, level=0, compression_params=None):
+ """Precompute a dictionary os it can be used by multiple compressors.
+
+ Calling this method on an instance that will be used by multiple
+ :py:class:`ZstdCompressor` instances will improve performance.
+ """
+ if level and compression_params:
+ raise ValueError(
+ "must only specify one of level or " "compression_params"
+ )
+
+ if not level and not compression_params:
+ raise ValueError("must specify one of level or compression_params")
+
+ if level:
+ cparams = lib.ZSTD_getCParams(level, 0, len(self._data))
+ else:
+ cparams = ffi.new("ZSTD_compressionParameters")
+ cparams.chainLog = compression_params.chain_log
+ cparams.hashLog = compression_params.hash_log
+ cparams.minMatch = compression_params.min_match
+ cparams.searchLog = compression_params.search_log
+ cparams.strategy = compression_params.strategy
+ cparams.targetLength = compression_params.target_length
+ cparams.windowLog = compression_params.window_log
+
+ cdict = lib.ZSTD_createCDict_advanced(
+ self._data,
+ len(self._data),
+ lib.ZSTD_dlm_byRef,
+ self._dict_type,
+ cparams,
+ lib.ZSTD_defaultCMem,
+ )
+ if cdict == ffi.NULL:
+ raise ZstdError("unable to precompute dictionary")
+
+ self._cdict = ffi.gc(
+ cdict, lib.ZSTD_freeCDict, size=lib.ZSTD_sizeof_CDict(cdict)
+ )
+
+ @property
+ def _ddict(self):
+ ddict = lib.ZSTD_createDDict_advanced(
+ self._data,
+ len(self._data),
+ lib.ZSTD_dlm_byRef,
+ self._dict_type,
+ lib.ZSTD_defaultCMem,
+ )
+
+ if ddict == ffi.NULL:
+ raise ZstdError("could not create decompression dict")
+
+ ddict = ffi.gc(
+ ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+ )
+ self.__dict__["_ddict"] = ddict
+
+ return ddict
+
+
+def train_dictionary(
+ dict_size,
+ samples,
+ k=0,
+ d=0,
+ f=0,
+ split_point=0.0,
+ accel=0,
+ notifications=0,
+ dict_id=0,
+ level=0,
+ steps=0,
+ threads=0,
+):
+ """Train a dictionary from sample data using the COVER algorithm.
+
+ A compression dictionary of size ``dict_size`` will be created from the
+ iterable of ``samples``. The raw dictionary bytes will be returned.
+
+ The dictionary training mechanism is known as *cover*. More details about it
+ are available in the paper *Effective Construction of Relative Lempel-Ziv
+ Dictionaries* (authors: Liao, Petri, Moffat, Wirth).
+
+ The cover algorithm takes parameters ``k`` and ``d``. These are the
+ *segment size* and *dmer size*, respectively. The returned dictionary
+ instance created by this function has ``k`` and ``d`` attributes
+ containing the values for these parameters. If a ``ZstdCompressionDict``
+ is constructed from raw bytes data (a content-only dictionary), the
+ ``k`` and ``d`` attributes will be ``0``.
+
+ The segment and dmer size parameters to the cover algorithm can either be
+ specified manually or ``train_dictionary()`` can try multiple values
+ and pick the best one, where *best* means the smallest compressed data size.
+ This later mode is called *optimization* mode.
+
+ Under the hood, this function always calls
+ ``ZDICT_optimizeTrainFromBuffer_fastCover()``. See the corresponding C library
+ documentation for more.
+
+ If neither ``steps`` nor ``threads`` is defined, defaults for ``d``, ``steps``,
+ and ``level`` will be used that are equivalent with what
+ ``ZDICT_trainFromBuffer()`` would use.
+
+
+ :param dict_size:
+ Target size in bytes of the dictionary to generate.
+ :param samples:
+ A list of bytes holding samples the dictionary will be trained from.
+ :param k:
+ Segment size : constraint: 0 < k : Reasonable range [16, 2048+]
+ :param d:
+ dmer size : constraint: 0 < d <= k : Reasonable range [6, 16]
+ :param f:
+ log of size of frequency array : constraint: 0 < f <= 31 : 1 means
+ default(20)
+ :param split_point:
+ Percentage of samples used for training: Only used for optimization.
+ The first # samples * ``split_point`` samples will be used to training.
+ The last # samples * (1 - split_point) samples will be used for testing.
+ 0 means default (0.75), 1.0 when all samples are used for both training
+ and testing.
+ :param accel:
+ Acceleration level: constraint: 0 < accel <= 10. Higher means faster
+ and less accurate, 0 means default(1).
+ :param dict_id:
+ Integer dictionary ID for the produced dictionary. Default is 0, which uses
+ a random value.
+ :param steps:
+ Number of steps through ``k`` values to perform when trying parameter
+ variations.
+ :param threads:
+ Number of threads to use when trying parameter variations. Default is 0,
+ which means to use a single thread. A negative value can be specified to
+ use as many threads as there are detected logical CPUs.
+ :param level:
+ Integer target compression level when trying parameter variations.
+ :param notifications:
+ Controls writing of informational messages to ``stderr``. ``0`` (the
+ default) means to write nothing. ``1`` writes errors. ``2`` writes
+ progression info. ``3`` writes more details. And ``4`` writes all info.
+ """
+
+ if not isinstance(samples, list):
+ raise TypeError("samples must be a list")
+
+ if threads < 0:
+ threads = _cpu_count()
+
+ if not steps and not threads:
+ d = d or 8
+ steps = steps or 4
+ level = level or 3
+
+ total_size = sum(map(len, samples))
+
+ samples_buffer = new_nonzero("char[]", total_size)
+ sample_sizes = new_nonzero("size_t[]", len(samples))
+
+ offset = 0
+ for i, sample in enumerate(samples):
+ if not isinstance(sample, bytes):
+ raise ValueError("samples must be bytes")
+
+ l = len(sample)
+ ffi.memmove(samples_buffer + offset, sample, l)
+ offset += l
+ sample_sizes[i] = l
+
+ dict_data = new_nonzero("char[]", dict_size)
+
+ dparams = ffi.new("ZDICT_fastCover_params_t *")[0]
+ dparams.k = k
+ dparams.d = d
+ dparams.f = f
+ dparams.steps = steps
+ dparams.nbThreads = threads
+ dparams.splitPoint = split_point
+ dparams.accel = accel
+ dparams.zParams.notificationLevel = notifications
+ dparams.zParams.dictID = dict_id
+ dparams.zParams.compressionLevel = level
+
+ zresult = lib.ZDICT_optimizeTrainFromBuffer_fastCover(
+ ffi.addressof(dict_data),
+ dict_size,
+ ffi.addressof(samples_buffer),
+ ffi.addressof(sample_sizes, 0),
+ len(samples),
+ ffi.addressof(dparams),
+ )
+
+ if lib.ZDICT_isError(zresult):
+ msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode("utf-8")
+ raise ZstdError("cannot train dict: %s" % msg)
+
+ return ZstdCompressionDict(
+ ffi.buffer(dict_data, zresult)[:],
+ dict_type=DICT_TYPE_FULLDICT,
+ k=dparams.k,
+ d=dparams.d,
+ )
+
+
+class ZstdDecompressionObj(object):
+ """A standard library API compatible decompressor.
+
+ This type implements a compressor that conforms to the API by other
+ decompressors in Python's standard library. e.g. ``zlib.decompressobj``
+ or ``bz2.BZ2Decompressor``. This allows callers to use zstd compression
+ while conforming to a similar API.
+
+ Compressed data chunks are fed into ``decompress(data)`` and
+ uncompressed output (or an empty bytes) is returned. Output from
+ subsequent calls needs to be concatenated to reassemble the full
+ decompressed byte sequence.
+
+ Each instance is single use: once an input frame is decoded,
+ ``decompress()`` can no longer be called.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> dobj = dctx.decompressobj()
+ >>> data = dobj.decompress(compressed_chunk_0)
+ >>> data = dobj.decompress(compressed_chunk_1)
+
+ By default, calls to ``decompress()`` write output data in chunks of size
+ ``DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE``. These chunks are concatenated
+ before being returned to the caller. It is possible to define the size of
+ these temporary chunks by passing ``write_size`` to ``decompressobj()``:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> dobj = dctx.decompressobj(write_size=1048576)
+
+ .. note::
+
+ Because calls to ``decompress()`` may need to perform multiple
+ memory (re)allocations, this streaming decompression API isn't as
+ efficient as other APIs.
+ """
+
+ def __init__(self, decompressor, write_size):
+ self._decompressor = decompressor
+ self._write_size = write_size
+ self._finished = False
+ self._unused_input = b""
+
+ def decompress(self, data):
+ """Send compressed data to the decompressor and obtain decompressed data.
+
+ :param data:
+ Data to feed into the decompressor.
+ :return:
+ Decompressed bytes.
+ """
+ if self._finished:
+ raise ZstdError("cannot use a decompressobj multiple times")
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ data_buffer = ffi.from_buffer(data)
+
+ if len(data_buffer) == 0:
+ return b""
+
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ dst_buffer = ffi.new("char[]", self._write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = len(dst_buffer)
+ out_buffer.pos = 0
+
+ chunks = []
+
+ while True:
+ zresult = lib.ZSTD_decompressStream(
+ self._decompressor._dctx, out_buffer, in_buffer
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd decompressor error: %s" % _zstd_error(zresult)
+ )
+
+ # Always record any output from decompressor.
+ if out_buffer.pos:
+ chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+
+ # 0 is only seen when a frame is fully decoded *and* fully flushed.
+ # But there may be extra input data: make that available to
+ # `unused_input`.
+ if zresult == 0:
+ self._finished = True
+ self._decompressor = None
+ self._unused_input = data[in_buffer.pos : in_buffer.size]
+ break
+
+ # We're not at the end of the frame *or* we're not fully flushed.
+
+ # The decompressor will write out all the bytes it can to the output
+ # buffer. So if the output buffer is partially filled and the input
+ # is exhausted, there's nothing more to write. So we've done all we
+ # can.
+ elif (
+ in_buffer.pos == in_buffer.size
+ and out_buffer.pos < out_buffer.size
+ ):
+ break
+ else:
+ out_buffer.pos = 0
+
+ return b"".join(chunks)
+
+ def flush(self, length=0):
+ """Effectively a no-op.
+
+ Implemented for compatibility with the standard library APIs.
+
+ Safe to call at any time.
+
+ :return:
+ Empty bytes.
+ """
+ return b""
+
+ @property
+ def unused_data(self):
+ """Bytes past the end of compressed data.
+
+ If ``decompress()`` is fed additional data beyond the end of a zstd
+ frame, this value will be non-empty once ``decompress()`` fully decodes
+ the input frame.
+ """
+ return self._unused_input
+
+ @property
+ def unconsumed_tail(self):
+ """Data that has not yet been fed into the decompressor."""
+ return b""
+
+ @property
+ def eof(self):
+ """Whether the end of the compressed data stream has been reached."""
+ return self._finished
+
+
+class ZstdDecompressionReader(object):
+ """Read only decompressor that pull uncompressed data from another stream.
+
+ This type provides a read-only stream interface for performing transparent
+ decompression from another stream or data source. It conforms to the
+ ``io.RawIOBase`` interface. Only methods relevant to reading are
+ implemented.
+
+ >>> with open(path, 'rb') as fh:
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> reader = dctx.stream_reader(fh)
+ >>> while True:
+ ... chunk = reader.read(16384)
+ ... if not chunk:
+ ... break
+ ... # Do something with decompressed chunk.
+
+ The stream can also be used as a context manager:
+
+ >>> with open(path, 'rb') as fh:
+ ... dctx = zstandard.ZstdDecompressor()
+ ... with dctx.stream_reader(fh) as reader:
+ ... ...
+
+ When used as a context manager, the stream is closed and the underlying
+ resources are released when the context manager exits. Future operations
+ against the stream will fail.
+
+ The ``source`` argument to ``stream_reader()`` can be any object with a
+ ``read(size)`` method or any object implementing the *buffer protocol*.
+
+ If the ``source`` is a stream, you can specify how large ``read()`` requests
+ to that stream should be via the ``read_size`` argument. It defaults to
+ ``zstandard.DECOMPRESSION_RECOMMENDED_INPUT_SIZE``.:
+
+ >>> with open(path, 'rb') as fh:
+ ... dctx = zstandard.ZstdDecompressor()
+ ... # Will perform fh.read(8192) when obtaining data for the decompressor.
+ ... with dctx.stream_reader(fh, read_size=8192) as reader:
+ ... ...
+
+ Instances are *partially* seekable. Absolute and relative positions
+ (``SEEK_SET`` and ``SEEK_CUR``) forward of the current position are
+ allowed. Offsets behind the current read position and offsets relative
+ to the end of stream are not allowed and will raise ``ValueError``
+ if attempted.
+
+ ``tell()`` returns the number of decompressed bytes read so far.
+
+ Not all I/O methods are implemented. Notably missing is support for
+ ``readline()``, ``readlines()``, and linewise iteration support. This is
+ because streams operate on binary data - not text data. If you want to
+ convert decompressed output to text, you can chain an ``io.TextIOWrapper``
+ to the stream:
+
+ >>> with open(path, 'rb') as fh:
+ ... dctx = zstandard.ZstdDecompressor()
+ ... stream_reader = dctx.stream_reader(fh)
+ ... text_stream = io.TextIOWrapper(stream_reader, encoding='utf-8')
+ ... for line in text_stream:
+ ... ...
+ """
+
+ def __init__(
+ self,
+ decompressor,
+ source,
+ read_size,
+ read_across_frames,
+ closefd=True,
+ ):
+ self._decompressor = decompressor
+ self._source = source
+ self._read_size = read_size
+ self._read_across_frames = bool(read_across_frames)
+ self._closefd = bool(closefd)
+ self._entered = False
+ self._closed = False
+ self._bytes_decompressed = 0
+ self._finished_input = False
+ self._finished_output = False
+ self._in_buffer = ffi.new("ZSTD_inBuffer *")
+ # Holds a ref to self._in_buffer.src.
+ self._source_buffer = None
+
+ def __enter__(self):
+ if self._entered:
+ raise ValueError("cannot __enter__ multiple times")
+
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ self._entered = True
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self._entered = False
+ self._decompressor = None
+ self.close()
+ self._source = None
+
+ return False
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return False
+
+ def seekable(self):
+ return False
+
+ def readline(self, size=-1):
+ raise io.UnsupportedOperation()
+
+ def readlines(self, hint=-1):
+ raise io.UnsupportedOperation()
+
+ def write(self, data):
+ raise io.UnsupportedOperation()
+
+ def writelines(self, lines):
+ raise io.UnsupportedOperation()
+
+ def isatty(self):
+ return False
+
+ def flush(self):
+ return None
+
+ def close(self):
+ if self._closed:
+ return None
+
+ self._closed = True
+
+ f = getattr(self._source, "close", None)
+ if self._closefd and f:
+ f()
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def tell(self):
+ return self._bytes_decompressed
+
+ def readall(self):
+ chunks = []
+
+ while True:
+ chunk = self.read(1048576)
+ if not chunk:
+ break
+
+ chunks.append(chunk)
+
+ return b"".join(chunks)
+
+ def __iter__(self):
+ raise io.UnsupportedOperation()
+
+ def __next__(self):
+ raise io.UnsupportedOperation()
+
+ next = __next__
+
+ def _read_input(self):
+ # We have data left over in the input buffer. Use it.
+ if self._in_buffer.pos < self._in_buffer.size:
+ return
+
+ # All input data exhausted. Nothing to do.
+ if self._finished_input:
+ return
+
+ # Else populate the input buffer from our source.
+ if hasattr(self._source, "read"):
+ data = self._source.read(self._read_size)
+
+ if not data:
+ self._finished_input = True
+ return
+
+ self._source_buffer = ffi.from_buffer(data)
+ self._in_buffer.src = self._source_buffer
+ self._in_buffer.size = len(self._source_buffer)
+ self._in_buffer.pos = 0
+ else:
+ self._source_buffer = ffi.from_buffer(self._source)
+ self._in_buffer.src = self._source_buffer
+ self._in_buffer.size = len(self._source_buffer)
+ self._in_buffer.pos = 0
+
+ def _decompress_into_buffer(self, out_buffer):
+ """Decompress available input into an output buffer.
+
+ Returns True if data in output buffer should be emitted.
+ """
+ zresult = lib.ZSTD_decompressStream(
+ self._decompressor._dctx, out_buffer, self._in_buffer
+ )
+
+ if self._in_buffer.pos == self._in_buffer.size:
+ self._in_buffer.src = ffi.NULL
+ self._in_buffer.pos = 0
+ self._in_buffer.size = 0
+ self._source_buffer = None
+
+ if not hasattr(self._source, "read"):
+ self._finished_input = True
+
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+
+ # Emit data if there is data AND either:
+ # a) output buffer is full (read amount is satisfied)
+ # b) we're at end of a frame and not in frame spanning mode
+ return out_buffer.pos and (
+ out_buffer.pos == out_buffer.size
+ or zresult == 0
+ and not self._read_across_frames
+ )
+
+ def read(self, size=-1):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if size < -1:
+ raise ValueError("cannot read negative amounts less than -1")
+
+ if size == -1:
+ # This is recursive. But it gets the job done.
+ return self.readall()
+
+ if self._finished_output or size == 0:
+ return b""
+
+ # We /could/ call into readinto() here. But that introduces more
+ # overhead.
+ dst_buffer = ffi.new("char[]", size)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dst_buffer
+ out_buffer.size = size
+ out_buffer.pos = 0
+
+ self._read_input()
+ if self._decompress_into_buffer(out_buffer):
+ self._bytes_decompressed += out_buffer.pos
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ while not self._finished_input:
+ self._read_input()
+ if self._decompress_into_buffer(out_buffer):
+ self._bytes_decompressed += out_buffer.pos
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ self._bytes_decompressed += out_buffer.pos
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ def readinto(self, b):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._finished_output:
+ return 0
+
+ # TODO use writable=True once we require CFFI >= 1.12.
+ dest_buffer = ffi.from_buffer(b)
+ ffi.memmove(b, b"", 0)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dest_buffer
+ out_buffer.size = len(dest_buffer)
+ out_buffer.pos = 0
+
+ self._read_input()
+ if self._decompress_into_buffer(out_buffer):
+ self._bytes_decompressed += out_buffer.pos
+ return out_buffer.pos
+
+ while not self._finished_input:
+ self._read_input()
+ if self._decompress_into_buffer(out_buffer):
+ self._bytes_decompressed += out_buffer.pos
+ return out_buffer.pos
+
+ self._bytes_decompressed += out_buffer.pos
+ return out_buffer.pos
+
+ def read1(self, size=-1):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if size < -1:
+ raise ValueError("cannot read negative amounts less than -1")
+
+ if self._finished_output or size == 0:
+ return b""
+
+ # -1 returns arbitrary number of bytes.
+ if size == -1:
+ size = DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+
+ dst_buffer = ffi.new("char[]", size)
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dst_buffer
+ out_buffer.size = size
+ out_buffer.pos = 0
+
+ # read1() dictates that we can perform at most 1 call to underlying
+ # stream to get input. However, we can't satisfy this restriction with
+ # decompression because not all input generates output. So we allow
+ # multiple read(). But unlike read(), we stop once we have any output.
+ while not self._finished_input:
+ self._read_input()
+ self._decompress_into_buffer(out_buffer)
+
+ if out_buffer.pos:
+ break
+
+ self._bytes_decompressed += out_buffer.pos
+ return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+
+ def readinto1(self, b):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._finished_output:
+ return 0
+
+ # TODO use writable=True once we require CFFI >= 1.12.
+ dest_buffer = ffi.from_buffer(b)
+ ffi.memmove(b, b"", 0)
+
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = dest_buffer
+ out_buffer.size = len(dest_buffer)
+ out_buffer.pos = 0
+
+ while not self._finished_input and not self._finished_output:
+ self._read_input()
+ self._decompress_into_buffer(out_buffer)
+
+ if out_buffer.pos:
+ break
+
+ self._bytes_decompressed += out_buffer.pos
+ return out_buffer.pos
+
+ def seek(self, pos, whence=os.SEEK_SET):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ read_amount = 0
+
+ if whence == os.SEEK_SET:
+ if pos < 0:
+ raise OSError("cannot seek to negative position with SEEK_SET")
+
+ if pos < self._bytes_decompressed:
+ raise OSError(
+ "cannot seek zstd decompression stream " "backwards"
+ )
+
+ read_amount = pos - self._bytes_decompressed
+
+ elif whence == os.SEEK_CUR:
+ if pos < 0:
+ raise OSError(
+ "cannot seek zstd decompression stream " "backwards"
+ )
+
+ read_amount = pos
+ elif whence == os.SEEK_END:
+ raise OSError(
+ "zstd decompression streams cannot be seeked " "with SEEK_END"
+ )
+
+ while read_amount:
+ result = self.read(
+ min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+ )
+
+ if not result:
+ break
+
+ read_amount -= len(result)
+
+ return self._bytes_decompressed
+
+
+class ZstdDecompressionWriter(object):
+ """
+ Write-only stream wrapper that performs decompression.
+
+ This type provides a writable stream that performs decompression and writes
+ decompressed data to another stream.
+
+ This type implements the ``io.RawIOBase`` interface. Only methods that
+ involve writing will do useful things.
+
+ Behavior is similar to :py:meth:`ZstdCompressor.stream_writer`: compressed
+ data is sent to the decompressor by calling ``write(data)`` and decompressed
+ output is written to the inner stream by calling its ``write(data)``
+ method:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> decompressor = dctx.stream_writer(fh)
+ >>> # Will call fh.write() with uncompressed data.
+ >>> decompressor.write(compressed_data)
+
+ Instances can be used as context managers. However, context managers add no
+ extra special behavior other than automatically calling ``close()`` when
+ they exit.
+
+ Calling ``close()`` will mark the stream as closed and subsequent I/O
+ operations will raise ``ValueError`` (per the documented behavior of
+ ``io.RawIOBase``). ``close()`` will also call ``close()`` on the
+ underlying stream if such a method exists and the instance was created with
+ ``closefd=True``.
+
+ The size of chunks to ``write()`` to the destination can be specified:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> with dctx.stream_writer(fh, write_size=16384) as decompressor:
+ >>> pass
+
+ You can see how much memory is being used by the decompressor:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> with dctx.stream_writer(fh) as decompressor:
+ >>> byte_size = decompressor.memory_size()
+
+ ``stream_writer()`` accepts a ``write_return_read`` boolean argument to control
+ the return value of ``write()``. When ``True`` (the default)``, ``write()``
+ returns the number of bytes that were read from the input. When ``False``,
+ ``write()`` returns the number of bytes that were ``write()`` to the inner
+ stream.
+ """
+
+ def __init__(
+ self,
+ decompressor,
+ writer,
+ write_size,
+ write_return_read,
+ closefd=True,
+ ):
+ decompressor._ensure_dctx()
+
+ self._decompressor = decompressor
+ self._writer = writer
+ self._write_size = write_size
+ self._write_return_read = bool(write_return_read)
+ self._closefd = bool(closefd)
+ self._entered = False
+ self._closing = False
+ self._closed = False
+
+ def __enter__(self):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ if self._entered:
+ raise ZstdError("cannot __enter__ multiple times")
+
+ self._entered = True
+
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self._entered = False
+ self.close()
+
+ return False
+
+ def __iter__(self):
+ raise io.UnsupportedOperation()
+
+ def __next__(self):
+ raise io.UnsupportedOperation()
+
+ def memory_size(self):
+ return lib.ZSTD_sizeof_DCtx(self._decompressor._dctx)
+
+ def close(self):
+ if self._closed:
+ return
+
+ try:
+ self._closing = True
+ self.flush()
+ finally:
+ self._closing = False
+ self._closed = True
+
+ f = getattr(self._writer, "close", None)
+ if self._closefd and f:
+ f()
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def fileno(self):
+ f = getattr(self._writer, "fileno", None)
+ if f:
+ return f()
+ else:
+ raise OSError("fileno not available on underlying writer")
+
+ def flush(self):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ f = getattr(self._writer, "flush", None)
+ if f and not self._closing:
+ return f()
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return False
+
+ def readline(self, size=-1):
+ raise io.UnsupportedOperation()
+
+ def readlines(self, hint=-1):
+ raise io.UnsupportedOperation()
+
+ def seek(self, offset, whence=None):
+ raise io.UnsupportedOperation()
+
+ def seekable(self):
+ return False
+
+ def tell(self):
+ raise io.UnsupportedOperation()
+
+ def truncate(self, size=None):
+ raise io.UnsupportedOperation()
+
+ def writable(self):
+ return True
+
+ def writelines(self, lines):
+ raise io.UnsupportedOperation()
+
+ def read(self, size=-1):
+ raise io.UnsupportedOperation()
+
+ def readall(self):
+ raise io.UnsupportedOperation()
+
+ def readinto(self, b):
+ raise io.UnsupportedOperation()
+
+ def write(self, data):
+ if self._closed:
+ raise ValueError("stream is closed")
+
+ total_write = 0
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ data_buffer = ffi.from_buffer(data)
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ dst_buffer = ffi.new("char[]", self._write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = len(dst_buffer)
+ out_buffer.pos = 0
+
+ dctx = self._decompressor._dctx
+
+ while in_buffer.pos < in_buffer.size:
+ zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd decompress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
+ total_write += out_buffer.pos
+ out_buffer.pos = 0
+
+ if self._write_return_read:
+ return in_buffer.pos
+ else:
+ return total_write
+
+
+class ZstdDecompressor(object):
+ """
+ Context for performing zstandard decompression.
+
+ Each instance is essentially a wrapper around a ``ZSTD_DCtx`` from zstd's
+ C API.
+
+ An instance can compress data various ways. Instances can be used multiple
+ times.
+
+ The interface of this class is very similar to
+ :py:class:`zstandard.ZstdCompressor` (by design).
+
+ Assume that each ``ZstdDecompressor`` instance can only handle a single
+ logical compression operation at the same time. i.e. if you call a method
+ like ``decompressobj()`` to obtain multiple objects derived from the same
+ ``ZstdDecompressor`` instance and attempt to use them simultaneously, errors
+ will likely occur.
+
+ If you need to perform multiple logical decompression operations and you
+ can't guarantee those operations are temporally non-overlapping, you need
+ to obtain multiple ``ZstdDecompressor`` instances.
+
+ Unless specified otherwise, assume that no two methods of
+ ``ZstdDecompressor`` instances can be called from multiple Python
+ threads simultaneously. In other words, assume instances are not thread safe
+ unless stated otherwise.
+
+ :param dict_data:
+ Compression dictionary to use.
+ :param max_window_size:
+ Sets an upper limit on the window size for decompression operations in
+ kibibytes. This setting can be used to prevent large memory allocations
+ for inputs using large compression windows.
+ :param format:
+ Set the format of data for the decoder.
+
+ By default this is ``zstandard.FORMAT_ZSTD1``. It can be set to
+ ``zstandard.FORMAT_ZSTD1_MAGICLESS`` to allow decoding frames without
+ the 4 byte magic header. Not all decompression APIs support this mode.
+ """
+
+ def __init__(self, dict_data=None, max_window_size=0, format=FORMAT_ZSTD1):
+ self._dict_data = dict_data
+ self._max_window_size = max_window_size
+ self._format = format
+
+ dctx = lib.ZSTD_createDCtx()
+ if dctx == ffi.NULL:
+ raise MemoryError()
+
+ self._dctx = dctx
+
+ # Defer setting up garbage collection until full state is loaded so
+ # the memory size is more accurate.
+ try:
+ self._ensure_dctx()
+ finally:
+ self._dctx = ffi.gc(
+ dctx, lib.ZSTD_freeDCtx, size=lib.ZSTD_sizeof_DCtx(dctx)
+ )
+
+ def memory_size(self):
+ """Size of decompression context, in bytes.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> size = dctx.memory_size()
+ """
+ return lib.ZSTD_sizeof_DCtx(self._dctx)
+
+ def decompress(
+ self,
+ data,
+ max_output_size=0,
+ read_across_frames=False,
+ allow_extra_data=True,
+ ):
+ """
+ Decompress data in a single operation.
+
+ This method will decompress the input data in a single operation and
+ return the decompressed data.
+
+ The input bytes are expected to contain at least 1 full Zstandard frame
+ (something compressed with :py:meth:`ZstdCompressor.compress` or
+ similar). If the input does not contain a full frame, an exception will
+ be raised.
+
+ ``read_across_frames`` controls whether to read multiple zstandard
+ frames in the input. When False, decompression stops after reading the
+ first frame. This feature is not yet implemented but the argument is
+ provided for forward API compatibility when the default is changed to
+ True in a future release. For now, if you need to decompress multiple
+ frames, use an API like :py:meth:`ZstdCompressor.stream_reader` with
+ ``read_across_frames=True``.
+
+ ``allow_extra_data`` controls how to handle extra input data after a
+ fully decoded frame. If False, any extra data (which could be a valid
+ zstd frame) will result in ``ZstdError`` being raised. If True, extra
+ data is silently ignored. The default will likely change to False in a
+ future release when ``read_across_frames`` defaults to True.
+
+ If the input contains extra data after a full frame, that extra input
+ data is silently ignored. This behavior is undesirable in many scenarios
+ and will likely be changed or controllable in a future release (see
+ #181).
+
+ If the frame header of the compressed data does not contain the content
+ size, ``max_output_size`` must be specified or ``ZstdError`` will be
+ raised. An allocation of size ``max_output_size`` will be performed and an
+ attempt will be made to perform decompression into that buffer. If the
+ buffer is too small or cannot be allocated, ``ZstdError`` will be
+ raised. The buffer will be resized if it is too large.
+
+ Uncompressed data could be much larger than compressed data. As a result,
+ calling this function could result in a very large memory allocation
+ being performed to hold the uncompressed data. This could potentially
+ result in ``MemoryError`` or system memory swapping. If you don't need
+ the full output data in a single contiguous array in memory, consider
+ using streaming decompression for more resilient memory behavior.
+
+ Usage:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> decompressed = dctx.decompress(data)
+
+ If the compressed data doesn't have its content size embedded within it,
+ decompression can be attempted by specifying the ``max_output_size``
+ argument:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> uncompressed = dctx.decompress(data, max_output_size=1048576)
+
+ Ideally, ``max_output_size`` will be identical to the decompressed
+ output size.
+
+ .. important::
+
+ If the exact size of decompressed data is unknown (not passed in
+ explicitly and not stored in the zstd frame), for performance
+ reasons it is encouraged to use a streaming API.
+
+ :param data:
+ Compressed data to decompress.
+ :param max_output_size:
+ Integer max size of response.
+
+ If ``0``, there is no limit and we can attempt to allocate an output
+ buffer of infinite size.
+ :return:
+ ``bytes`` representing decompressed output.
+ """
+
+ if read_across_frames:
+ raise ZstdError(
+ "ZstdDecompressor.read_across_frames=True is not yet implemented"
+ )
+
+ self._ensure_dctx()
+
+ data_buffer = ffi.from_buffer(data)
+
+ output_size = lib.ZSTD_getFrameContentSize(
+ data_buffer, len(data_buffer)
+ )
+
+ if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
+ raise ZstdError("error determining content size from frame header")
+ elif output_size == 0:
+ return b""
+ elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+ if not max_output_size:
+ raise ZstdError(
+ "could not determine content size in frame header"
+ )
+
+ result_buffer = ffi.new("char[]", max_output_size)
+ result_size = max_output_size
+ output_size = 0
+ else:
+ result_buffer = ffi.new("char[]", output_size)
+ result_size = output_size
+
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = result_buffer
+ out_buffer.size = result_size
+ out_buffer.pos = 0
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError("decompression error: %s" % _zstd_error(zresult))
+ elif zresult:
+ raise ZstdError(
+ "decompression error: did not decompress full frame"
+ )
+ elif output_size and out_buffer.pos != output_size:
+ raise ZstdError(
+ "decompression error: decompressed %d bytes; expected %d"
+ % (zresult, output_size)
+ )
+ elif not allow_extra_data and in_buffer.pos < in_buffer.size:
+ count = in_buffer.size - in_buffer.pos
+
+ raise ZstdError(
+ "compressed input contains %d bytes of unused data, which is disallowed"
+ % count
+ )
+
+ return ffi.buffer(result_buffer, out_buffer.pos)[:]
+
+ def stream_reader(
+ self,
+ source,
+ read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+ read_across_frames=False,
+ closefd=True,
+ ):
+ """
+ Read-only stream wrapper that performs decompression.
+
+ This method obtains an object that conforms to the ``io.RawIOBase``
+ interface and performs transparent decompression via ``read()``
+ operations. Source data is obtained by calling ``read()`` on a
+ source stream or object implementing the buffer protocol.
+
+ See :py:class:`zstandard.ZstdDecompressionReader` for more documentation
+ and usage examples.
+
+ :param source:
+ Source of compressed data to decompress. Can be any object
+ with a ``read(size)`` method or that conforms to the buffer protocol.
+ :param read_size:
+ Integer number of bytes to read from the source and feed into the
+ compressor at a time.
+ :param read_across_frames:
+ Whether to read data across multiple zstd frames. If False,
+ decompression is stopped at frame boundaries.
+ :param closefd:
+ Whether to close the source stream when this instance is closed.
+ :return:
+ :py:class:`zstandard.ZstdDecompressionReader`.
+ """
+ self._ensure_dctx()
+ return ZstdDecompressionReader(
+ self, source, read_size, read_across_frames, closefd=closefd
+ )
+
+ def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+ """Obtain a standard library compatible incremental decompressor.
+
+ See :py:class:`ZstdDecompressionObj` for more documentation
+ and usage examples.
+
+ :param write_size:
+ :return:
+ :py:class:`zstandard.ZstdDecompressionObj`
+ """
+ if write_size < 1:
+ raise ValueError("write_size must be positive")
+
+ self._ensure_dctx()
+ return ZstdDecompressionObj(self, write_size=write_size)
+
+ def read_to_iter(
+ self,
+ reader,
+ read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+ write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ skip_bytes=0,
+ ):
+ """Read compressed data to an iterator of uncompressed chunks.
+
+ This method will read data from ``reader``, feed it to a decompressor,
+ and emit ``bytes`` chunks representing the decompressed result.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> for chunk in dctx.read_to_iter(fh):
+ ... # Do something with original data.
+
+ ``read_to_iter()`` accepts an object with a ``read(size)`` method that
+ will return compressed bytes or an object conforming to the buffer
+ protocol.
+
+ ``read_to_iter()`` returns an iterator whose elements are chunks of the
+ decompressed data.
+
+ The size of requested ``read()`` from the source can be specified:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> for chunk in dctx.read_to_iter(fh, read_size=16384):
+ ... pass
+
+ It is also possible to skip leading bytes in the input data:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> for chunk in dctx.read_to_iter(fh, skip_bytes=1):
+ ... pass
+
+ .. tip::
+
+ Skipping leading bytes is useful if the source data contains extra
+ *header* data. Traditionally, you would need to create a slice or
+ ``memoryview`` of the data you want to decompress. This would create
+ overhead. It is more efficient to pass the offset into this API.
+
+ Similarly to :py:meth:`ZstdCompressor.read_to_iter`, the consumer of the
+ iterator controls when data is decompressed. If the iterator isn't consumed,
+ decompression is put on hold.
+
+ When ``read_to_iter()`` is passed an object conforming to the buffer protocol,
+ the behavior may seem similar to what occurs when the simple decompression
+ API is used. However, this API works when the decompressed size is unknown.
+ Furthermore, if feeding large inputs, the decompressor will work in chunks
+ instead of performing a single operation.
+
+ :param reader:
+ Source of compressed data. Can be any object with a
+ ``read(size)`` method or any object conforming to the buffer
+ protocol.
+ :param read_size:
+ Integer size of data chunks to read from ``reader`` and feed into
+ the decompressor.
+ :param write_size:
+ Integer size of data chunks to emit from iterator.
+ :param skip_bytes:
+ Integer number of bytes to skip over before sending data into
+ the decompressor.
+ :return:
+ Iterator of ``bytes`` representing uncompressed data.
+ """
+
+ if skip_bytes >= read_size:
+ raise ValueError("skip_bytes must be smaller than read_size")
+
+ if hasattr(reader, "read"):
+ have_read = True
+ elif hasattr(reader, "__getitem__"):
+ have_read = False
+ buffer_offset = 0
+ size = len(reader)
+ else:
+ raise ValueError(
+ "must pass an object with a read() method or "
+ "conforms to buffer protocol"
+ )
+
+ if skip_bytes:
+ if have_read:
+ reader.read(skip_bytes)
+ else:
+ if skip_bytes > size:
+ raise ValueError("skip_bytes larger than first input chunk")
+
+ buffer_offset = skip_bytes
+
+ self._ensure_dctx()
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ dst_buffer = ffi.new("char[]", write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = len(dst_buffer)
+ out_buffer.pos = 0
+
+ while True:
+ assert out_buffer.pos == 0
+
+ if have_read:
+ read_result = reader.read(read_size)
+ else:
+ remaining = size - buffer_offset
+ slice_size = min(remaining, read_size)
+ read_result = reader[buffer_offset : buffer_offset + slice_size]
+ buffer_offset += slice_size
+
+ # No new input. Break out of read loop.
+ if not read_result:
+ break
+
+ # Feed all read data into decompressor and emit output until
+ # exhausted.
+ read_buffer = ffi.from_buffer(read_result)
+ in_buffer.src = read_buffer
+ in_buffer.size = len(read_buffer)
+ in_buffer.pos = 0
+
+ while in_buffer.pos < in_buffer.size:
+ assert out_buffer.pos == 0
+
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd decompress error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ out_buffer.pos = 0
+ yield data
+
+ if zresult == 0:
+ return
+
+ # Repeat loop to collect more input data.
+ continue
+
+ # If we get here, input is exhausted.
+
+ def stream_writer(
+ self,
+ writer,
+ write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ write_return_read=True,
+ closefd=True,
+ ):
+ """
+ Push-based stream wrapper that performs decompression.
+
+ This method constructs a stream wrapper that conforms to the
+ ``io.RawIOBase`` interface and performs transparent decompression
+ when writing to a wrapper stream.
+
+ See :py:class:`zstandard.ZstdDecompressionWriter` for more documentation
+ and usage examples.
+
+ :param writer:
+ Destination for decompressed output. Can be any object with a
+ ``write(data)``.
+ :param write_size:
+ Integer size of chunks to ``write()`` to ``writer``.
+ :param write_return_read:
+ Whether ``write()`` should return the number of bytes of input
+ consumed. If False, ``write()`` returns the number of bytes sent
+ to the inner stream.
+ :param closefd:
+ Whether to ``close()`` the inner stream when this stream is closed.
+ :return:
+ :py:class:`zstandard.ZstdDecompressionWriter`
+ """
+ if not hasattr(writer, "write"):
+ raise ValueError("must pass an object with a write() method")
+
+ return ZstdDecompressionWriter(
+ self,
+ writer,
+ write_size,
+ write_return_read,
+ closefd=closefd,
+ )
+
+ def copy_stream(
+ self,
+ ifh,
+ ofh,
+ read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+ write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ ):
+ """
+ Copy data between streams, decompressing in the process.
+
+ Compressed data will be read from ``ifh``, decompressed, and written
+ to ``ofh``.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> dctx.copy_stream(ifh, ofh)
+
+ e.g. to decompress a file to another file:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
+ ... dctx.copy_stream(ifh, ofh)
+
+ The size of chunks being ``read()`` and ``write()`` from and to the
+ streams can be specified:
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384)
+
+ :param ifh:
+ Source stream to read compressed data from.
+
+ Must have a ``read()`` method.
+ :param ofh:
+ Destination stream to write uncompressed data to.
+
+ Must have a ``write()`` method.
+ :param read_size:
+ The number of bytes to ``read()`` from the source in a single
+ operation.
+ :param write_size:
+ The number of bytes to ``write()`` to the destination in a single
+ operation.
+ :return:
+ 2-tuple of integers representing the number of bytes read and
+ written, respectively.
+ """
+
+ if not hasattr(ifh, "read"):
+ raise ValueError("first argument must have a read() method")
+ if not hasattr(ofh, "write"):
+ raise ValueError("second argument must have a write() method")
+
+ self._ensure_dctx()
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+
+ dst_buffer = ffi.new("char[]", write_size)
+ out_buffer.dst = dst_buffer
+ out_buffer.size = write_size
+ out_buffer.pos = 0
+
+ total_read, total_write = 0, 0
+
+ # Read all available input.
+ while True:
+ data = ifh.read(read_size)
+ if not data:
+ break
+
+ data_buffer = ffi.from_buffer(data)
+ total_read += len(data_buffer)
+ in_buffer.src = data_buffer
+ in_buffer.size = len(data_buffer)
+ in_buffer.pos = 0
+
+ # Flush all read data to output.
+ while in_buffer.pos < in_buffer.size:
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "zstd decompressor error: %s" % _zstd_error(zresult)
+ )
+
+ if out_buffer.pos:
+ ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+ total_write += out_buffer.pos
+ out_buffer.pos = 0
+
+ # Continue loop to keep reading.
+
+ return total_read, total_write
+
+ def decompress_content_dict_chain(self, frames):
+ """
+ Decompress a series of frames using the content dictionary chaining technique.
+
+ Such a list of frames is produced by compressing discrete inputs where
+ each non-initial input is compressed with a *prefix* dictionary consisting
+ of the content of the previous input.
+
+ For example, say you have the following inputs:
+
+ >>> inputs = [b"input 1", b"input 2", b"input 3"]
+
+ The zstd frame chain consists of:
+
+ 1. ``b"input 1"`` compressed in standalone/discrete mode
+ 2. ``b"input 2"`` compressed using ``b"input 1"`` as a *prefix* dictionary
+ 3. ``b"input 3"`` compressed using ``b"input 2"`` as a *prefix* dictionary
+
+ Each zstd frame **must** have the content size written.
+
+ The following Python code can be used to produce a *prefix dictionary chain*:
+
+ >>> def make_chain(inputs):
+ ... frames = []
+ ...
+ ... # First frame is compressed in standalone/discrete mode.
+ ... zctx = zstandard.ZstdCompressor()
+ ... frames.append(zctx.compress(inputs[0]))
+ ...
+ ... # Subsequent frames use the previous fulltext as a prefix dictionary
+ ... for i, raw in enumerate(inputs[1:]):
+ ... dict_data = zstandard.ZstdCompressionDict(
+ ... inputs[i], dict_type=zstandard.DICT_TYPE_RAWCONTENT)
+ ... zctx = zstandard.ZstdCompressor(dict_data=dict_data)
+ ... frames.append(zctx.compress(raw))
+ ...
+ ... return frames
+
+ ``decompress_content_dict_chain()`` returns the uncompressed data of the last
+ element in the input chain.
+
+ .. note::
+
+ It is possible to implement *prefix dictionary chain* decompression
+ on top of other APIs. However, this function will likely be faster -
+ especially for long input chains - as it avoids the overhead of
+ instantiating and passing around intermediate objects between
+ multiple functions.
+
+ :param frames:
+ List of ``bytes`` holding compressed zstd frames.
+ :return:
+ """
+ if not isinstance(frames, list):
+ raise TypeError("argument must be a list")
+
+ if not frames:
+ raise ValueError("empty input chain")
+
+ # First chunk should not be using a dictionary. We handle it specially.
+ chunk = frames[0]
+ if not isinstance(chunk, bytes):
+ raise ValueError("chunk 0 must be bytes")
+
+ # All chunks should be zstd frames and should have content size set.
+ chunk_buffer = ffi.from_buffer(chunk)
+ params = ffi.new("ZSTD_frameHeader *")
+ zresult = lib.ZSTD_getFrameHeader(
+ params, chunk_buffer, len(chunk_buffer)
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ValueError("chunk 0 is not a valid zstd frame")
+ elif zresult:
+ raise ValueError("chunk 0 is too small to contain a zstd frame")
+
+ if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+ raise ValueError("chunk 0 missing content size in frame")
+
+ self._ensure_dctx(load_dict=False)
+
+ last_buffer = ffi.new("char[]", params.frameContentSize)
+
+ out_buffer = ffi.new("ZSTD_outBuffer *")
+ out_buffer.dst = last_buffer
+ out_buffer.size = len(last_buffer)
+ out_buffer.pos = 0
+
+ in_buffer = ffi.new("ZSTD_inBuffer *")
+ in_buffer.src = chunk_buffer
+ in_buffer.size = len(chunk_buffer)
+ in_buffer.pos = 0
+
+ zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "could not decompress chunk 0: %s" % _zstd_error(zresult)
+ )
+ elif zresult:
+ raise ZstdError("chunk 0 did not decompress full frame")
+
+ # Special case of chain length of 1
+ if len(frames) == 1:
+ return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+ i = 1
+ while i < len(frames):
+ chunk = frames[i]
+ if not isinstance(chunk, bytes):
+ raise ValueError("chunk %d must be bytes" % i)
+
+ chunk_buffer = ffi.from_buffer(chunk)
+ zresult = lib.ZSTD_getFrameHeader(
+ params, chunk_buffer, len(chunk_buffer)
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ValueError("chunk %d is not a valid zstd frame" % i)
+ elif zresult:
+ raise ValueError(
+ "chunk %d is too small to contain a zstd frame" % i
+ )
+
+ if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+ raise ValueError("chunk %d missing content size in frame" % i)
+
+ dest_buffer = ffi.new("char[]", params.frameContentSize)
+
+ out_buffer.dst = dest_buffer
+ out_buffer.size = len(dest_buffer)
+ out_buffer.pos = 0
+
+ in_buffer.src = chunk_buffer
+ in_buffer.size = len(chunk_buffer)
+ in_buffer.pos = 0
+
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "could not decompress chunk %d: %s" % _zstd_error(zresult)
+ )
+ elif zresult:
+ raise ZstdError("chunk %d did not decompress full frame" % i)
+
+ last_buffer = dest_buffer
+ i += 1
+
+ return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+ def multi_decompress_to_buffer(
+ self, frames, decompressed_sizes=None, threads=0
+ ):
+ """
+ Decompress multiple zstd frames to output buffers as a single operation.
+
+ (Experimental. Not available in CFFI backend.)
+
+ Compressed frames can be passed to the function as a
+ ``BufferWithSegments``, a ``BufferWithSegmentsCollection``, or as a
+ list containing objects that conform to the buffer protocol. For best
+ performance, pass a ``BufferWithSegmentsCollection`` or a
+ ``BufferWithSegments``, as minimal input validation will be done for
+ that type. If calling from Python (as opposed to C), constructing one
+ of these instances may add overhead cancelling out the performance
+ overhead of validation for list inputs.
+
+ Returns a ``BufferWithSegmentsCollection`` containing the decompressed
+ data. All decompressed data is allocated in a single memory buffer. The
+ ``BufferWithSegments`` instance tracks which objects are at which offsets
+ and their respective lengths.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> results = dctx.multi_decompress_to_buffer([b'...', b'...'])
+
+ The decompressed size of each frame MUST be discoverable. It can either be
+ embedded within the zstd frame or passed in via the ``decompressed_sizes``
+ argument.
+
+ The ``decompressed_sizes`` argument is an object conforming to the buffer
+ protocol which holds an array of 64-bit unsigned integers in the machine's
+ native format defining the decompressed sizes of each frame. If this argument
+ is passed, it avoids having to scan each frame for its decompressed size.
+ This frame scanning can add noticeable overhead in some scenarios.
+
+ >>> frames = [...]
+ >>> sizes = struct.pack('=QQQQ', len0, len1, len2, len3)
+ >>>
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> results = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+
+ .. note::
+
+ It is possible to pass a ``mmap.mmap()`` instance into this function by
+ wrapping it with a ``BufferWithSegments`` instance (which will define the
+ offsets of frames within the memory mapped region).
+
+ This function is logically equivalent to performing
+ :py:meth:`ZstdCompressor.decompress` on each input frame and returning the
+ result.
+
+ This function exists to perform decompression on multiple frames as fast
+ as possible by having as little overhead as possible. Since decompression is
+ performed as a single operation and since the decompressed output is stored in
+ a single buffer, extra memory allocations, Python objects, and Python function
+ calls are avoided. This is ideal for scenarios where callers know up front that
+ they need to access data for multiple frames, such as when *delta chains* are
+ being used.
+
+ Currently, the implementation always spawns multiple threads when requested,
+ even if the amount of work to do is small. In the future, it will be smarter
+ about avoiding threads and their associated overhead when the amount of
+ work to do is small.
+
+ :param frames:
+ Source defining zstd frames to decompress.
+ :param decompressed_sizes:
+ Array of integers representing sizes of decompressed zstd frames.
+ :param threads:
+ How many threads to use for decompression operations.
+
+ Negative values will use the same number of threads as logical CPUs
+ on the machine. Values ``0`` or ``1`` use a single thread.
+ :return:
+ ``BufferWithSegmentsCollection``
+ """
+ raise NotImplementedError()
+
+ def _ensure_dctx(self, load_dict=True):
+ lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
+
+ if self._max_window_size:
+ zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+ self._dctx, self._max_window_size
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "unable to set max window size: %s" % _zstd_error(zresult)
+ )
+
+ zresult = lib.ZSTD_DCtx_setParameter(
+ self._dctx, lib.ZSTD_d_format, self._format
+ )
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "unable to set decoding format: %s" % _zstd_error(zresult)
+ )
+
+ if self._dict_data and load_dict:
+ zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
+ if lib.ZSTD_isError(zresult):
+ raise ZstdError(
+ "unable to reference prepared dictionary: %s"
+ % _zstd_error(zresult)
+ )
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda b/my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda
new file mode 100644
index 0000000000000000000000000000000000000000..af494618bc51dc5a07ea786826863cc87dd29738
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74aa86915c1e34a0fbab5f6c2ca8d4a78a6167304e23f78073c5453107095211
+size 1861423
diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/ruamel_yaml-0.15.80-py38h497a2fe_1006.tar.bz2 b/my_container_sandbox/workspace/anaconda3/pkgs/ruamel_yaml-0.15.80-py38h497a2fe_1006.tar.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..70891616ef8ef5c93d61a330e1eb6c462e2ad5d1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/pkgs/ruamel_yaml-0.15.80-py38h497a2fe_1006.tar.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53dbd46b27e914380b7e426283b7f932aba2ed8097303e9981560370e59f5551
+size 276942
diff --git a/tmp_inputs_32_5/case00007.nii.gz b/tmp_inputs_32_5/case00007.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..46a46b4acccdf029d0601c38c05a56c753fccc9f
--- /dev/null
+++ b/tmp_inputs_32_5/case00007.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfb57ca91cd75e7a7241083bae87afa12db6379cc4ca25f16820b300bc072719
+size 30317508