diff --git a/.gitattributes b/.gitattributes
index bcb7a73348a180539f5859397af7ad6568052bbe..35dcbbf76048b53907a0e61358ece33a29bae1bf 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -62,3 +62,7 @@ parrot/bin/bunzip2 filter=lfs diff=lfs merge=lfs -text
parrot/bin/bzcat filter=lfs diff=lfs merge=lfs -text
parrot/bin/lzcat filter=lfs diff=lfs merge=lfs -text
parrot/lib/libz.a filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libncurses++w.a filter=lfs diff=lfs merge=lfs -text
+parrot/bin/unlzma filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libz.so.1 filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libatomic.so filter=lfs diff=lfs merge=lfs -text
diff --git a/parrot/bin/unlzma b/parrot/bin/unlzma
new file mode 100644
index 0000000000000000000000000000000000000000..5903b5b116e3ed0a206e08d2c4419013bb4fd3c2
--- /dev/null
+++ b/parrot/bin/unlzma
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7a3b4440bfa8684bcc3d6019326803cb53637c2cafecad558f04327e593fa40
+size 108336
diff --git a/parrot/lib/libatomic.so b/parrot/lib/libatomic.so
new file mode 100644
index 0000000000000000000000000000000000000000..0ac3eb4db4afc304563eb9424ebd8f5d8e2a38e0
--- /dev/null
+++ b/parrot/lib/libatomic.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f1a92c18f01c13c9a89908fb86a7309ae5b89a882db9914114957bc4b6fed92
+size 143648
diff --git a/parrot/lib/libncurses++w.a b/parrot/lib/libncurses++w.a
new file mode 100644
index 0000000000000000000000000000000000000000..592b1b981d3fb155dffb6c4dcc9335849efc088c
--- /dev/null
+++ b/parrot/lib/libncurses++w.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93b48c40f5d7b07e1a8c4bd9419df55c28e250cca1166be4aafd2fc7caf18823
+size 187604
diff --git a/parrot/lib/libz.so.1 b/parrot/lib/libz.so.1
new file mode 100644
index 0000000000000000000000000000000000000000..64cd6a309bad00dc38040a577191f6681e18bf89
--- /dev/null
+++ b/parrot/lib/libz.so.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
+size 124744
diff --git a/parrot/lib/python3.10/encodings/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d23ed9968b2a853c7dba3736aaff5dbe330e866
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/ascii.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/ascii.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd1904a69d676583819c2c2355919bcc60918239
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/ascii.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/charmap.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/charmap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b2f8d1291d5970a0c1f3d6bc0fef10168d16519
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/charmap.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp1006.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp1006.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c6f7a02fa4f0b35c613a1ac4e05e932c17ba524
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp1006.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp1250.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp1250.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee5cde6a2310336e852d2617902962b615b31f2d
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp1250.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp1251.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp1251.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a75a6a2e2bb6e921533ea665efe2a91105bfcea
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp1251.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp1254.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp1254.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d5e10d88347f3374b6e64f77a08fe34dbc87e34
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp1254.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp424.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp424.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62ac44282f34573f6957f8b1058f88d825d50ec8
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp424.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp437.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp437.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5cf8eecf3e80ad24860027aa9048bd3169e07cc
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp437.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp737.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp737.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0985cef925836f6e22b6ed53e38c06e7ce887591
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp737.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp850.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp850.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bb9f99baee3171d20b15837b897c149f1b93dc8
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp850.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp855.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp855.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a233a104a6e87b23eeba8d4b89d62fd5809c3ca7
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp855.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp856.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp856.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6be7d623abf1035a7e1de99d21468f42af2d2685
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp856.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp858.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp858.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85667020ce82f4ccf5211f0f008b156e7d906152
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp858.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp860.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp860.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82c13ab680f3be945bf5148c1b2ddafa383122bf
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp860.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp864.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp864.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7276ac594eb7cc83189acf53d2c401241b89bf62
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp864.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp865.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp865.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04519521d8a68b808f7cb08014dd647022a1c8ee
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp865.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp866.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp866.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bb27629379a6c0b4dfd3cd2f579108d27727fdc
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp866.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp869.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp869.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..233d8f2a7951857da36bdcf030fc3e599ef9f27e
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp869.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp874.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp874.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d5a155f170d4174ac410cee3a37309fc2f78440
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp874.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp875.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp875.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7fb5854b56860b83f56c142781c47b83ab53722
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp875.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp932.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp932.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..241b667f0d8e44b97ff363416920396976b8d295
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp932.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/cp949.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/cp949.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74a674d4c27f8c50b64e56fb4f07fc2a003408e4
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/cp949.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/euc_jp.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/euc_jp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..907210cad4aa00ac1485f330227fc5886acb3bf9
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/euc_jp.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/euc_kr.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/euc_kr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8571a1d123144c44b1983385aff177e95105ff3b
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/euc_kr.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/gb18030.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/gb18030.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70a423b8d2163ddad41dd742617d44ec2106ea52
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/gb18030.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/gb2312.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/gb2312.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30ea83cab85f2183d64cdd37d703e185fb367836
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/gb2312.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/gbk.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/gbk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7e1f6fbb5f3c958b40b7be05b22cc6bc5fb3e9d
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/gbk.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/idna.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/idna.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..566175549e2226ccc080812f3a0c800095708a93
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/idna.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4ecde78cae356c32eb8f3e20dfcdbce54dbfa6e
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_2004.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_2004.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6a0b4d81f9c08356ea7895aaadf87c8e54d410b
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_2004.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_3.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc2fa11930f3e99cc975298f310a28c94a595701
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_3.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_ext.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_ext.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8cccebb46280b29e209b93f159617b8e09ec1ab
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso2022_jp_ext.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso2022_kr.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso2022_kr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87f7389446985a75e39ba516f65216020883ec80
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso2022_kr.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_10.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_10.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ef04631d8f7058971dc9d91b6c30c1a193a01b5
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_10.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_13.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_13.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84a97a484d35546d2000f2fc4accf21a8522d031
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_13.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_15.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_15.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3df27d409a3963def673194d49e392a68572b2e4
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_15.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_16.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_16.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..978675b34589e0668a509e3113b3e6283bce74f2
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_16.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_2.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08abfec9c0d68c55d6948ff0ddb3345ba9fff3b8
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_2.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_3.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33c107ef772b94b05a04206776400ab86f8ff6d3
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_3.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/iso8859_5.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/iso8859_5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e8fc7faab60e63bf496b190c0ebbfd2adf764df
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/iso8859_5.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/koi8_t.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/koi8_t.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be9e465f71b01431585043135bf6402f049a22ee
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/koi8_t.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/latin_1.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/latin_1.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c87bdf39a682f36e63f8eb3a020276cfc7496ea
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/latin_1.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_croatian.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_croatian.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33777d24bb308c042f6ad039699d73e7aa291eb0
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_croatian.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_cyrillic.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_cyrillic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c165927333272aefd58e80bcf0a4e640fda3dcb
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_cyrillic.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_farsi.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_farsi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..043697ed760705c76519fbcd55260bc02bf34ef0
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_farsi.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_iceland.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_iceland.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e1f18483fe8496d04b56ddc19a70a20f98a9a50
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_iceland.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_latin2.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_latin2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1273afd379062e4aaf79a95d670fac0f762a771
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_latin2.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mac_turkish.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mac_turkish.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3a67e0a07b51812f9c92f32a823892fffa390ad
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mac_turkish.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/mbcs.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/mbcs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4bf23fc1d09e337cb9a4a3f6512c74d3bc96824
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/mbcs.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/quopri_codec.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/quopri_codec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79af8152982c92923ff33e513a31485b145ec417
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/quopri_codec.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/raw_unicode_escape.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/raw_unicode_escape.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cc04c6ad50bfa52ee095d1e1feb91da5ac253ce
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/raw_unicode_escape.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/tis_620.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/tis_620.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c90d036dad2b73a098e10b29bacfb00edcd4e818
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/tis_620.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_16.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_16.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d383d61bb56e7d27815a42c22c0c4ff71628d29b
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_16.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_16_be.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_16_be.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53480b054e227e9b9b89429e3f7f8e189690214c
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_16_be.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_32_be.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_32_be.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d468fd761fdee25616938b6ba55a0fe75cdbca5d
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_32_be.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_7.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_7.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cbd14e45ff2aa069f5b2a0bc3fe61811b730c4f
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_7.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_8.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_8.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d22947cf3294539e564f7da95ec72b52584ffbe
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_8.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/utf_8_sig.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/utf_8_sig.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f63a91f3adec0c13f2b41bfc6fe2623660732ec
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/utf_8_sig.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/encodings/__pycache__/uu_codec.cpython-310.pyc b/parrot/lib/python3.10/encodings/__pycache__/uu_codec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2508efc4c10416f99b7e1aeebf17b33984bbf770
Binary files /dev/null and b/parrot/lib/python3.10/encodings/__pycache__/uu_codec.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/lib2to3/Grammar3.10.16.final.0.pickle b/parrot/lib/python3.10/lib2to3/Grammar3.10.16.final.0.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..83bf5040259fd33c076f2a97b05a5e88cd70efe7
--- /dev/null
+++ b/parrot/lib/python3.10/lib2to3/Grammar3.10.16.final.0.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97c8ed74d091fcfd23498029bb819c29d096c3dcb1326edee5dfb0591ade2e4b
+size 15313
diff --git a/parrot/lib/python3.10/xml/__init__.py b/parrot/lib/python3.10/xml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf6d8ddfd04c934003004472d7f03bfa3f06b5f5
--- /dev/null
+++ b/parrot/lib/python3.10/xml/__init__.py
@@ -0,0 +1,20 @@
+"""Core XML support for Python.
+
+This package contains four sub-packages:
+
+dom -- The W3C Document Object Model. This supports DOM Level 1 +
+ Namespaces.
+
+parsers -- Python wrappers for XML parsers (currently only supports Expat).
+
+sax -- The Simple API for XML, developed by XML-Dev, led by David
+ Megginson and ported to Python by Lars Marius Garshol. This
+ supports the SAX 2 API.
+
+etree -- The ElementTree XML library. This is a subset of the full
+ ElementTree XML release.
+
+"""
+
+
+__all__ = ["dom", "parsers", "sax", "etree"]
diff --git a/parrot/lib/python3.10/xml/dom/__init__.py b/parrot/lib/python3.10/xml/dom/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97cf9a6429993d97e26e1867b038d433055cf419
--- /dev/null
+++ b/parrot/lib/python3.10/xml/dom/__init__.py
@@ -0,0 +1,140 @@
+"""W3C Document Object Model implementation for Python.
+
+The Python mapping of the Document Object Model is documented in the
+Python Library Reference in the section on the xml.dom package.
+
+This package contains the following modules:
+
+minidom -- A simple implementation of the Level 1 DOM with namespace
+ support added (based on the Level 2 specification) and other
+ minor Level 2 functionality.
+
+pulldom -- DOM builder supporting on-demand tree-building for selected
+ subtrees of the document.
+
+"""
+
+
+class Node:
+ """Class giving the NodeType constants."""
+ __slots__ = ()
+
+ # DOM implementations may use this as a base class for their own
+ # Node implementations. If they don't, the constants defined here
+ # should still be used as the canonical definitions as they match
+ # the values given in the W3C recommendation. Client code can
+ # safely refer to these values in all tests of Node.nodeType
+ # values.
+
+ ELEMENT_NODE = 1
+ ATTRIBUTE_NODE = 2
+ TEXT_NODE = 3
+ CDATA_SECTION_NODE = 4
+ ENTITY_REFERENCE_NODE = 5
+ ENTITY_NODE = 6
+ PROCESSING_INSTRUCTION_NODE = 7
+ COMMENT_NODE = 8
+ DOCUMENT_NODE = 9
+ DOCUMENT_TYPE_NODE = 10
+ DOCUMENT_FRAGMENT_NODE = 11
+ NOTATION_NODE = 12
+
+
+#ExceptionCode
+INDEX_SIZE_ERR = 1
+DOMSTRING_SIZE_ERR = 2
+HIERARCHY_REQUEST_ERR = 3
+WRONG_DOCUMENT_ERR = 4
+INVALID_CHARACTER_ERR = 5
+NO_DATA_ALLOWED_ERR = 6
+NO_MODIFICATION_ALLOWED_ERR = 7
+NOT_FOUND_ERR = 8
+NOT_SUPPORTED_ERR = 9
+INUSE_ATTRIBUTE_ERR = 10
+INVALID_STATE_ERR = 11
+SYNTAX_ERR = 12
+INVALID_MODIFICATION_ERR = 13
+NAMESPACE_ERR = 14
+INVALID_ACCESS_ERR = 15
+VALIDATION_ERR = 16
+
+
+class DOMException(Exception):
+ """Abstract base class for DOM exceptions.
+ Exceptions with specific codes are specializations of this class."""
+
+ def __init__(self, *args, **kw):
+ if self.__class__ is DOMException:
+ raise RuntimeError(
+ "DOMException should not be instantiated directly")
+ Exception.__init__(self, *args, **kw)
+
+ def _get_code(self):
+ return self.code
+
+
+class IndexSizeErr(DOMException):
+ code = INDEX_SIZE_ERR
+
+class DomstringSizeErr(DOMException):
+ code = DOMSTRING_SIZE_ERR
+
+class HierarchyRequestErr(DOMException):
+ code = HIERARCHY_REQUEST_ERR
+
+class WrongDocumentErr(DOMException):
+ code = WRONG_DOCUMENT_ERR
+
+class InvalidCharacterErr(DOMException):
+ code = INVALID_CHARACTER_ERR
+
+class NoDataAllowedErr(DOMException):
+ code = NO_DATA_ALLOWED_ERR
+
+class NoModificationAllowedErr(DOMException):
+ code = NO_MODIFICATION_ALLOWED_ERR
+
+class NotFoundErr(DOMException):
+ code = NOT_FOUND_ERR
+
+class NotSupportedErr(DOMException):
+ code = NOT_SUPPORTED_ERR
+
+class InuseAttributeErr(DOMException):
+ code = INUSE_ATTRIBUTE_ERR
+
+class InvalidStateErr(DOMException):
+ code = INVALID_STATE_ERR
+
+class SyntaxErr(DOMException):
+ code = SYNTAX_ERR
+
+class InvalidModificationErr(DOMException):
+ code = INVALID_MODIFICATION_ERR
+
+class NamespaceErr(DOMException):
+ code = NAMESPACE_ERR
+
+class InvalidAccessErr(DOMException):
+ code = INVALID_ACCESS_ERR
+
+class ValidationErr(DOMException):
+ code = VALIDATION_ERR
+
+class UserDataHandler:
+ """Class giving the operation constants for UserDataHandler.handle()."""
+
+ # Based on DOM Level 3 (WD 9 April 2002)
+
+ NODE_CLONED = 1
+ NODE_IMPORTED = 2
+ NODE_DELETED = 3
+ NODE_RENAMED = 4
+
+XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
+XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
+XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
+EMPTY_NAMESPACE = None
+EMPTY_PREFIX = None
+
+from .domreg import getDOMImplementation, registerDOMImplementation
diff --git a/parrot/lib/python3.10/xml/dom/domreg.py b/parrot/lib/python3.10/xml/dom/domreg.py
new file mode 100644
index 0000000000000000000000000000000000000000..69c17eebb265daad3243e62018409d72942a93af
--- /dev/null
+++ b/parrot/lib/python3.10/xml/dom/domreg.py
@@ -0,0 +1,99 @@
+"""Registration facilities for DOM. This module should not be used
+directly. Instead, the functions getDOMImplementation and
+registerDOMImplementation should be imported from xml.dom."""
+
+# This is a list of well-known implementations. Well-known names
+# should be published by posting to xml-sig@python.org, and are
+# subsequently recorded in this file.
+
+import sys
+
+well_known_implementations = {
+ 'minidom':'xml.dom.minidom',
+ '4DOM': 'xml.dom.DOMImplementation',
+ }
+
+# DOM implementations not officially registered should register
+# themselves with their
+
+registered = {}
+
+def registerDOMImplementation(name, factory):
+ """registerDOMImplementation(name, factory)
+
+ Register the factory function with the name. The factory function
+ should return an object which implements the DOMImplementation
+ interface. The factory function can either return the same object,
+ or a new one (e.g. if that implementation supports some
+ customization)."""
+
+ registered[name] = factory
+
+def _good_enough(dom, features):
+ "_good_enough(dom, features) -> Return 1 if the dom offers the features"
+ for f,v in features:
+ if not dom.hasFeature(f,v):
+ return 0
+ return 1
+
+def getDOMImplementation(name=None, features=()):
+ """getDOMImplementation(name = None, features = ()) -> DOM implementation.
+
+ Return a suitable DOM implementation. The name is either
+ well-known, the module name of a DOM implementation, or None. If
+ it is not None, imports the corresponding module and returns
+ DOMImplementation object if the import succeeds.
+
+ If name is not given, consider the available implementations to
+ find one with the required feature set. If no implementation can
+ be found, raise an ImportError. The features list must be a sequence
+ of (feature, version) pairs which are passed to hasFeature."""
+
+ import os
+ creator = None
+ mod = well_known_implementations.get(name)
+ if mod:
+ mod = __import__(mod, {}, {}, ['getDOMImplementation'])
+ return mod.getDOMImplementation()
+ elif name:
+ return registered[name]()
+ elif not sys.flags.ignore_environment and "PYTHON_DOM" in os.environ:
+ return getDOMImplementation(name = os.environ["PYTHON_DOM"])
+
+ # User did not specify a name, try implementations in arbitrary
+ # order, returning the one that has the required features
+ if isinstance(features, str):
+ features = _parse_feature_string(features)
+ for creator in registered.values():
+ dom = creator()
+ if _good_enough(dom, features):
+ return dom
+
+ for creator in well_known_implementations.keys():
+ try:
+ dom = getDOMImplementation(name = creator)
+ except Exception: # typically ImportError, or AttributeError
+ continue
+ if _good_enough(dom, features):
+ return dom
+
+ raise ImportError("no suitable DOM implementation found")
+
+def _parse_feature_string(s):
+ features = []
+ parts = s.split()
+ i = 0
+ length = len(parts)
+ while i < length:
+ feature = parts[i]
+ if feature[0] in "0123456789":
+ raise ValueError("bad feature name: %r" % (feature,))
+ i = i + 1
+ version = None
+ if i < length:
+ v = parts[i]
+ if v[0] in "0123456789":
+ i = i + 1
+ version = v
+ features.append((feature, version))
+ return tuple(features)
diff --git a/parrot/lib/python3.10/xml/dom/minidom.py b/parrot/lib/python3.10/xml/dom/minidom.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef8a159833bbc07672cd3a17a3e49943a97c85e1
--- /dev/null
+++ b/parrot/lib/python3.10/xml/dom/minidom.py
@@ -0,0 +1,2013 @@
+"""Simple implementation of the Level 1 DOM.
+
+Namespaces and other minor Level 2 features are also supported.
+
+parse("foo.xml")
+
parseString("<foo><bar/></foo>")
+
+Todo:
+=====
+ * convenience methods for getting elements and text.
+ * more testing
+ * bring some of the writer and linearizer code into conformance with this
+ interface
+ * SAX 2 namespaces
+"""
+
+import io
+import xml.dom
+
+from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
+from xml.dom.minicompat import *
+from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
+
+# This is used by the ID-cache invalidation checks; the list isn't
+# actually complete, since the nodes being checked will never be the
+# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
+# the node being added or removed, not the node being modified.)
+#
+_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
+ xml.dom.Node.ENTITY_REFERENCE_NODE)
+
+
+class Node(xml.dom.Node):
+ namespaceURI = None # this is non-null only for elements and attributes
+ parentNode = None
+ ownerDocument = None
+ nextSibling = None
+ previousSibling = None
+
+ prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
+
+ def __bool__(self):
+ return True
+
+ def toxml(self, encoding=None, standalone=None):
+ return self.toprettyxml("", "", encoding, standalone)
+
+ def toprettyxml(self, indent="\t", newl="\n", encoding=None,
+ standalone=None):
+ if encoding is None:
+ writer = io.StringIO()
+ else:
+ writer = io.TextIOWrapper(io.BytesIO(),
+ encoding=encoding,
+ errors="xmlcharrefreplace",
+ newline='\n')
+ if self.nodeType == Node.DOCUMENT_NODE:
+ # Can pass encoding only to document, to put it into XML header
+ self.writexml(writer, "", indent, newl, encoding, standalone)
+ else:
+ self.writexml(writer, "", indent, newl)
+ if encoding is None:
+ return writer.getvalue()
+ else:
+ return writer.detach().getvalue()
+
+ def hasChildNodes(self):
+ return bool(self.childNodes)
+
+ def _get_childNodes(self):
+ return self.childNodes
+
+ def _get_firstChild(self):
+ if self.childNodes:
+ return self.childNodes[0]
+
+ def _get_lastChild(self):
+ if self.childNodes:
+ return self.childNodes[-1]
+
+ def insertBefore(self, newChild, refChild):
+ if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+ for c in tuple(newChild.childNodes):
+ self.insertBefore(c, refChild)
+ ### The DOM does not clearly specify what to return in this case
+ return newChild
+ if newChild.nodeType not in self._child_node_types:
+ raise xml.dom.HierarchyRequestErr(
+ "%s cannot be child of %s" % (repr(newChild), repr(self)))
+ if newChild.parentNode is not None:
+ newChild.parentNode.removeChild(newChild)
+ if refChild is None:
+ self.appendChild(newChild)
+ else:
+ try:
+ index = self.childNodes.index(refChild)
+ except ValueError:
+ raise xml.dom.NotFoundErr()
+ if newChild.nodeType in _nodeTypes_with_children:
+ _clear_id_cache(self)
+ self.childNodes.insert(index, newChild)
+ newChild.nextSibling = refChild
+ refChild.previousSibling = newChild
+ if index:
+ node = self.childNodes[index-1]
+ node.nextSibling = newChild
+ newChild.previousSibling = node
+ else:
+ newChild.previousSibling = None
+ newChild.parentNode = self
+ return newChild
+
+ def appendChild(self, node):
+ if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+ for c in tuple(node.childNodes):
+ self.appendChild(c)
+ ### The DOM does not clearly specify what to return in this case
+ return node
+ if node.nodeType not in self._child_node_types:
+ raise xml.dom.HierarchyRequestErr(
+ "%s cannot be child of %s" % (repr(node), repr(self)))
+ elif node.nodeType in _nodeTypes_with_children:
+ _clear_id_cache(self)
+ if node.parentNode is not None:
+ node.parentNode.removeChild(node)
+ _append_child(self, node)
+ node.nextSibling = None
+ return node
+
+ def replaceChild(self, newChild, oldChild):
+ if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+ refChild = oldChild.nextSibling
+ self.removeChild(oldChild)
+ return self.insertBefore(newChild, refChild)
+ if newChild.nodeType not in self._child_node_types:
+ raise xml.dom.HierarchyRequestErr(
+ "%s cannot be child of %s" % (repr(newChild), repr(self)))
+ if newChild is oldChild:
+ return
+ if newChild.parentNode is not None:
+ newChild.parentNode.removeChild(newChild)
+ try:
+ index = self.childNodes.index(oldChild)
+ except ValueError:
+ raise xml.dom.NotFoundErr()
+ self.childNodes[index] = newChild
+ newChild.parentNode = self
+ oldChild.parentNode = None
+ if (newChild.nodeType in _nodeTypes_with_children
+ or oldChild.nodeType in _nodeTypes_with_children):
+ _clear_id_cache(self)
+ newChild.nextSibling = oldChild.nextSibling
+ newChild.previousSibling = oldChild.previousSibling
+ oldChild.nextSibling = None
+ oldChild.previousSibling = None
+ if newChild.previousSibling:
+ newChild.previousSibling.nextSibling = newChild
+ if newChild.nextSibling:
+ newChild.nextSibling.previousSibling = newChild
+ return oldChild
+
+ def removeChild(self, oldChild):
+ try:
+ self.childNodes.remove(oldChild)
+ except ValueError:
+ raise xml.dom.NotFoundErr()
+ if oldChild.nextSibling is not None:
+ oldChild.nextSibling.previousSibling = oldChild.previousSibling
+ if oldChild.previousSibling is not None:
+ oldChild.previousSibling.nextSibling = oldChild.nextSibling
+ oldChild.nextSibling = oldChild.previousSibling = None
+ if oldChild.nodeType in _nodeTypes_with_children:
+ _clear_id_cache(self)
+
+ oldChild.parentNode = None
+ return oldChild
+
+ def normalize(self):
+ L = []
+ for child in self.childNodes:
+ if child.nodeType == Node.TEXT_NODE:
+ if not child.data:
+ # empty text node; discard
+ if L:
+ L[-1].nextSibling = child.nextSibling
+ if child.nextSibling:
+ child.nextSibling.previousSibling = child.previousSibling
+ child.unlink()
+ elif L and L[-1].nodeType == child.nodeType:
+ # collapse text node
+ node = L[-1]
+ node.data = node.data + child.data
+ node.nextSibling = child.nextSibling
+ if child.nextSibling:
+ child.nextSibling.previousSibling = node
+ child.unlink()
+ else:
+ L.append(child)
+ else:
+ L.append(child)
+ if child.nodeType == Node.ELEMENT_NODE:
+ child.normalize()
+ self.childNodes[:] = L
+
+ def cloneNode(self, deep):
+ return _clone_node(self, deep, self.ownerDocument or self)
+
+ def isSupported(self, feature, version):
+ return self.ownerDocument.implementation.hasFeature(feature, version)
+
+ def _get_localName(self):
+ # Overridden in Element and Attr where localName can be Non-Null
+ return None
+
+ # Node interfaces from Level 3 (WD 9 April 2002)
+
+ def isSameNode(self, other):
+ return self is other
+
+ def getInterface(self, feature):
+ if self.isSupported(feature, None):
+ return self
+ else:
+ return None
+
+ # The "user data" functions use a dictionary that is only present
+ # if some user data has been set, so be careful not to assume it
+ # exists.
+
+ def getUserData(self, key):
+ try:
+ return self._user_data[key][0]
+ except (AttributeError, KeyError):
+ return None
+
+ def setUserData(self, key, data, handler):
+ old = None
+ try:
+ d = self._user_data
+ except AttributeError:
+ d = {}
+ self._user_data = d
+ if key in d:
+ old = d[key][0]
+ if data is None:
+ # ignore handlers passed for None
+ handler = None
+ if old is not None:
+ del d[key]
+ else:
+ d[key] = (data, handler)
+ return old
+
+ def _call_user_data_handler(self, operation, src, dst):
+ if hasattr(self, "_user_data"):
+ for key, (data, handler) in list(self._user_data.items()):
+ if handler is not None:
+ handler.handle(operation, key, data, src, dst)
+
+ # minidom-specific API:
+
+ def unlink(self):
+ self.parentNode = self.ownerDocument = None
+ if self.childNodes:
+ for child in self.childNodes:
+ child.unlink()
+ self.childNodes = NodeList()
+ self.previousSibling = None
+ self.nextSibling = None
+
+ # A Node is its own context manager, to ensure that an unlink() call occurs.
+ # This is similar to how a file object works.
+ def __enter__(self):
+ return self
+
+ def __exit__(self, et, ev, tb):
+ self.unlink()
+
+defproperty(Node, "firstChild", doc="First child node, or None.")
+defproperty(Node, "lastChild", doc="Last child node, or None.")
+defproperty(Node, "localName", doc="Namespace-local name of this node.")
+
+
+def _append_child(self, node):
+ # fast path with less checks; usable by DOM builders if careful
+ childNodes = self.childNodes
+ if childNodes:
+ last = childNodes[-1]
+ node.previousSibling = last
+ last.nextSibling = node
+ childNodes.append(node)
+ node.parentNode = self
+
+def _in_document(node):
+ # return True iff node is part of a document tree
+ while node is not None:
+ if node.nodeType == Node.DOCUMENT_NODE:
+ return True
+ node = node.parentNode
+ return False
+
+def _write_data(writer, data):
+ "Writes datachars to writer."
+ if data:
+ data = data.replace("&", "&").replace("<", "<"). \
+ replace("\"", """).replace(">", ">")
+ writer.write(data)
+
+def _get_elements_by_tagName_helper(parent, name, rc):
+ for node in parent.childNodes:
+ if node.nodeType == Node.ELEMENT_NODE and \
+ (name == "*" or node.tagName == name):
+ rc.append(node)
+ _get_elements_by_tagName_helper(node, name, rc)
+ return rc
+
+def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
+ for node in parent.childNodes:
+ if node.nodeType == Node.ELEMENT_NODE:
+ if ((localName == "*" or node.localName == localName) and
+ (nsURI == "*" or node.namespaceURI == nsURI)):
+ rc.append(node)
+ _get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
+ return rc
+
class DocumentFragment(Node):
    """A minimal container holding zero or more sibling nodes.

    When a fragment is passed to Node.insertBefore/appendChild, its
    children are spliced into the tree in its place (see the
    DOCUMENT_FRAGMENT_NODE handling in those methods).
    """
    nodeType = Node.DOCUMENT_FRAGMENT_NODE
    nodeName = "#document-fragment"
    nodeValue = None
    attributes = None
    parentNode = None
    # Node types that may legally appear as children of a fragment.
    _child_node_types = (Node.ELEMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.NOTATION_NODE)

    def __init__(self):
        self.childNodes = NodeList()
+
+
+class Attr(Node):
+ __slots__=('_name', '_value', 'namespaceURI',
+ '_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
+ nodeType = Node.ATTRIBUTE_NODE
+ attributes = None
+ specified = False
+ _is_id = False
+
+ _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
+
+ def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
+ prefix=None):
+ self.ownerElement = None
+ self._name = qName
+ self.namespaceURI = namespaceURI
+ self._prefix = prefix
+ if localName is not None:
+ self._localName = localName
+ self.childNodes = NodeList()
+
+ # Add the single child node that represents the value of the attr
+ self.childNodes.append(Text())
+
+ # nodeValue and value are set elsewhere
+
+ def _get_localName(self):
+ try:
+ return self._localName
+ except AttributeError:
+ return self.nodeName.split(":", 1)[-1]
+
+ def _get_specified(self):
+ return self.specified
+
+ def _get_name(self):
+ return self._name
+
+ def _set_name(self, value):
+ self._name = value
+ if self.ownerElement is not None:
+ _clear_id_cache(self.ownerElement)
+
+ nodeName = name = property(_get_name, _set_name)
+
+ def _get_value(self):
+ return self._value
+
+ def _set_value(self, value):
+ self._value = value
+ self.childNodes[0].data = value
+ if self.ownerElement is not None:
+ _clear_id_cache(self.ownerElement)
+ self.childNodes[0].data = value
+
+ nodeValue = value = property(_get_value, _set_value)
+
+ def _get_prefix(self):
+ return self._prefix
+
+ def _set_prefix(self, prefix):
+ nsuri = self.namespaceURI
+ if prefix == "xmlns":
+ if nsuri and nsuri != XMLNS_NAMESPACE:
+ raise xml.dom.NamespaceErr(
+ "illegal use of 'xmlns' prefix for the wrong namespace")
+ self._prefix = prefix
+ if prefix is None:
+ newName = self.localName
+ else:
+ newName = "%s:%s" % (prefix, self.localName)
+ if self.ownerElement:
+ _clear_id_cache(self.ownerElement)
+ self.name = newName
+
+ prefix = property(_get_prefix, _set_prefix)
+
+ def unlink(self):
+ # This implementation does not call the base implementation
+ # since most of that is not needed, and the expense of the
+ # method call is not warranted. We duplicate the removal of
+ # children, but that's all we needed from the base class.
+ elem = self.ownerElement
+ if elem is not None:
+ del elem._attrs[self.nodeName]
+ del elem._attrsNS[(self.namespaceURI, self.localName)]
+ if self._is_id:
+ self._is_id = False
+ elem._magic_id_nodes -= 1
+ self.ownerDocument._magic_id_count -= 1
+ for child in self.childNodes:
+ child.unlink()
+ del self.childNodes[:]
+
+ def _get_isId(self):
+ if self._is_id:
+ return True
+ doc = self.ownerDocument
+ elem = self.ownerElement
+ if doc is None or elem is None:
+ return False
+
+ info = doc._get_elem_info(elem)
+ if info is None:
+ return False
+ if self.namespaceURI:
+ return info.isIdNS(self.namespaceURI, self.localName)
+ else:
+ return info.isId(self.nodeName)
+
+ def _get_schemaType(self):
+ doc = self.ownerDocument
+ elem = self.ownerElement
+ if doc is None or elem is None:
+ return _no_type
+
+ info = doc._get_elem_info(elem)
+ if info is None:
+ return _no_type
+ if self.namespaceURI:
+ return info.getAttributeTypeNS(self.namespaceURI, self.localName)
+ else:
+ return info.getAttributeType(self.nodeName)
+
+defproperty(Attr, "isId", doc="True if this attribute is an ID.")
+defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
+defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
+
+
+class NamedNodeMap(object):
+ """The attribute list is a transient interface to the underlying
+ dictionaries. Mutations here will change the underlying element's
+ dictionary.
+
+ Ordering is imposed artificially and does not reflect the order of
+ attributes as found in an input document.
+ """
+
+ __slots__ = ('_attrs', '_attrsNS', '_ownerElement')
+
+ def __init__(self, attrs, attrsNS, ownerElement):
+ self._attrs = attrs
+ self._attrsNS = attrsNS
+ self._ownerElement = ownerElement
+
+ def _get_length(self):
+ return len(self._attrs)
+
+ def item(self, index):
+ try:
+ return self[list(self._attrs.keys())[index]]
+ except IndexError:
+ return None
+
+ def items(self):
+ L = []
+ for node in self._attrs.values():
+ L.append((node.nodeName, node.value))
+ return L
+
+ def itemsNS(self):
+ L = []
+ for node in self._attrs.values():
+ L.append(((node.namespaceURI, node.localName), node.value))
+ return L
+
+ def __contains__(self, key):
+ if isinstance(key, str):
+ return key in self._attrs
+ else:
+ return key in self._attrsNS
+
+ def keys(self):
+ return self._attrs.keys()
+
+ def keysNS(self):
+ return self._attrsNS.keys()
+
+ def values(self):
+ return self._attrs.values()
+
+ def get(self, name, value=None):
+ return self._attrs.get(name, value)
+
+ __len__ = _get_length
+
+ def _cmp(self, other):
+ if self._attrs is getattr(other, "_attrs", None):
+ return 0
+ else:
+ return (id(self) > id(other)) - (id(self) < id(other))
+
+ def __eq__(self, other):
+ return self._cmp(other) == 0
+
+ def __ge__(self, other):
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ return self._cmp(other) > 0
+
+ def __le__(self, other):
+ return self._cmp(other) <= 0
+
+ def __lt__(self, other):
+ return self._cmp(other) < 0
+
+ def __getitem__(self, attname_or_tuple):
+ if isinstance(attname_or_tuple, tuple):
+ return self._attrsNS[attname_or_tuple]
+ else:
+ return self._attrs[attname_or_tuple]
+
+ # same as set
+ def __setitem__(self, attname, value):
+ if isinstance(value, str):
+ try:
+ node = self._attrs[attname]
+ except KeyError:
+ node = Attr(attname)
+ node.ownerDocument = self._ownerElement.ownerDocument
+ self.setNamedItem(node)
+ node.value = value
+ else:
+ if not isinstance(value, Attr):
+ raise TypeError("value must be a string or Attr object")
+ node = value
+ self.setNamedItem(node)
+
+ def getNamedItem(self, name):
+ try:
+ return self._attrs[name]
+ except KeyError:
+ return None
+
+ def getNamedItemNS(self, namespaceURI, localName):
+ try:
+ return self._attrsNS[(namespaceURI, localName)]
+ except KeyError:
+ return None
+
+ def removeNamedItem(self, name):
+ n = self.getNamedItem(name)
+ if n is not None:
+ _clear_id_cache(self._ownerElement)
+ del self._attrs[n.nodeName]
+ del self._attrsNS[(n.namespaceURI, n.localName)]
+ if hasattr(n, 'ownerElement'):
+ n.ownerElement = None
+ return n
+ else:
+ raise xml.dom.NotFoundErr()
+
+ def removeNamedItemNS(self, namespaceURI, localName):
+ n = self.getNamedItemNS(namespaceURI, localName)
+ if n is not None:
+ _clear_id_cache(self._ownerElement)
+ del self._attrsNS[(n.namespaceURI, n.localName)]
+ del self._attrs[n.nodeName]
+ if hasattr(n, 'ownerElement'):
+ n.ownerElement = None
+ return n
+ else:
+ raise xml.dom.NotFoundErr()
+
+ def setNamedItem(self, node):
+ if not isinstance(node, Attr):
+ raise xml.dom.HierarchyRequestErr(
+ "%s cannot be child of %s" % (repr(node), repr(self)))
+ old = self._attrs.get(node.name)
+ if old:
+ old.unlink()
+ self._attrs[node.name] = node
+ self._attrsNS[(node.namespaceURI, node.localName)] = node
+ node.ownerElement = self._ownerElement
+ _clear_id_cache(node.ownerElement)
+ return old
+
+ def setNamedItemNS(self, node):
+ return self.setNamedItem(node)
+
+ def __delitem__(self, attname_or_tuple):
+ node = self[attname_or_tuple]
+ _clear_id_cache(node.ownerElement)
+ node.unlink()
+
+ def __getstate__(self):
+ return self._attrs, self._attrsNS, self._ownerElement
+
+ def __setstate__(self, state):
+ self._attrs, self._attrsNS, self._ownerElement = state
+
+defproperty(NamedNodeMap, "length",
+ doc="Number of nodes in the NamedNodeMap.")
+
+AttributeList = NamedNodeMap
+
+
class TypeInfo(object):
    """Schema type of an attribute or element: a (namespace, name)
    pair, either component of which may be None."""

    __slots__ = 'namespace', 'name'

    def __init__(self, namespace, name):
        self.namespace = namespace
        self.name = name

    def __repr__(self):
        cls = self.__class__.__name__
        if self.namespace:
            return "<%s %r (from %r)>" % (cls, self.name, self.namespace)
        return "<%s %r>" % (cls, self.name)

    def _get_name(self):
        # Accessor backing the DOM 'name' attribute.
        return self.name

    def _get_namespace(self):
        # Accessor backing the DOM 'namespace' attribute.
        return self.namespace
+
+_no_type = TypeInfo(None, None)
+
+class Element(Node):
+ __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
+ 'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
+ 'nextSibling', 'previousSibling')
+ nodeType = Node.ELEMENT_NODE
+ nodeValue = None
+ schemaType = _no_type
+
+ _magic_id_nodes = 0
+
+ _child_node_types = (Node.ELEMENT_NODE,
+ Node.PROCESSING_INSTRUCTION_NODE,
+ Node.COMMENT_NODE,
+ Node.TEXT_NODE,
+ Node.CDATA_SECTION_NODE,
+ Node.ENTITY_REFERENCE_NODE)
+
+ def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
+ localName=None):
+ self.parentNode = None
+ self.tagName = self.nodeName = tagName
+ self.prefix = prefix
+ self.namespaceURI = namespaceURI
+ self.childNodes = NodeList()
+ self.nextSibling = self.previousSibling = None
+
+ # Attribute dictionaries are lazily created
+ # attributes are double-indexed:
+ # tagName -> Attribute
+ # URI,localName -> Attribute
+ # in the future: consider lazy generation
+ # of attribute objects this is too tricky
+ # for now because of headaches with
+ # namespaces.
+ self._attrs = None
+ self._attrsNS = None
+
+ def _ensure_attributes(self):
+ if self._attrs is None:
+ self._attrs = {}
+ self._attrsNS = {}
+
+ def _get_localName(self):
+ try:
+ return self._localName
+ except AttributeError:
+ return self.tagName.split(":", 1)[-1]
+
+ def _get_tagName(self):
+ return self.tagName
+
+ def unlink(self):
+ if self._attrs is not None:
+ for attr in list(self._attrs.values()):
+ attr.unlink()
+ self._attrs = None
+ self._attrsNS = None
+ Node.unlink(self)
+
+ def getAttribute(self, attname):
+ """Returns the value of the specified attribute.
+
+ Returns the value of the element's attribute named attname as
+ a string. An empty string is returned if the element does not
+ have such an attribute. Note that an empty string may also be
+ returned as an explicitly given attribute value, use the
+ hasAttribute method to distinguish these two cases.
+ """
+ if self._attrs is None:
+ return ""
+ try:
+ return self._attrs[attname].value
+ except KeyError:
+ return ""
+
+ def getAttributeNS(self, namespaceURI, localName):
+ if self._attrsNS is None:
+ return ""
+ try:
+ return self._attrsNS[(namespaceURI, localName)].value
+ except KeyError:
+ return ""
+
+ def setAttribute(self, attname, value):
+ attr = self.getAttributeNode(attname)
+ if attr is None:
+ attr = Attr(attname)
+ attr.value = value # also sets nodeValue
+ attr.ownerDocument = self.ownerDocument
+ self.setAttributeNode(attr)
+ elif value != attr.value:
+ attr.value = value
+ if attr.isId:
+ _clear_id_cache(self)
+
+ def setAttributeNS(self, namespaceURI, qualifiedName, value):
+ prefix, localname = _nssplit(qualifiedName)
+ attr = self.getAttributeNodeNS(namespaceURI, localname)
+ if attr is None:
+ attr = Attr(qualifiedName, namespaceURI, localname, prefix)
+ attr.value = value
+ attr.ownerDocument = self.ownerDocument
+ self.setAttributeNode(attr)
+ else:
+ if value != attr.value:
+ attr.value = value
+ if attr.isId:
+ _clear_id_cache(self)
+ if attr.prefix != prefix:
+ attr.prefix = prefix
+ attr.nodeName = qualifiedName
+
+ def getAttributeNode(self, attrname):
+ if self._attrs is None:
+ return None
+ return self._attrs.get(attrname)
+
+ def getAttributeNodeNS(self, namespaceURI, localName):
+ if self._attrsNS is None:
+ return None
+ return self._attrsNS.get((namespaceURI, localName))
+
+ def setAttributeNode(self, attr):
+ if attr.ownerElement not in (None, self):
+ raise xml.dom.InuseAttributeErr("attribute node already owned")
+ self._ensure_attributes()
+ old1 = self._attrs.get(attr.name, None)
+ if old1 is not None:
+ self.removeAttributeNode(old1)
+ old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
+ if old2 is not None and old2 is not old1:
+ self.removeAttributeNode(old2)
+ _set_attribute_node(self, attr)
+
+ if old1 is not attr:
+ # It might have already been part of this node, in which case
+ # it doesn't represent a change, and should not be returned.
+ return old1
+ if old2 is not attr:
+ return old2
+
+ setAttributeNodeNS = setAttributeNode
+
+ def removeAttribute(self, name):
+ if self._attrsNS is None:
+ raise xml.dom.NotFoundErr()
+ try:
+ attr = self._attrs[name]
+ except KeyError:
+ raise xml.dom.NotFoundErr()
+ self.removeAttributeNode(attr)
+
+ def removeAttributeNS(self, namespaceURI, localName):
+ if self._attrsNS is None:
+ raise xml.dom.NotFoundErr()
+ try:
+ attr = self._attrsNS[(namespaceURI, localName)]
+ except KeyError:
+ raise xml.dom.NotFoundErr()
+ self.removeAttributeNode(attr)
+
+ def removeAttributeNode(self, node):
+ if node is None:
+ raise xml.dom.NotFoundErr()
+ try:
+ self._attrs[node.name]
+ except KeyError:
+ raise xml.dom.NotFoundErr()
+ _clear_id_cache(self)
+ node.unlink()
+ # Restore this since the node is still useful and otherwise
+ # unlinked
+ node.ownerDocument = self.ownerDocument
+ return node
+
+ removeAttributeNodeNS = removeAttributeNode
+
+ def hasAttribute(self, name):
+ """Checks whether the element has an attribute with the specified name.
+
+ Returns True if the element has an attribute with the specified name.
+ Otherwise, returns False.
+ """
+ if self._attrs is None:
+ return False
+ return name in self._attrs
+
+ def hasAttributeNS(self, namespaceURI, localName):
+ if self._attrsNS is None:
+ return False
+ return (namespaceURI, localName) in self._attrsNS
+
+ def getElementsByTagName(self, name):
+ """Returns all descendant elements with the given tag name.
+
+ Returns the list of all descendant elements (not direct children
+ only) with the specified tag name.
+ """
+ return _get_elements_by_tagName_helper(self, name, NodeList())
+
+ def getElementsByTagNameNS(self, namespaceURI, localName):
+ return _get_elements_by_tagName_ns_helper(
+ self, namespaceURI, localName, NodeList())
+
+ def __repr__(self):
+ return "" % (self.tagName, id(self))
+
+ def writexml(self, writer, indent="", addindent="", newl=""):
+ """Write an XML element to a file-like object
+
+ Write the element to the writer object that must provide
+ a write method (e.g. a file or StringIO object).
+ """
+ # indent = current indentation
+ # addindent = indentation to add to higher levels
+ # newl = newline string
+ writer.write(indent+"<" + self.tagName)
+
+ attrs = self._get_attributes()
+
+ for a_name in attrs.keys():
+ writer.write(" %s=\"" % a_name)
+ _write_data(writer, attrs[a_name].value)
+ writer.write("\"")
+ if self.childNodes:
+ writer.write(">")
+ if (len(self.childNodes) == 1 and
+ self.childNodes[0].nodeType in (
+ Node.TEXT_NODE, Node.CDATA_SECTION_NODE)):
+ self.childNodes[0].writexml(writer, '', '', '')
+ else:
+ writer.write(newl)
+ for node in self.childNodes:
+ node.writexml(writer, indent+addindent, addindent, newl)
+ writer.write(indent)
+ writer.write("%s>%s" % (self.tagName, newl))
+ else:
+ writer.write("/>%s"%(newl))
+
+ def _get_attributes(self):
+ self._ensure_attributes()
+ return NamedNodeMap(self._attrs, self._attrsNS, self)
+
+ def hasAttributes(self):
+ if self._attrs:
+ return True
+ else:
+ return False
+
+ # DOM Level 3 attributes, based on the 22 Oct 2002 draft
+
+ def setIdAttribute(self, name):
+ idAttr = self.getAttributeNode(name)
+ self.setIdAttributeNode(idAttr)
+
+ def setIdAttributeNS(self, namespaceURI, localName):
+ idAttr = self.getAttributeNodeNS(namespaceURI, localName)
+ self.setIdAttributeNode(idAttr)
+
+ def setIdAttributeNode(self, idAttr):
+ if idAttr is None or not self.isSameNode(idAttr.ownerElement):
+ raise xml.dom.NotFoundErr()
+ if _get_containing_entref(self) is not None:
+ raise xml.dom.NoModificationAllowedErr()
+ if not idAttr._is_id:
+ idAttr._is_id = True
+ self._magic_id_nodes += 1
+ self.ownerDocument._magic_id_count += 1
+ _clear_id_cache(self)
+
+defproperty(Element, "attributes",
+ doc="NamedNodeMap of attributes on the element.")
+defproperty(Element, "localName",
+ doc="Namespace-local name of this element.")
+
+
+def _set_attribute_node(element, attr):
+ _clear_id_cache(element)
+ element._ensure_attributes()
+ element._attrs[attr.name] = attr
+ element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
+
+ # This creates a circular reference, but Element.unlink()
+ # breaks the cycle since the references to the attribute
+ # dictionaries are tossed.
+ attr.ownerElement = element
+
class Childless:
    """Mixin that makes childless-ness easy to implement and avoids
    the complexity of the Node methods that deal with children.
    """
    __slots__ = ()

    attributes = None
    childNodes = EmptyNodeList()
    firstChild = None
    lastChild = None

    def _get_firstChild(self):
        # A childless node has no first child by definition.
        return None

    def _get_lastChild(self):
        # ...and no last child either.
        return None

    def appendChild(self, node):
        # Adding a child to a childless node is a hierarchy error.
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes cannot have children")

    def hasChildNodes(self):
        return False

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")

    def removeChild(self, oldChild):
        # Nothing can have been inserted, so nothing can be found here.
        raise xml.dom.NotFoundErr(
            self.nodeName + " nodes do not have children")

    def normalize(self):
        # For childless nodes, normalize() has nothing to do.
        pass

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")
+
+
class ProcessingInstruction(Childless, Node):
    """A processing instruction node, serialized as <?target data?>."""
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    __slots__ = ('target', 'data')

    def __init__(self, target, data):
        self.target = target
        self.data = data

    # nodeValue is an alias for data
    def _get_nodeValue(self):
        return self.data
    def _set_nodeValue(self, value):
        self.data = value
    nodeValue = property(_get_nodeValue, _set_nodeValue)

    # nodeName is an alias for target
    def _get_nodeName(self):
        return self.target
    def _set_nodeName(self, value):
        self.target = value
    nodeName = property(_get_nodeName, _set_nodeName)

    def writexml(self, writer, indent="", addindent="", newl=""):
        # The "<?" opener had been lost from this format string.
        writer.write("%s<?%s %s?>%s" % (indent, self.target, self.data, newl))
+
+
class CharacterData(Childless, Node):
    """Base class for nodes that carry a string payload (Text,
    Comment, CDATASection).

    The payload is exposed as .data and .nodeValue; len(node) and
    node.length give its size in characters.
    """
    __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')

    def __init__(self):
        self.ownerDocument = self.parentNode = None
        self.previousSibling = self.nextSibling = None
        self._data = ''
        Node.__init__(self)

    def _get_length(self):
        return len(self.data)
    __len__ = _get_length

    def _get_data(self):
        return self._data
    def _set_data(self, data):
        self._data = data

    data = nodeValue = property(_get_data, _set_data)

    def __repr__(self):
        data = self.data
        if len(data) > 10:
            dotdotdot = "..."
        else:
            dotdotdot = ""
        # The surrounding markup had been lost from this format string;
        # restore the conventional '<DOM ... node "...">' form.
        return '<DOM %s node "%r%s">' % (
            self.__class__.__name__, data[0:10], dotdotdot)

    def substringData(self, offset, count):
        """Return up to *count* characters of data starting at *offset*.

        Raises xml.dom.IndexSizeErr for a negative offset or count, or
        an offset at/past the end of the data."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        return self.data[offset:offset+count]

    def appendData(self, arg):
        """Append *arg* to the node's data."""
        self.data = self.data + arg

    def insertData(self, offset, arg):
        """Insert *arg* into the data at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if arg:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset:])

    def deleteData(self, offset, count):
        """Delete *count* characters of data starting at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = self.data[:offset] + self.data[offset+count:]

    def replaceData(self, offset, count, arg):
        """Replace *count* characters starting at *offset* with *arg*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset+count:])
+
+defproperty(CharacterData, "length", doc="Length of the string data.")
+
+
class Text(CharacterData):
    # Text node: the character data itself lives in the inherited .data.
    __slots__ = ()

    nodeType = Node.TEXT_NODE
    nodeName = "#text"
    attributes = None

    def splitText(self, offset):
        """Split this node at *offset*: keep the head here, return a new
        sibling Text node of the same class holding the tail.

        Raises xml.dom.IndexSizeErr for an offset outside [0, len(data)].
        """
        if offset < 0 or offset > len(self.data):
            raise xml.dom.IndexSizeErr("illegal offset value")
        newText = self.__class__()
        newText.data = self.data[offset:]
        newText.ownerDocument = self.ownerDocument
        next = self.nextSibling
        # Only splice the new node into the tree when this node is
        # actually linked into a parent's childNodes.
        if self.parentNode and self in self.parentNode.childNodes:
            if next is None:
                self.parentNode.appendChild(newText)
            else:
                self.parentNode.insertBefore(newText, next)
        self.data = self.data[:offset]
        return newText

    def writexml(self, writer, indent="", addindent="", newl=""):
        # Serialize the (escaped, via _write_data) text on a single line.
        _write_data(writer, "%s%s%s" % (indent, self.data, newl))

    # DOM Level 3 (WD 9 April 2002)

    def _get_wholeText(self):
        # Concatenate this node's data with all logically adjacent
        # Text/CDATA siblings, in document order.
        L = [self.data]
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.insert(0, n.data)
                n = n.previousSibling
            else:
                break
        n = self.nextSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.append(n.data)
                n = n.nextSibling
            else:
                break
        return ''.join(L)

    def replaceWholeText(self, content):
        """Replace this node and all adjacent Text/CDATA siblings with a
        single node holding *content*; returns self, or None (and removes
        this node too) when *content* is empty."""
        # XXX This needs to be seriously changed if minidom ever
        # supports EntityReference nodes.
        parent = self.parentNode
        # Remove all adjacent text-ish siblings to the left...
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.previousSibling
                parent.removeChild(n)
                n = next
            else:
                break
        # ...then to the right (removing self first when emptying).
        n = self.nextSibling
        if not content:
            parent.removeChild(self)
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.nextSibling
                parent.removeChild(n)
                n = next
            else:
                break
        if content:
            self.data = content
            return self
        else:
            return None

    def _get_isWhitespaceInElementContent(self):
        # True only when this node is all-whitespace AND the containing
        # element's recorded ElementInfo declares element-only content.
        if self.data.strip():
            return False
        elem = _get_containing_element(self)
        if elem is None:
            return False
        info = self.ownerDocument._get_elem_info(elem)
        if info is None:
            return False
        else:
            return info.isElementContent()
+
# Read-only DOM Level 3 properties on Text, backed by the _get_* accessors.
defproperty(Text, "isWhitespaceInElementContent",
            doc="True iff this text node contains only whitespace"
                " and is in element content.")
defproperty(Text, "wholeText",
            doc="The text of all logically-adjacent text nodes.")
+
+
def _get_containing_element(node):
    """Return the nearest ancestor of *node* that is an element, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ELEMENT_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
+
def _get_containing_entref(node):
    """Return the nearest ancestor entity-reference node, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ENTITY_REFERENCE_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
+
+
class Comment(CharacterData):
    """Comment node; serializes as <!--data-->."""

    nodeType = Node.COMMENT_NODE
    nodeName = "#comment"

    def __init__(self, data):
        CharacterData.__init__(self)
        self._data = data

    def writexml(self, writer, indent="", addindent="", newl=""):
        # "--" inside a comment is forbidden by the XML grammar.
        if "--" in self.data:
            raise ValueError("'--' is not allowed in a comment node")
        # Bug fix: the format string had lost its "<!--"/"-->" delimiters
        # and supplied only two placeholders for three arguments, which
        # raises TypeError at serialization time.
        writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
+
+
class CDATASection(Text):
    """CDATA section node; serializes as <![CDATA[data]]>."""

    __slots__ = ()

    nodeType = Node.CDATA_SECTION_NODE
    nodeName = "#cdata-section"

    def writexml(self, writer, indent="", addindent="", newl=""):
        # "]]>" would prematurely terminate the section.
        if self.data.find("]]>") >= 0:
            raise ValueError("']]>' not allowed in a CDATA section")
        # Bug fix: the format string was empty ('"" % self.data' raises
        # TypeError and emitted nothing); restore the CDATA wrapper.
        writer.write("<![CDATA[%s]]>" % self.data)
+
+
class ReadOnlySequentialNamedNodeMap(object):
    """Read-only NamedNodeMap backed by a flat sequence of nodes."""

    __slots__ = '_seq',

    def __init__(self, seq=()):
        # seq should be a list or tuple
        self._seq = seq

    def __len__(self):
        return len(self._seq)

    def _get_length(self):
        return len(self._seq)

    def getNamedItem(self, name):
        """Return the node whose nodeName equals *name*, or None."""
        return next((node for node in self._seq if node.nodeName == name),
                    None)

    def getNamedItemNS(self, namespaceURI, localName):
        """Return the node matching (namespaceURI, localName), or None."""
        for node in self._seq:
            if (node.namespaceURI, node.localName) == (namespaceURI, localName):
                return node
        return None

    def __getitem__(self, name_or_tuple):
        # A tuple key is an (namespaceURI, localName) pair; anything else
        # is a plain nodeName.
        if isinstance(name_or_tuple, tuple):
            found = self.getNamedItemNS(*name_or_tuple)
        else:
            found = self.getNamedItem(name_or_tuple)
        if found is None:
            raise KeyError(name_or_tuple)
        return found

    def item(self, index):
        """Return the node at *index*; None for negative/out-of-range."""
        if 0 <= index < len(self._seq):
            return self._seq[index]
        return None

    # The map is read-only: every mutation entry point refuses.

    def removeNamedItem(self, name):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def removeNamedItemNS(self, namespaceURI, localName):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItem(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItemNS(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    # Pickle support: the underlying sequence is the whole state.
    def __getstate__(self):
        return [self._seq]

    def __setstate__(self, state):
        self._seq = state[0]
+
# Read-only "length" property, backed by _get_length above.
defproperty(ReadOnlySequentialNamedNodeMap, "length",
            doc="Number of entries in the NamedNodeMap.")
+
+
class Identified:
    """Mix-in class that supports the publicId and systemId attributes."""

    __slots__ = 'publicId', 'systemId'

    def _identified_mixin_init(self, publicId, systemId):
        # Store both external identifiers in a single assignment.
        self.publicId, self.systemId = publicId, systemId

    def _get_publicId(self):
        """Accessor backing the read-only publicId property."""
        return self.publicId

    def _get_systemId(self):
        """Accessor backing the read-only systemId property."""
        return self.systemId
+
class DocumentType(Identified, Childless, Node):
    """DOCTYPE node: document type name plus entity/notation maps."""

    nodeType = Node.DOCUMENT_TYPE_NODE
    nodeValue = None
    name = None
    publicId = None
    systemId = None
    internalSubset = None

    def __init__(self, qualifiedName):
        self.entities = ReadOnlySequentialNamedNodeMap()
        self.notations = ReadOnlySequentialNamedNodeMap()
        if qualifiedName:
            prefix, localname = _nssplit(qualifiedName)
            self.name = localname
        self.nodeName = self.name

    def _get_internalSubset(self):
        return self.internalSubset

    def cloneNode(self, deep):
        """Clone this doctype; only legal while it is unowned."""
        if self.ownerDocument is None:
            # it's ok
            clone = DocumentType(None)
            clone.name = self.name
            clone.nodeName = self.name
            operation = xml.dom.UserDataHandler.NODE_CLONED
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in self.notations._seq:
                    notation = Notation(n.nodeName, n.publicId, n.systemId)
                    clone.notations._seq.append(notation)
                    n._call_user_data_handler(operation, n, notation)
                for e in self.entities._seq:
                    entity = Entity(e.nodeName, e.publicId, e.systemId,
                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    clone.entities._seq.append(entity)
                    e._call_user_data_handler(operation, e, entity)
            self._call_user_data_handler(operation, self, clone)
            return clone
        else:
            return None

    def writexml(self, writer, indent="", addindent="", newl=""):
        # Bug fix: this method had been reduced to writing ""+newl, which
        # dropped the entire DOCTYPE declaration.  Restore the standard
        # <!DOCTYPE name PUBLIC/SYSTEM ... [internal subset]> output.
        writer.write("<!DOCTYPE ")
        writer.write(self.name)
        if self.publicId:
            writer.write("%s  PUBLIC '%s'%s  '%s'"
                         % (newl, self.publicId, newl, self.systemId))
        elif self.systemId:
            writer.write("%s  SYSTEM '%s'" % (newl, self.systemId))
        if self.internalSubset is not None:
            writer.write(" [")
            writer.write(self.internalSubset)
            writer.write("]")
        writer.write(">"+newl)
+
class Entity(Identified, Node):
    """Entity declared in a DTD.  Its child list is read-only."""

    attributes = None
    nodeType = Node.ENTITY_NODE
    nodeValue = None

    actualEncoding = None
    encoding = None
    version = None

    def __init__(self, name, publicId, systemId, notation):
        self.nodeName = name
        self.notationName = notation
        self.childNodes = NodeList()
        self._identified_mixin_init(publicId, systemId)

    def _get_actualEncoding(self):
        """Accessor backing the actualEncoding property."""
        return self.actualEncoding

    def _get_encoding(self):
        """Accessor backing the encoding property."""
        return self.encoding

    def _get_version(self):
        """Accessor backing the version property."""
        return self.version

    # Entity nodes are read-only: every child mutation refuses.

    def appendChild(self, newChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot append children to an entity node")

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot insert children below an entity node")

    def removeChild(self, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot remove children from an entity node")

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot replace children of an entity node")
+
class Notation(Identified, Childless, Node):
    """Notation declared in a DTD: a name plus external identifiers."""

    nodeType = Node.NOTATION_NODE
    nodeValue = None

    def __init__(self, name, publicId, systemId):
        self.nodeName = name
        self._identified_mixin_init(publicId, systemId)
+
+
class DOMImplementation(DOMImplementationLS):
    """Factory object for documents and doctypes (DOM "implementation")."""

    # (feature, version) pairs we claim to support; a None version entry
    # answers queries that do not care about the version.
    _features = [("core", "1.0"),
                 ("core", "2.0"),
                 ("core", None),
                 ("xml", "1.0"),
                 ("xml", "2.0"),
                 ("xml", None),
                 ("ls-load", "3.0"),
                 ("ls-load", None),
                 ]

    def hasFeature(self, feature, version):
        """Return True when (feature, version) is supported.

        An empty-string version means "any version".
        """
        if version == "":
            version = None
        return (feature.lower(), version) in self._features

    def createDocument(self, namespaceURI, qualifiedName, doctype):
        """Create a Document, optionally with a root element and doctype."""
        if doctype and doctype.parentNode is not None:
            raise xml.dom.WrongDocumentErr(
                "doctype object owned by another DOM tree")
        document = self._create_document()

        # Only the all-None call produces a document with no root element.
        want_root = not (namespaceURI is None
                         and qualifiedName is None
                         and doctype is None)

        if want_root and not qualifiedName:
            # The spec is unclear what to raise here; SyntaxErr
            # would be the other obvious candidate. Since Xerces raises
            # InvalidCharacterErr, and since SyntaxErr is not listed
            # for createDocument, that seems to be the better choice.
            # XXX: need to check for illegal characters here and in
            # createElement.
            raise xml.dom.InvalidCharacterErr("Element with no name")

        if want_root:
            prefix, localname = _nssplit(qualifiedName)
            if prefix == "xml" \
               and namespaceURI != "http://www.w3.org/XML/1998/namespace":
                raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
            if prefix and not namespaceURI:
                raise xml.dom.NamespaceErr(
                    "illegal use of prefix without namespaces")
            root = document.createElementNS(namespaceURI, qualifiedName)
            if doctype:
                document.appendChild(doctype)
            document.appendChild(root)

        if doctype:
            doctype.parentNode = doctype.ownerDocument = document

        document.doctype = doctype
        document.implementation = self
        return document

    def createDocumentType(self, qualifiedName, publicId, systemId):
        """Create a detached DocumentType node."""
        doctype = DocumentType(qualifiedName)
        doctype.publicId = publicId
        doctype.systemId = systemId
        return doctype

    # DOM Level 3 (WD 9 April 2002)

    def getInterface(self, feature):
        """Return self when *feature* is supported, else None."""
        return self if self.hasFeature(feature, None) else None

    # internal
    def _create_document(self):
        return Document()
+
class ElementInfo(object):
    """Object that represents content-model information for an element.

    This implementation is not expected to be used in practice; DOM
    builders should provide implementations which do the right thing
    using information available to it.

    """

    __slots__ = 'tagName',

    def __init__(self, name):
        self.tagName = name

    def getAttributeType(self, aname):
        # Without DTD information every attribute has the "no type" type.
        return _no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        return _no_type

    def isElementContent(self):
        """Return True if the element allows only element content."""
        return False

    def isEmpty(self):
        """Returns true iff this element is declared to have an EMPTY
        content model."""
        return False

    def isId(self, aname):
        """Returns true iff the named attribute is a DTD-style ID."""
        return False

    def isIdNS(self, namespaceURI, localName):
        """Returns true iff the identified attribute is a DTD-style ID."""
        return False

    # Pickle support: the tag name is the entire state.
    def __getstate__(self):
        return self.tagName

    def __setstate__(self, state):
        self.tagName = state
+
def _clear_id_cache(node):
    """Invalidate the owning document's getElementById cache."""
    if node.nodeType == Node.DOCUMENT_NODE:
        doc = node
    elif _in_document(node):
        doc = node.ownerDocument
    else:
        # Detached node: no document cache to invalidate.
        return
    doc._id_cache.clear()
    doc._id_search_stack = None
+
class Document(Node, DocumentLS):
    """Top-level DOM node representing an entire XML document."""

    __slots__ = ('_elem_info', 'doctype',
                 '_id_search_stack', 'childNodes', '_id_cache')
    # Node types that may appear as direct children of a document.
    _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)

    implementation = DOMImplementation()
    nodeType = Node.DOCUMENT_NODE
    nodeName = "#document"
    nodeValue = None
    attributes = None
    parentNode = None
    previousSibling = nextSibling = None

    # Document attributes from Level 3 (WD 9 April 2002)

    actualEncoding = None
    encoding = None
    standalone = None
    version = None
    strictErrorChecking = False
    errorHandler = None
    documentURI = None

    # Counter consulted by getElementById for "magic" (non-DTD) ID
    # attributes; presumably incremented by Element.setIdAttribute* --
    # not visible in this file chunk.
    _magic_id_count = 0

    def __init__(self):
        self.doctype = None
        self.childNodes = NodeList()
        # mapping of (namespaceURI, localName) -> ElementInfo
        # and tagName -> ElementInfo
        self._elem_info = {}
        self._id_cache = {}
        self._id_search_stack = None

    def _get_elem_info(self, element):
        """Return the ElementInfo recorded for *element*, or None."""
        if element.namespaceURI:
            key = element.namespaceURI, element.localName
        else:
            key = element.tagName
        return self._elem_info.get(key)

    def _get_actualEncoding(self):
        return self.actualEncoding

    def _get_doctype(self):
        return self.doctype

    def _get_documentURI(self):
        return self.documentURI

    def _get_encoding(self):
        return self.encoding

    def _get_errorHandler(self):
        return self.errorHandler

    def _get_standalone(self):
        return self.standalone

    def _get_strictErrorChecking(self):
        return self.strictErrorChecking

    def _get_version(self):
        return self.version

    def appendChild(self, node):
        """Append *node*, enforcing document-level hierarchy rules."""
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        if node.parentNode is not None:
            # This needs to be done before the next test since this
            # may *be* the document element, in which case it should
            # end up re-ordered to the end.
            node.parentNode.removeChild(node)

        if node.nodeType == Node.ELEMENT_NODE \
           and self._get_documentElement():
            raise xml.dom.HierarchyRequestErr(
                "two document elements disallowed")
        return Node.appendChild(self, node)

    def removeChild(self, oldChild):
        """Detach and return *oldChild*; raises NotFoundErr if absent."""
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        oldChild.nextSibling = oldChild.previousSibling = None
        oldChild.parentNode = None
        if self.documentElement is oldChild:
            self.documentElement = None

        return oldChild

    def _get_documentElement(self):
        # The (single) element child, or None.
        for node in self.childNodes:
            if node.nodeType == Node.ELEMENT_NODE:
                return node

    def unlink(self):
        """Break reference cycles so the tree can be collected promptly."""
        if self.doctype is not None:
            self.doctype.unlink()
            self.doctype = None
        Node.unlink(self)

    def cloneNode(self, deep):
        """Clone the whole document; shallow document clones are None."""
        if not deep:
            return None
        clone = self.implementation.createDocument(None, None, None)
        clone.encoding = self.encoding
        clone.standalone = self.standalone
        clone.version = self.version
        for n in self.childNodes:
            childclone = _clone_node(n, deep, clone)
            assert childclone.ownerDocument.isSameNode(clone)
            clone.childNodes.append(childclone)
            if childclone.nodeType == Node.DOCUMENT_NODE:
                assert clone.documentElement is None
            elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
                assert clone.doctype is None
                clone.doctype = childclone
            childclone.parentNode = clone
        self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
                                     self, clone)
        return clone

    def createDocumentFragment(self):
        d = DocumentFragment()
        d.ownerDocument = self
        return d

    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e

    def createTextNode(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        t = Text()
        t.data = data
        t.ownerDocument = self
        return t

    def createCDATASection(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        c = CDATASection()
        c.data = data
        c.ownerDocument = self
        return c

    def createComment(self, data):
        c = Comment(data)
        c.ownerDocument = self
        return c

    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p.ownerDocument = self
        return p

    def createAttribute(self, qName):
        a = Attr(qName)
        a.ownerDocument = self
        a.value = ""
        return a

    def createElementNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix)
        e.ownerDocument = self
        return e

    def createAttributeNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        a = Attr(qualifiedName, namespaceURI, localName, prefix)
        a.ownerDocument = self
        a.value = ""
        return a

    # A couple of implementation-specific helpers to create node types
    # not supported by the W3C DOM specs:

    def _create_entity(self, name, publicId, systemId, notationName):
        e = Entity(name, publicId, systemId, notationName)
        e.ownerDocument = self
        return e

    def _create_notation(self, name, publicId, systemId):
        n = Notation(name, publicId, systemId)
        n.ownerDocument = self
        return n

    def getElementById(self, id):
        """Return the element whose ID-typed attribute equals *id*.

        Results are cached in _id_cache; the incremental search state is
        kept in _id_search_stack so repeated lookups resume where the
        previous search stopped.
        """
        if id in self._id_cache:
            return self._id_cache[id]
        if not (self._elem_info or self._magic_id_count):
            return None

        stack = self._id_search_stack
        if stack is None:
            # we never searched before, or the cache has been cleared
            stack = [self.documentElement]
            self._id_search_stack = stack
        elif not stack:
            # Previous search was completed and cache is still valid;
            # no matching node.
            return None

        result = None
        while stack:
            node = stack.pop()
            # add child elements to stack for continued searching
            stack.extend([child for child in node.childNodes
                          if child.nodeType in _nodeTypes_with_children])
            # check this node
            info = self._get_elem_info(node)
            if info:
                # We have to process all ID attributes before
                # returning in order to get all the attributes set to
                # be IDs using Element.setIdAttribute*().
                for attr in node.attributes.values():
                    if attr.namespaceURI:
                        if info.isIdNS(attr.namespaceURI, attr.localName):
                            self._id_cache[attr.value] = node
                            if attr.value == id:
                                result = node
                            elif not node._magic_id_nodes:
                                break
                    elif info.isId(attr.name):
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif not node._magic_id_nodes:
                            break
                    elif attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif node._magic_id_nodes == 1:
                            break
            elif node._magic_id_nodes:
                for attr in node.attributes.values():
                    if attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
            if result is not None:
                break
        return result

    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())

    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())

    def isSupported(self, feature, version):
        return self.implementation.hasFeature(feature, version)

    def importNode(self, node, deep):
        """Copy a node from another document into this one."""
        if node.nodeType == Node.DOCUMENT_NODE:
            raise xml.dom.NotSupportedErr("cannot import document nodes")
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            raise xml.dom.NotSupportedErr("cannot import document type nodes")
        return _clone_node(node, deep, self)

    def writexml(self, writer, indent="", addindent="", newl="", encoding=None,
                 standalone=None):
        """Write the document, preceded by an XML declaration."""
        declarations = []

        if encoding:
            declarations.append(f'encoding="{encoding}"')
        if standalone is not None:
            declarations.append(f'standalone="{"yes" if standalone else "no"}"')

        # Bug fix: the XML declaration itself had gone missing -- only
        # f'{newl}' was written, silently discarding the declarations
        # list built above.  Restore the standard <?xml ...?> prolog.
        writer.write(f'<?xml version="1.0" {" ".join(declarations)}?>{newl}')

        for node in self.childNodes:
            node.writexml(writer, indent, addindent, newl)

    # DOM Level 3 (WD 9 April 2002)

    def renameNode(self, n, namespaceURI, name):
        """Rename an element or attribute node in place and return it."""
        if n.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr(
                "cannot rename nodes from other documents;\n"
                "expected %s,\nfound %s" % (self, n.ownerDocument))
        if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
            raise xml.dom.NotSupportedErr(
                "renameNode() only applies to element and attribute nodes")
        if namespaceURI != EMPTY_NAMESPACE:
            if ':' in name:
                prefix, localName = name.split(':', 1)
                if ( prefix == "xmlns"
                     and namespaceURI != xml.dom.XMLNS_NAMESPACE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of 'xmlns' prefix")
            else:
                if ( name == "xmlns"
                     and namespaceURI != xml.dom.XMLNS_NAMESPACE
                     and n.nodeType == Node.ATTRIBUTE_NODE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of the 'xmlns' attribute")
                prefix = None
                localName = name
        else:
            prefix = None
            localName = None
        if n.nodeType == Node.ATTRIBUTE_NODE:
            # Detach the attribute so it can be re-attached under the
            # new name below.
            element = n.ownerElement
            if element is not None:
                is_id = n._is_id
                element.removeAttributeNode(n)
        else:
            element = None
        n.prefix = prefix
        n._localName = localName
        n.namespaceURI = namespaceURI
        n.nodeName = name
        if n.nodeType == Node.ELEMENT_NODE:
            n.tagName = name
        else:
            # attribute node
            n.name = name
            if element is not None:
                element.setAttributeNode(n)
                if is_id:
                    element.setIdAttributeNode(n)
        # It's not clear from a semantic perspective whether we should
        # call the user data handlers for the NODE_RENAMED event since
        # we're re-using the existing node. The draft spec has been
        # interpreted as meaning "no, don't call the handler unless a
        # new node is created."
        return n
+
# Read-only property backed by Document._get_documentElement.
defproperty(Document, "documentElement",
            doc="Top-level element of this document.")
+
+
def _clone_node(node, deep, newOwnerDocument):
    """
    Clone a node and give it the new owner document.
    Called by Node.cloneNode and Document.importNode
    """
    # Same document -> "cloned"; different document -> "imported"; the
    # distinction only affects which user-data-handler event fires.
    if node.ownerDocument.isSameNode(newOwnerDocument):
        operation = xml.dom.UserDataHandler.NODE_CLONED
    else:
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
    if node.nodeType == Node.ELEMENT_NODE:
        clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                 node.nodeName)
        # Copy attributes, preserving each one's "specified" flag.
        for attr in node.attributes.values():
            clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
            a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
            a.specified = attr.specified

        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)

    elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
        clone = newOwnerDocument.createDocumentFragment()
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)

    elif node.nodeType == Node.TEXT_NODE:
        clone = newOwnerDocument.createTextNode(node.data)
    elif node.nodeType == Node.CDATA_SECTION_NODE:
        clone = newOwnerDocument.createCDATASection(node.data)
    elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
        clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                             node.data)
    elif node.nodeType == Node.COMMENT_NODE:
        clone = newOwnerDocument.createComment(node.data)
    elif node.nodeType == Node.ATTRIBUTE_NODE:
        clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                   node.nodeName)
        clone.specified = True
        clone.value = node.value
    elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
        # Doctype nodes can only be imported, never cloned within the
        # same document (the assert documents that invariant).
        assert node.ownerDocument is not newOwnerDocument
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
        clone = newOwnerDocument.implementation.createDocumentType(
            node.name, node.publicId, node.systemId)
        clone.ownerDocument = newOwnerDocument
        if deep:
            # Deep-copy the notation and entity maps by hand; their
            # containers are read-only so _seq is rebuilt directly.
            clone.entities._seq = []
            clone.notations._seq = []
            for n in node.notations._seq:
                notation = Notation(n.nodeName, n.publicId, n.systemId)
                notation.ownerDocument = newOwnerDocument
                clone.notations._seq.append(notation)
                if hasattr(n, '_call_user_data_handler'):
                    n._call_user_data_handler(operation, n, notation)
            for e in node.entities._seq:
                entity = Entity(e.nodeName, e.publicId, e.systemId,
                                e.notationName)
                entity.actualEncoding = e.actualEncoding
                entity.encoding = e.encoding
                entity.version = e.version
                entity.ownerDocument = newOwnerDocument
                clone.entities._seq.append(entity)
                if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, e, entity)
    else:
        # Note the cloning of Document and DocumentType nodes is
        # implementation specific. minidom handles those cases
        # directly in the cloneNode() methods.
        raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))

    # Check for _call_user_data_handler() since this could conceivably
    # used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
    if hasattr(node, '_call_user_data_handler'):
        node._call_user_data_handler(operation, node, clone)
    return clone
+
+
+def _nssplit(qualifiedName):
+ fields = qualifiedName.split(':', 1)
+ if len(fields) == 2:
+ return fields
+ else:
+ return (None, fields[0])
+
+
+def _do_pulldom_parse(func, args, kwargs):
+ events = func(*args, **kwargs)
+ toktype, rootNode = events.getEvent()
+ events.expandNode(rootNode)
+ events.clear()
+ return rootNode
+
def parse(file, parser=None, bufsize=None):
    """Parse a file into a DOM by filename or file object."""
    # Default path: the fast expat builder; any explicit parser or
    # buffer size routes through the incremental pulldom machinery.
    if parser is None and not bufsize:
        from xml.dom import expatbuilder
        return expatbuilder.parse(file)
    from xml.dom import pulldom
    return _do_pulldom_parse(pulldom.parse, (file,),
                             {'parser': parser, 'bufsize': bufsize})
+
def parseString(string, parser=None):
    """Parse a file into a DOM from a string."""
    # Same split as parse(): expat builder by default, pulldom when a
    # specific SAX parser was requested.
    if parser is None:
        from xml.dom import expatbuilder
        return expatbuilder.parseString(string)
    from xml.dom import pulldom
    return _do_pulldom_parse(pulldom.parseString, (string,),
                             {'parser': parser})
+
def getDOMImplementation(features=None):
    """Return minidom's DOMImplementation if it supports *features*.

    *features* may be a feature string or a sequence of (feature,
    version) pairs; returns None when any requested feature is missing.
    """
    if features:
        if isinstance(features, str):
            features = domreg._parse_feature_string(features)
        for feature, version in features:
            if not Document.implementation.hasFeature(feature, version):
                return None
    return Document.implementation
diff --git a/parrot/lib/python3.10/xml/dom/pulldom.py b/parrot/lib/python3.10/xml/dom/pulldom.py
new file mode 100644
index 0000000000000000000000000000000000000000..96a8d59519ef45957b101b7295d044c316d4e84c
--- /dev/null
+++ b/parrot/lib/python3.10/xml/dom/pulldom.py
@@ -0,0 +1,349 @@
+import xml.sax
+import xml.sax.handler
+
# Event-type tokens yielded by DOMEventStream / produced by PullDOM.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
+
class PullDOM(xml.sax.ContentHandler):
    """SAX content handler that builds DOM nodes and queues them as
    (event-type, node) pairs for DOMEventStream to consume.

    The event queue is a singly linked list of two-item cells
    [event, next-cell]; firstEvent is a dummy head and lastEvent the
    current tail.
    """

    _locator = None
    document = None

    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Dummy head cell of the event linked list; lastEvent tracks the
        # tail so appending stays O(1).
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        # Bind push/pop to the list's own methods for speed; fall back to
        # the class-level pop() if the binding is unavailable.
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Events (comments/PIs) seen before the document exists.
        self.pending_events = []

    def pop(self):
        # Class-level fallback for the bound-method pop set in __init__.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result

    def setDocumentLocator(self, locator):
        self._locator = locator

    def startPrefixMapping(self, prefix, uri):
        # Record the xmlns attribute so it can be re-attached to the
        # element in startElementNS, and push a new namespace context.
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()

    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)

        # Re-create attribute nodes, reconstructing qualified names for
        # namespaced attributes from the current prefix context.
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value

        # Append the event cell and advance the tail pointer.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)

        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)

        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def comment(self, s):
        # Comments seen before the document exists are buffered in
        # pending_events and replayed by buildDocument().
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            event = [(COMMENT, s), None]
            self.pending_events.append(event)

    def processingInstruction(self, target, data):
        # Same buffering strategy as comment().
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)

    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]

    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]

    def startDocument(self):
        # Default to minidom's DOMImplementation when no factory given.
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation

    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        # Return the root element created by createDocument().
        return node.firstChild

    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()

    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
+
class ErrorHandler:
    """SAX error handler: print warnings, re-raise errors and fatal errors."""

    def warning(self, exception):
        print(exception)

    def error(self, exception):
        raise exception

    def fatalError(self, exception):
        raise exception
+
class DOMEventStream:
    """Iterator over (event-type, node) pairs produced by a PullDOM
    handler, pulling data from *stream* through *parser* on demand."""

    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        # Parsers without the IncrementalParser feed() interface force a
        # one-shot parse that buffers all events up front.
        if not hasattr(self.parser, 'feed'):
            self.getEvent = self._slurp
        self.reset()

    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)

    def __getitem__(self, pos):
        # Deprecated sequence-style access; 'pos' is ignored entirely.
        import warnings
        warnings.warn(
            "DOMEventStream's __getitem__ method ignores 'pos' parameter. "
            "Use iterator protocol instead.",
            DeprecationWarning,
            stacklevel=2
        )
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError

    def __next__(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration

    def __iter__(self):
        return self

    def expandNode(self, node):
        """Consume events until *node* is fully built (subtree attached)."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                # Reached the END_ELEMENT for the node being expanded.
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()

    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        # Feed the parser until at least one event is queued, or the
        # stream is exhausted.
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def _slurp(self):
        """ Fallback replacement for getEvent() using the
        standard SAX2 interface, which means we slurp the
        SAX events into memory (no performance gain, but
        we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        # Subsequent calls just drain the pre-built event list.
        self.getEvent = self._emit
        return self._emit()

    def _emit(self):
        """ Fallback replacement for getEvent() that emits
        the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
+
class SAX2DOM(PullDOM):
    """PullDOM variant that also links each new node into the DOM tree."""

    def _attach_top_element(self):
        # The element just pushed becomes a child of the one below it.
        node = self.elementStack[-1]
        self.elementStack[-2].appendChild(node)

    def _attach_last_event_node(self):
        # Attach the most recently queued (non-element) node to the
        # currently open element.
        node = self.lastEvent[0][1]
        self.elementStack[-1].appendChild(node)

    def startElementNS(self, name, tagName, attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        self._attach_top_element()

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        self._attach_top_element()

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        self._attach_last_event_node()

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        self._attach_last_event_node()

    def characters(self, chars):
        PullDOM.characters(self, chars)
        self._attach_last_event_node()
+
+
# Default chunk size (just under 16 KiB) for reads from the input stream.
default_bufsize = (2 ** 14) - 20
+
def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over a filename or file-like object."""
    if bufsize is None:
        bufsize = default_bufsize
    # A string argument is treated as a filename; anything else is
    # assumed to be an already-open stream.
    if isinstance(stream_or_string, str):
        stream = open(stream_or_string, 'rb')
    else:
        stream = stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
+
def parseString(string, parser=None):
    """Return a DOMEventStream over an in-memory XML string."""
    from io import StringIO

    # The whole string fits in one read, so use its length as bufsize.
    buf = StringIO(string)
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(buf, parser, len(string))
diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8a4e2f0a0eab536e2fc98b2012339790b6c5b6f
Binary files /dev/null and b/parrot/lib/python3.10/xml/sax/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/_exceptions.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/_exceptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5fb075f72612aa1f168ab27058d531bc813665f
Binary files /dev/null and b/parrot/lib/python3.10/xml/sax/__pycache__/_exceptions.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/expatreader.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/expatreader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a9a5820df42ab7f0dbb5417ec74676b501b7311
Binary files /dev/null and b/parrot/lib/python3.10/xml/sax/__pycache__/expatreader.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/zoneinfo/__init__.py b/parrot/lib/python3.10/zoneinfo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5510ee049751345ef4788401ca427549be3aab5
--- /dev/null
+++ b/parrot/lib/python3.10/zoneinfo/__init__.py
@@ -0,0 +1,31 @@
+__all__ = [
+ "ZoneInfo",
+ "reset_tzpath",
+ "available_timezones",
+ "TZPATH",
+ "ZoneInfoNotFoundError",
+ "InvalidTZPathWarning",
+]
+
+from . import _tzpath
+from ._common import ZoneInfoNotFoundError
+
+try:
+ from _zoneinfo import ZoneInfo
+except ImportError: # pragma: nocover
+ from ._zoneinfo import ZoneInfo
+
+reset_tzpath = _tzpath.reset_tzpath
+available_timezones = _tzpath.available_timezones
+InvalidTZPathWarning = _tzpath.InvalidTZPathWarning
+
+
+def __getattr__(name):
+ if name == "TZPATH":
+ return _tzpath.TZPATH
+ else:
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
+def __dir__():
+ return sorted(list(globals()) + ["TZPATH"])
diff --git a/parrot/lib/python3.10/zoneinfo/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/zoneinfo/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9571e8865badf619eb5b44a0cb8614293b1dc416
Binary files /dev/null and b/parrot/lib/python3.10/zoneinfo/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/zoneinfo/__pycache__/_common.cpython-310.pyc b/parrot/lib/python3.10/zoneinfo/__pycache__/_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6499b48a4f8c6db4b821036a9fd578abeb216671
Binary files /dev/null and b/parrot/lib/python3.10/zoneinfo/__pycache__/_common.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/zoneinfo/__pycache__/_tzpath.cpython-310.pyc b/parrot/lib/python3.10/zoneinfo/__pycache__/_tzpath.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..704af81d76dab1acf333086f6b4bd29eb7d391a5
Binary files /dev/null and b/parrot/lib/python3.10/zoneinfo/__pycache__/_tzpath.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/zoneinfo/__pycache__/_zoneinfo.cpython-310.pyc b/parrot/lib/python3.10/zoneinfo/__pycache__/_zoneinfo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77c908a27b4df71fac1909f0533baf553c39211b
Binary files /dev/null and b/parrot/lib/python3.10/zoneinfo/__pycache__/_zoneinfo.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/zoneinfo/_common.py b/parrot/lib/python3.10/zoneinfo/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c24f01bd7b270eac5579680bee572c7b7ef0e88
--- /dev/null
+++ b/parrot/lib/python3.10/zoneinfo/_common.py
@@ -0,0 +1,164 @@
+import struct
+
+
+def load_tzdata(key):
+ import importlib.resources
+
+ components = key.split("/")
+ package_name = ".".join(["tzdata.zoneinfo"] + components[:-1])
+ resource_name = components[-1]
+
+ try:
+ return importlib.resources.open_binary(package_name, resource_name)
+ except (ImportError, FileNotFoundError, UnicodeEncodeError):
+ # There are three types of exception that can be raised that all amount
+ # to "we cannot find this key":
+ #
+ # ImportError: If package_name doesn't exist (e.g. if tzdata is not
+ # installed, or if there's an error in the folder name like
+ # Amrica/New_York)
+ # FileNotFoundError: If resource_name doesn't exist in the package
+ # (e.g. Europe/Krasnoy)
+ # UnicodeEncodeError: If package_name or resource_name are not UTF-8,
+ # such as keys containing a surrogate character.
+ raise ZoneInfoNotFoundError(f"No time zone found with key {key}")
+
+
+def load_data(fobj):
+ header = _TZifHeader.from_file(fobj)
+
+ if header.version == 1:
+ time_size = 4
+ time_type = "l"
+ else:
+ # Version 2+ has 64-bit integer transition times
+ time_size = 8
+ time_type = "q"
+
+ # Version 2+ also starts with a Version 1 header and data, which
+ # we need to skip now
+ skip_bytes = (
+ header.timecnt * 5 # Transition times and types
+ + header.typecnt * 6 # Local time type records
+ + header.charcnt # Time zone designations
+ + header.leapcnt * 8 # Leap second records
+ + header.isstdcnt # Standard/wall indicators
+ + header.isutcnt # UT/local indicators
+ )
+
+ fobj.seek(skip_bytes, 1)
+
+ # Now we need to read the second header, which is not the same
+ # as the first
+ header = _TZifHeader.from_file(fobj)
+
+ typecnt = header.typecnt
+ timecnt = header.timecnt
+ charcnt = header.charcnt
+
+ # The data portion starts with timecnt transitions and indices
+ if timecnt:
+ trans_list_utc = struct.unpack(
+ f">{timecnt}{time_type}", fobj.read(timecnt * time_size)
+ )
+ trans_idx = struct.unpack(f">{timecnt}B", fobj.read(timecnt))
+ else:
+ trans_list_utc = ()
+ trans_idx = ()
+
+ # Read the ttinfo struct, (utoff, isdst, abbrind)
+ if typecnt:
+ utcoff, isdst, abbrind = zip(
+ *(struct.unpack(">lbb", fobj.read(6)) for i in range(typecnt))
+ )
+ else:
+ utcoff = ()
+ isdst = ()
+ abbrind = ()
+
+ # Now read the abbreviations. They are null-terminated strings, indexed
+ # not by position in the array but by position in the unsplit
+ # abbreviation string. I suppose this makes more sense in C, which uses
+ # null to terminate the strings, but it's inconvenient here...
+ abbr_vals = {}
+ abbr_chars = fobj.read(charcnt)
+
+ def get_abbr(idx):
+ # Gets a string starting at idx and running until the next \x00
+ #
+ # We cannot pre-populate abbr_vals by splitting on \x00 because there
+ # are some zones that use subsets of longer abbreviations, like so:
+ #
+ # LMT\x00AHST\x00HDT\x00
+ #
+ # Where the idx to abbr mapping should be:
+ #
+ # {0: "LMT", 4: "AHST", 5: "HST", 9: "HDT"}
+ if idx not in abbr_vals:
+ span_end = abbr_chars.find(b"\x00", idx)
+ abbr_vals[idx] = abbr_chars[idx:span_end].decode()
+
+ return abbr_vals[idx]
+
+ abbr = tuple(get_abbr(idx) for idx in abbrind)
+
+ # The remainder of the file consists of leap seconds (currently unused) and
+ # the standard/wall and ut/local indicators, which are metadata we don't need.
+ # In version 2 files, we need to skip the unnecessary data to get at the TZ string:
+ if header.version >= 2:
+ # Each leap second record has size (time_size + 4)
+ skip_bytes = header.isutcnt + header.isstdcnt + header.leapcnt * 12
+ fobj.seek(skip_bytes, 1)
+
+ c = fobj.read(1) # Should be \n
+ assert c == b"\n", c
+
+ tz_bytes = b""
+ while (c := fobj.read(1)) != b"\n":
+ tz_bytes += c
+
+ tz_str = tz_bytes
+ else:
+ tz_str = None
+
+ return trans_idx, trans_list_utc, utcoff, isdst, abbr, tz_str
+
+
+class _TZifHeader:
+ __slots__ = [
+ "version",
+ "isutcnt",
+ "isstdcnt",
+ "leapcnt",
+ "timecnt",
+ "typecnt",
+ "charcnt",
+ ]
+
+ def __init__(self, *args):
+ for attr, val in zip(self.__slots__, args, strict=True):
+ setattr(self, attr, val)
+
+ @classmethod
+ def from_file(cls, stream):
+ # The header starts with a 4-byte "magic" value
+ if stream.read(4) != b"TZif":
+ raise ValueError("Invalid TZif file: magic not found")
+
+ _version = stream.read(1)
+ if _version == b"\x00":
+ version = 1
+ else:
+ version = int(_version)
+ stream.read(15)
+
+ args = (version,)
+
+ # Slots are defined in the order that the bytes are arranged
+ args = args + struct.unpack(">6l", stream.read(24))
+
+ return cls(*args)
+
+
+class ZoneInfoNotFoundError(KeyError):
+ """Exception raised when a ZoneInfo key is not found."""
diff --git a/parrot/lib/python3.10/zoneinfo/_zoneinfo.py b/parrot/lib/python3.10/zoneinfo/_zoneinfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..eede15b8271058a25500dcf6df26486caa554e44
--- /dev/null
+++ b/parrot/lib/python3.10/zoneinfo/_zoneinfo.py
@@ -0,0 +1,752 @@
+import bisect
+import calendar
+import collections
+import functools
+import re
+import weakref
+from datetime import datetime, timedelta, tzinfo
+
+from . import _common, _tzpath
+
+EPOCH = datetime(1970, 1, 1)
+EPOCHORDINAL = datetime(1970, 1, 1).toordinal()
+
+# It is relatively expensive to construct new timedelta objects, and in most
+# cases we're looking at the same deltas, like integer numbers of hours, etc.
+# To improve speed and memory use, we'll keep a dictionary with references
+# to the ones we've already used so far.
+#
+# Loading every time zone in the 2020a version of the time zone database
+# requires 447 timedeltas, which requires approximately the amount of space
+# that ZoneInfo("America/New_York") with 236 transitions takes up, so we will
+# set the cache size to 512 so that in the common case we always get cache
+# hits, but specifically crafted ZoneInfo objects don't leak arbitrary amounts
+# of memory.
+@functools.lru_cache(maxsize=512)
+def _load_timedelta(seconds):
+ return timedelta(seconds=seconds)
+
+
+class ZoneInfo(tzinfo):
+ _strong_cache_size = 8
+ _strong_cache = collections.OrderedDict()
+ _weak_cache = weakref.WeakValueDictionary()
+ __module__ = "zoneinfo"
+
+ def __init_subclass__(cls):
+ cls._strong_cache = collections.OrderedDict()
+ cls._weak_cache = weakref.WeakValueDictionary()
+
+ def __new__(cls, key):
+ instance = cls._weak_cache.get(key, None)
+ if instance is None:
+ instance = cls._weak_cache.setdefault(key, cls._new_instance(key))
+ instance._from_cache = True
+
+ # Update the "strong" cache
+ cls._strong_cache[key] = cls._strong_cache.pop(key, instance)
+
+ if len(cls._strong_cache) > cls._strong_cache_size:
+ cls._strong_cache.popitem(last=False)
+
+ return instance
+
+ @classmethod
+ def no_cache(cls, key):
+ obj = cls._new_instance(key)
+ obj._from_cache = False
+
+ return obj
+
+ @classmethod
+ def _new_instance(cls, key):
+ obj = super().__new__(cls)
+ obj._key = key
+ obj._file_path = obj._find_tzfile(key)
+
+ if obj._file_path is not None:
+ file_obj = open(obj._file_path, "rb")
+ else:
+ file_obj = _common.load_tzdata(key)
+
+ with file_obj as f:
+ obj._load_file(f)
+
+ return obj
+
+ @classmethod
+ def from_file(cls, fobj, /, key=None):
+ obj = super().__new__(cls)
+ obj._key = key
+ obj._file_path = None
+ obj._load_file(fobj)
+ obj._file_repr = repr(fobj)
+
+ # Disable pickling for objects created from files
+ obj.__reduce__ = obj._file_reduce
+
+ return obj
+
+ @classmethod
+ def clear_cache(cls, *, only_keys=None):
+ if only_keys is not None:
+ for key in only_keys:
+ cls._weak_cache.pop(key, None)
+ cls._strong_cache.pop(key, None)
+
+ else:
+ cls._weak_cache.clear()
+ cls._strong_cache.clear()
+
+ @property
+ def key(self):
+ return self._key
+
+ def utcoffset(self, dt):
+ return self._find_trans(dt).utcoff
+
+ def dst(self, dt):
+ return self._find_trans(dt).dstoff
+
+ def tzname(self, dt):
+ return self._find_trans(dt).tzname
+
+ def fromutc(self, dt):
+ """Convert from datetime in UTC to datetime in local time"""
+
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ timestamp = self._get_local_timestamp(dt)
+ num_trans = len(self._trans_utc)
+
+ if num_trans >= 1 and timestamp < self._trans_utc[0]:
+ tti = self._tti_before
+ fold = 0
+ elif (
+ num_trans == 0 or timestamp > self._trans_utc[-1]
+ ) and not isinstance(self._tz_after, _ttinfo):
+ tti, fold = self._tz_after.get_trans_info_fromutc(
+ timestamp, dt.year
+ )
+ elif num_trans == 0:
+ tti = self._tz_after
+ fold = 0
+ else:
+ idx = bisect.bisect_right(self._trans_utc, timestamp)
+
+ if num_trans > 1 and timestamp >= self._trans_utc[1]:
+ tti_prev, tti = self._ttinfos[idx - 2 : idx]
+ elif timestamp > self._trans_utc[-1]:
+ tti_prev = self._ttinfos[-1]
+ tti = self._tz_after
+ else:
+ tti_prev = self._tti_before
+ tti = self._ttinfos[0]
+
+ # Detect fold
+ shift = tti_prev.utcoff - tti.utcoff
+ fold = shift.total_seconds() > timestamp - self._trans_utc[idx - 1]
+ dt += tti.utcoff
+ if fold:
+ return dt.replace(fold=1)
+ else:
+ return dt
+
+ def _find_trans(self, dt):
+ if dt is None:
+ if self._fixed_offset:
+ return self._tz_after
+ else:
+ return _NO_TTINFO
+
+ ts = self._get_local_timestamp(dt)
+
+ lt = self._trans_local[dt.fold]
+
+ num_trans = len(lt)
+
+ if num_trans and ts < lt[0]:
+ return self._tti_before
+ elif not num_trans or ts > lt[-1]:
+ if isinstance(self._tz_after, _TZStr):
+ return self._tz_after.get_trans_info(ts, dt.year, dt.fold)
+ else:
+ return self._tz_after
+ else:
+ # idx is the transition that occurs after this timestamp, so we
+ # subtract off 1 to get the current ttinfo
+ idx = bisect.bisect_right(lt, ts) - 1
+ assert idx >= 0
+ return self._ttinfos[idx]
+
+ def _get_local_timestamp(self, dt):
+ return (
+ (dt.toordinal() - EPOCHORDINAL) * 86400
+ + dt.hour * 3600
+ + dt.minute * 60
+ + dt.second
+ )
+
+ def __str__(self):
+ if self._key is not None:
+ return f"{self._key}"
+ else:
+ return repr(self)
+
+ def __repr__(self):
+ if self._key is not None:
+ return f"{self.__class__.__name__}(key={self._key!r})"
+ else:
+ return f"{self.__class__.__name__}.from_file({self._file_repr})"
+
+ def __reduce__(self):
+ return (self.__class__._unpickle, (self._key, self._from_cache))
+
+ def _file_reduce(self):
+ import pickle
+
+ raise pickle.PicklingError(
+ "Cannot pickle a ZoneInfo file created from a file stream."
+ )
+
+ @classmethod
+ def _unpickle(cls, key, from_cache, /):
+ if from_cache:
+ return cls(key)
+ else:
+ return cls.no_cache(key)
+
+ def _find_tzfile(self, key):
+ return _tzpath.find_tzfile(key)
+
+ def _load_file(self, fobj):
+ # Retrieve all the data as it exists in the zoneinfo file
+ trans_idx, trans_utc, utcoff, isdst, abbr, tz_str = _common.load_data(
+ fobj
+ )
+
+ # Infer the DST offsets (needed for .dst()) from the data
+ dstoff = self._utcoff_to_dstoff(trans_idx, utcoff, isdst)
+
+ # Convert all the transition times (UTC) into "seconds since 1970-01-01 local time"
+ trans_local = self._ts_to_local(trans_idx, trans_utc, utcoff)
+
+ # Construct `_ttinfo` objects for each transition in the file
+ _ttinfo_list = [
+ _ttinfo(
+ _load_timedelta(utcoffset), _load_timedelta(dstoffset), tzname
+ )
+ for utcoffset, dstoffset, tzname in zip(utcoff, dstoff, abbr)
+ ]
+
+ self._trans_utc = trans_utc
+ self._trans_local = trans_local
+ self._ttinfos = [_ttinfo_list[idx] for idx in trans_idx]
+
+ # Find the first non-DST transition
+ for i in range(len(isdst)):
+ if not isdst[i]:
+ self._tti_before = _ttinfo_list[i]
+ break
+ else:
+ if self._ttinfos:
+ self._tti_before = self._ttinfos[0]
+ else:
+ self._tti_before = None
+
+ # Set the "fallback" time zone
+ if tz_str is not None and tz_str != b"":
+ self._tz_after = _parse_tz_str(tz_str.decode())
+ else:
+ if not self._ttinfos and not _ttinfo_list:
+ raise ValueError("No time zone information found.")
+
+ if self._ttinfos:
+ self._tz_after = self._ttinfos[-1]
+ else:
+ self._tz_after = _ttinfo_list[-1]
+
+ # Determine if this is a "fixed offset" zone, meaning that the output
+ # of the utcoffset, dst and tzname functions does not depend on the
+ # specific datetime passed.
+ #
+ # We make three simplifying assumptions here:
+ #
+ # 1. If _tz_after is not a _ttinfo, it has transitions that might
+ # actually occur (it is possible to construct TZ strings that
+ # specify STD and DST but no transitions ever occur, such as
+ # AAA0BBB,0/0,J365/25).
+ # 2. If _ttinfo_list contains more than one _ttinfo object, the objects
+ # represent different offsets.
+ # 3. _ttinfo_list contains no unused _ttinfos (in which case an
+ # otherwise fixed-offset zone with extra _ttinfos defined may
+ # appear to *not* be a fixed offset zone).
+ #
+ # Violations to these assumptions would be fairly exotic, and exotic
+ # zones should almost certainly not be used with datetime.time (the
+ # only thing that would be affected by this).
+ if len(_ttinfo_list) > 1 or not isinstance(self._tz_after, _ttinfo):
+ self._fixed_offset = False
+ elif not _ttinfo_list:
+ self._fixed_offset = True
+ else:
+ self._fixed_offset = _ttinfo_list[0] == self._tz_after
+
+ @staticmethod
+ def _utcoff_to_dstoff(trans_idx, utcoffsets, isdsts):
+ # Now we must transform our ttis and abbrs into `_ttinfo` objects,
+ # but there is an issue: .dst() must return a timedelta with the
+ # difference between utcoffset() and the "standard" offset, but
+ # the "base offset" and "DST offset" are not encoded in the file;
+ # we can infer what they are from the isdst flag, but it is not
+ # sufficient to just look at the last standard offset, because
+ # occasionally countries will shift both DST offset and base offset.
+
+ typecnt = len(isdsts)
+ dstoffs = [0] * typecnt # Provisionally assign all to 0.
+ dst_cnt = sum(isdsts)
+ dst_found = 0
+
+ for i in range(1, len(trans_idx)):
+ if dst_cnt == dst_found:
+ break
+
+ idx = trans_idx[i]
+
+ dst = isdsts[idx]
+
+ # We're only going to look at daylight saving time
+ if not dst:
+ continue
+
+ # Skip any offsets that have already been assigned
+ if dstoffs[idx] != 0:
+ continue
+
+ dstoff = 0
+ utcoff = utcoffsets[idx]
+
+ comp_idx = trans_idx[i - 1]
+
+ if not isdsts[comp_idx]:
+ dstoff = utcoff - utcoffsets[comp_idx]
+
+ if not dstoff and idx < (typecnt - 1):
+ comp_idx = trans_idx[i + 1]
+
+ # If the following transition is also DST and we couldn't
+ # find the DST offset by this point, we're going to have to
+ # skip it and hope this transition gets assigned later
+ if isdsts[comp_idx]:
+ continue
+
+ dstoff = utcoff - utcoffsets[comp_idx]
+
+ if dstoff:
+ dst_found += 1
+ dstoffs[idx] = dstoff
+ else:
+ # If we didn't find a valid value for a given index, we'll end up
+ # with dstoff = 0 for something where `isdst=1`. This is obviously
+ # wrong - one hour will be a much better guess than 0
+ for idx in range(typecnt):
+ if not dstoffs[idx] and isdsts[idx]:
+ dstoffs[idx] = 3600
+
+ return dstoffs
+
+ @staticmethod
+ def _ts_to_local(trans_idx, trans_list_utc, utcoffsets):
+ """Generate number of seconds since 1970 *in the local time*.
+
+ This is necessary to easily find the transition times in local time"""
+ if not trans_list_utc:
+ return [[], []]
+
+ # Start with the timestamps and modify in-place
+ trans_list_wall = [list(trans_list_utc), list(trans_list_utc)]
+
+ if len(utcoffsets) > 1:
+ offset_0 = utcoffsets[0]
+ offset_1 = utcoffsets[trans_idx[0]]
+ if offset_1 > offset_0:
+ offset_1, offset_0 = offset_0, offset_1
+ else:
+ offset_0 = offset_1 = utcoffsets[0]
+
+ trans_list_wall[0][0] += offset_0
+ trans_list_wall[1][0] += offset_1
+
+ for i in range(1, len(trans_idx)):
+ offset_0 = utcoffsets[trans_idx[i - 1]]
+ offset_1 = utcoffsets[trans_idx[i]]
+
+ if offset_1 > offset_0:
+ offset_1, offset_0 = offset_0, offset_1
+
+ trans_list_wall[0][i] += offset_0
+ trans_list_wall[1][i] += offset_1
+
+ return trans_list_wall
+
+
+class _ttinfo:
+ __slots__ = ["utcoff", "dstoff", "tzname"]
+
+ def __init__(self, utcoff, dstoff, tzname):
+ self.utcoff = utcoff
+ self.dstoff = dstoff
+ self.tzname = tzname
+
+ def __eq__(self, other):
+ return (
+ self.utcoff == other.utcoff
+ and self.dstoff == other.dstoff
+ and self.tzname == other.tzname
+ )
+
+ def __repr__(self): # pragma: nocover
+ return (
+ f"{self.__class__.__name__}"
+ + f"({self.utcoff}, {self.dstoff}, {self.tzname})"
+ )
+
+
+_NO_TTINFO = _ttinfo(None, None, None)
+
+
+class _TZStr:
+ __slots__ = (
+ "std",
+ "dst",
+ "start",
+ "end",
+ "get_trans_info",
+ "get_trans_info_fromutc",
+ "dst_diff",
+ )
+
+ def __init__(
+ self, std_abbr, std_offset, dst_abbr, dst_offset, start=None, end=None
+ ):
+ self.dst_diff = dst_offset - std_offset
+ std_offset = _load_timedelta(std_offset)
+ self.std = _ttinfo(
+ utcoff=std_offset, dstoff=_load_timedelta(0), tzname=std_abbr
+ )
+
+ self.start = start
+ self.end = end
+
+ dst_offset = _load_timedelta(dst_offset)
+ delta = _load_timedelta(self.dst_diff)
+ self.dst = _ttinfo(utcoff=dst_offset, dstoff=delta, tzname=dst_abbr)
+
+ # These are assertions because the constructor should only be called
+ # by functions that would fail before passing start or end
+ assert start is not None, "No transition start specified"
+ assert end is not None, "No transition end specified"
+
+ self.get_trans_info = self._get_trans_info
+ self.get_trans_info_fromutc = self._get_trans_info_fromutc
+
+ def transitions(self, year):
+ start = self.start.year_to_epoch(year)
+ end = self.end.year_to_epoch(year)
+ return start, end
+
+ def _get_trans_info(self, ts, year, fold):
+ """Get the information about the current transition - tti"""
+ start, end = self.transitions(year)
+
+ # With fold = 0, the period (denominated in local time) with the
+ # smaller offset starts at the end of the gap and ends at the end of
+ # the fold; with fold = 1, it runs from the start of the gap to the
+ # beginning of the fold.
+ #
+ # So in order to determine the DST boundaries we need to know both
+ # the fold and whether DST is positive or negative (rare), and it
+ # turns out that this boils down to fold XOR is_positive.
+ if fold == (self.dst_diff >= 0):
+ end -= self.dst_diff
+ else:
+ start += self.dst_diff
+
+ if start < end:
+ isdst = start <= ts < end
+ else:
+ isdst = not (end <= ts < start)
+
+ return self.dst if isdst else self.std
+
+ def _get_trans_info_fromutc(self, ts, year):
+ start, end = self.transitions(year)
+ start -= self.std.utcoff.total_seconds()
+ end -= self.dst.utcoff.total_seconds()
+
+ if start < end:
+ isdst = start <= ts < end
+ else:
+ isdst = not (end <= ts < start)
+
+ # For positive DST, the ambiguous period is one dst_diff after the end
+ # of DST; for negative DST, the ambiguous period is one dst_diff before
+ # the start of DST.
+ if self.dst_diff > 0:
+ ambig_start = end
+ ambig_end = end + self.dst_diff
+ else:
+ ambig_start = start
+ ambig_end = start - self.dst_diff
+
+ fold = ambig_start <= ts < ambig_end
+
+ return (self.dst if isdst else self.std, fold)
+
+
+def _post_epoch_days_before_year(year):
+ """Get the number of days between 1970-01-01 and YEAR-01-01"""
+ y = year - 1
+ return y * 365 + y // 4 - y // 100 + y // 400 - EPOCHORDINAL
+
+
+class _DayOffset:
+ __slots__ = ["d", "julian", "hour", "minute", "second"]
+
+ def __init__(self, d, julian, hour=2, minute=0, second=0):
+ if not (0 + julian) <= d <= 365:
+ min_day = 0 + julian
+ raise ValueError(f"d must be in [{min_day}, 365], not: {d}")
+
+ self.d = d
+ self.julian = julian
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+
+ def year_to_epoch(self, year):
+ days_before_year = _post_epoch_days_before_year(year)
+
+ d = self.d
+ if self.julian and d >= 59 and calendar.isleap(year):
+ d += 1
+
+ epoch = (days_before_year + d) * 86400
+ epoch += self.hour * 3600 + self.minute * 60 + self.second
+
+ return epoch
+
+
+class _CalendarOffset:
+ __slots__ = ["m", "w", "d", "hour", "minute", "second"]
+
+ _DAYS_BEFORE_MONTH = (
+ -1,
+ 0,
+ 31,
+ 59,
+ 90,
+ 120,
+ 151,
+ 181,
+ 212,
+ 243,
+ 273,
+ 304,
+ 334,
+ )
+
+ def __init__(self, m, w, d, hour=2, minute=0, second=0):
+ if not 0 < m <= 12:
+ raise ValueError("m must be in (0, 12]")
+
+ if not 0 < w <= 5:
+ raise ValueError("w must be in (0, 5]")
+
+ if not 0 <= d <= 6:
+ raise ValueError("d must be in [0, 6]")
+
+ self.m = m
+ self.w = w
+ self.d = d
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+
+ @classmethod
+ def _ymd2ord(cls, year, month, day):
+ return (
+ _post_epoch_days_before_year(year)
+ + cls._DAYS_BEFORE_MONTH[month]
+ + (month > 2 and calendar.isleap(year))
+ + day
+ )
+
+ # TODO: These are not actually epoch dates as they are expressed in local time
+ def year_to_epoch(self, year):
+ """Calculates the datetime of the occurrence from the year"""
+ # We know year and month, we need to convert w, d into day of month
+ #
+ # Week 1 is the first week in which day `d` (where 0 = Sunday) appears.
+ # Week 5 represents the last occurrence of day `d`, so we need to know
+ # the range of the month.
+ first_day, days_in_month = calendar.monthrange(year, self.m)
+
+ # This equation seems magical, so I'll break it down:
+ # 1. calendar says 0 = Monday, POSIX says 0 = Sunday
+ # so we need first_day + 1 to get 1 = Monday -> 7 = Sunday,
+ # which is still equivalent because this math is mod 7
+ # 2. Get first day - desired day mod 7: -1 % 7 = 6, so we don't need
+ # to do anything to adjust negative numbers.
+ # 3. Add 1 because month days are a 1-based index.
+ month_day = (self.d - (first_day + 1)) % 7 + 1
+
+ # Now use a 0-based index version of `w` to calculate the w-th
+ # occurrence of `d`
+ month_day += (self.w - 1) * 7
+
+ # month_day will only be > days_in_month if w was 5, and `w` means
+ # "last occurrence of `d`", so now we just check if we over-shot the
+ # end of the month and if so knock off 1 week.
+ if month_day > days_in_month:
+ month_day -= 7
+
+ ordinal = self._ymd2ord(year, self.m, month_day)
+ epoch = ordinal * 86400
+ epoch += self.hour * 3600 + self.minute * 60 + self.second
+ return epoch
+
+
+def _parse_tz_str(tz_str):
+ # The tz string has the format:
+ #
+ # std[offset[dst[offset],start[/time],end[/time]]]
+ #
+ # std and dst must be 3 or more characters long and must not contain
+ # a leading colon, embedded digits, commas, nor a plus or minus signs;
+ # The spaces between "std" and "offset" are only for display and are
+ # not actually present in the string.
+ #
+ # The format of the offset is ``[+|-]hh[:mm[:ss]]``
+
+ offset_str, *start_end_str = tz_str.split(",", 1)
+
+ # fmt: off
+ parser_re = re.compile(
+ r"(?P[^<0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
+ r"((?P[+-]?\d{1,2}(:\d{2}(:\d{2})?)?)" +
+ r"((?P[^0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
+ r"((?P[+-]?\d{1,2}(:\d{2}(:\d{2})?)?))?" +
+ r")?" + # dst
+ r")?$" # stdoff
+ )
+ # fmt: on
+
+ m = parser_re.match(offset_str)
+
+ if m is None:
+ raise ValueError(f"{tz_str} is not a valid TZ string")
+
+ std_abbr = m.group("std")
+ dst_abbr = m.group("dst")
+ dst_offset = None
+
+ std_abbr = std_abbr.strip("<>")
+
+ if dst_abbr:
+ dst_abbr = dst_abbr.strip("<>")
+
+ if std_offset := m.group("stdoff"):
+ try:
+ std_offset = _parse_tz_delta(std_offset)
+ except ValueError as e:
+ raise ValueError(f"Invalid STD offset in {tz_str}") from e
+ else:
+ std_offset = 0
+
+ if dst_abbr is not None:
+ if dst_offset := m.group("dstoff"):
+ try:
+ dst_offset = _parse_tz_delta(dst_offset)
+ except ValueError as e:
+ raise ValueError(f"Invalid DST offset in {tz_str}") from e
+ else:
+ dst_offset = std_offset + 3600
+
+ if not start_end_str:
+ raise ValueError(f"Missing transition rules: {tz_str}")
+
+ start_end_strs = start_end_str[0].split(",", 1)
+ try:
+ start, end = (_parse_dst_start_end(x) for x in start_end_strs)
+ except ValueError as e:
+ raise ValueError(f"Invalid TZ string: {tz_str}") from e
+
+ return _TZStr(std_abbr, std_offset, dst_abbr, dst_offset, start, end)
+ elif start_end_str:
+ raise ValueError(f"Transition rule present without DST: {tz_str}")
+ else:
+ # This is a static ttinfo, don't return _TZStr
+ return _ttinfo(
+ _load_timedelta(std_offset), _load_timedelta(0), std_abbr
+ )
+
+
+def _parse_dst_start_end(dststr):
+ date, *time = dststr.split("/")
+ if date[0] == "M":
+ n_is_julian = False
+ m = re.match(r"M(\d{1,2})\.(\d).(\d)$", date)
+ if m is None:
+ raise ValueError(f"Invalid dst start/end date: {dststr}")
+ date_offset = tuple(map(int, m.groups()))
+ offset = _CalendarOffset(*date_offset)
+ else:
+ if date[0] == "J":
+ n_is_julian = True
+ date = date[1:]
+ else:
+ n_is_julian = False
+
+ doy = int(date)
+ offset = _DayOffset(doy, n_is_julian)
+
+ if time:
+ time_components = list(map(int, time[0].split(":")))
+ n_components = len(time_components)
+ if n_components < 3:
+ time_components.extend([0] * (3 - n_components))
+ offset.hour, offset.minute, offset.second = time_components
+
+ return offset
+
+
+def _parse_tz_delta(tz_delta):
+ match = re.match(
+ r"(?P[+-])?(?P\d{1,2})(:(?P\d{2})(:(?P\d{2}))?)?",
+ tz_delta,
+ )
+ # Anything passed to this function should already have hit an equivalent
+ # regular expression to find the section to parse.
+ assert match is not None, tz_delta
+
+ h, m, s = (
+ int(v) if v is not None else 0
+ for v in map(match.group, ("h", "m", "s"))
+ )
+
+ total = h * 3600 + m * 60 + s
+
+ if not -86400 < total < 86400:
+ raise ValueError(
+ f"Offset must be strictly between -24h and +24h: {tz_delta}"
+ )
+
+ # Yes, +5 maps to an offset of -5h
+ if match.group("sign") != "-":
+ total *= -1
+
+ return total