Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- llava/lib/libasan.so +3 -0
- llava/lib/libncurses++.a +3 -0
- llava/lib/libstdc++.so +3 -0
- llava/lib/libtinfo.so +3 -0
- llava/lib/python3.10/__future__.py +147 -0
- llava/lib/python3.10/__phello__.foo.py +1 -0
- llava/lib/python3.10/_markupbase.py +396 -0
- llava/lib/python3.10/antigravity.py +17 -0
- llava/lib/python3.10/asynchat.py +315 -0
- llava/lib/python3.10/base64.py +603 -0
- llava/lib/python3.10/bdb.py +891 -0
- llava/lib/python3.10/datetime.py +2524 -0
- llava/lib/python3.10/filecmp.py +313 -0
- llava/lib/python3.10/gettext.py +788 -0
- llava/lib/python3.10/hmac.py +219 -0
- llava/lib/python3.10/netrc.py +143 -0
- llava/lib/python3.10/ntpath.py +838 -0
- llava/lib/python3.10/operator.py +460 -0
- llava/lib/python3.10/pipes.py +247 -0
- llava/lib/python3.10/pkgutil.py +715 -0
- llava/lib/python3.10/re.py +383 -0
- llava/lib/python3.10/selectors.py +619 -0
- llava/lib/python3.10/smtpd.py +979 -0
- llava/lib/python3.10/sre_parse.py +1076 -0
- llava/lib/python3.10/tokenize.py +684 -0
- llava/lib/python3.10/zipapp.py +206 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_cuda_dispatch.h +25 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cpu_dispatch.h +23 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h +44 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf_ops.h +50 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_max.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cpu_dispatch.h +25 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy_ops.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h +23 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_ops.h +28 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h +21 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_ops.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view.h +91 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h +23 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h +26 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/alias.h +30 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_cuda_dispatch.h +26 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/column_stack_compositeimplicitautograd_dispatch.h +25 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_backward.h +30 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta.h +27 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_ops.h +39 -0
.gitattributes
CHANGED
|
@@ -426,3 +426,7 @@ llava/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
|
|
| 426 |
llava/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
|
| 427 |
llava/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
|
| 428 |
llava/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 426 |
llava/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
|
| 427 |
llava/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
|
| 428 |
llava/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
| 429 |
+
llava/lib/libtinfo.so filter=lfs diff=lfs merge=lfs -text
|
| 430 |
+
llava/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
|
| 431 |
+
llava/lib/libstdc++.so filter=lfs diff=lfs merge=lfs -text
|
| 432 |
+
llava/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
|
llava/lib/libasan.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2a8a7995a4d84a8817af8d1604bef621e99d0622df4eda14f6fe5245735a952e
|
| 3 |
+
size 7575272
|
llava/lib/libncurses++.a
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:93b48c40f5d7b07e1a8c4bd9419df55c28e250cca1166be4aafd2fc7caf18823
|
| 3 |
+
size 187604
|
llava/lib/libstdc++.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
|
| 3 |
+
size 17981480
|
llava/lib/libtinfo.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5679c9d7cc0ec2d6b08c6058198667efe71f657e89dcc0bd7adcf5d6cbc80c5
|
| 3 |
+
size 287080
|
llava/lib/python3.10/__future__.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Record of phased-in incompatible language changes.
|
| 2 |
+
|
| 3 |
+
Each line is of the form:
|
| 4 |
+
|
| 5 |
+
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
|
| 6 |
+
CompilerFlag ")"
|
| 7 |
+
|
| 8 |
+
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
|
| 9 |
+
of the same form as sys.version_info:
|
| 10 |
+
|
| 11 |
+
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
|
| 12 |
+
PY_MINOR_VERSION, # the 1; an int
|
| 13 |
+
PY_MICRO_VERSION, # the 0; an int
|
| 14 |
+
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
|
| 15 |
+
PY_RELEASE_SERIAL # the 3; an int
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
OptionalRelease records the first release in which
|
| 19 |
+
|
| 20 |
+
from __future__ import FeatureName
|
| 21 |
+
|
| 22 |
+
was accepted.
|
| 23 |
+
|
| 24 |
+
In the case of MandatoryReleases that have not yet occurred,
|
| 25 |
+
MandatoryRelease predicts the release in which the feature will become part
|
| 26 |
+
of the language.
|
| 27 |
+
|
| 28 |
+
Else MandatoryRelease records when the feature became part of the language;
|
| 29 |
+
in releases at or after that, modules no longer need
|
| 30 |
+
|
| 31 |
+
from __future__ import FeatureName
|
| 32 |
+
|
| 33 |
+
to use the feature in question, but may continue to use such imports.
|
| 34 |
+
|
| 35 |
+
MandatoryRelease may also be None, meaning that a planned feature got
|
| 36 |
+
dropped.
|
| 37 |
+
|
| 38 |
+
Instances of class _Feature have two corresponding methods,
|
| 39 |
+
.getOptionalRelease() and .getMandatoryRelease().
|
| 40 |
+
|
| 41 |
+
CompilerFlag is the (bitfield) flag that should be passed in the fourth
|
| 42 |
+
argument to the builtin function compile() to enable the feature in
|
| 43 |
+
dynamically compiled code. This flag is stored in the .compiler_flag
|
| 44 |
+
attribute on _Future instances. These values must match the appropriate
|
| 45 |
+
#defines of CO_xxx flags in Include/cpython/compile.h.
|
| 46 |
+
|
| 47 |
+
No feature line is ever to be deleted from this file.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
all_feature_names = [
|
| 51 |
+
"nested_scopes",
|
| 52 |
+
"generators",
|
| 53 |
+
"division",
|
| 54 |
+
"absolute_import",
|
| 55 |
+
"with_statement",
|
| 56 |
+
"print_function",
|
| 57 |
+
"unicode_literals",
|
| 58 |
+
"barry_as_FLUFL",
|
| 59 |
+
"generator_stop",
|
| 60 |
+
"annotations",
|
| 61 |
+
]
|
| 62 |
+
|
| 63 |
+
__all__ = ["all_feature_names"] + all_feature_names
|
| 64 |
+
|
| 65 |
+
# The CO_xxx symbols are defined here under the same names defined in
|
| 66 |
+
# code.h and used by compile.h, so that an editor search will find them here.
|
| 67 |
+
# However, they're not exported in __all__, because they don't really belong to
|
| 68 |
+
# this module.
|
| 69 |
+
CO_NESTED = 0x0010 # nested_scopes
|
| 70 |
+
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
|
| 71 |
+
CO_FUTURE_DIVISION = 0x20000 # division
|
| 72 |
+
CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default
|
| 73 |
+
CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement
|
| 74 |
+
CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function
|
| 75 |
+
CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals
|
| 76 |
+
CO_FUTURE_BARRY_AS_BDFL = 0x400000
|
| 77 |
+
CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators
|
| 78 |
+
CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class _Feature:
|
| 82 |
+
|
| 83 |
+
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
|
| 84 |
+
self.optional = optionalRelease
|
| 85 |
+
self.mandatory = mandatoryRelease
|
| 86 |
+
self.compiler_flag = compiler_flag
|
| 87 |
+
|
| 88 |
+
def getOptionalRelease(self):
|
| 89 |
+
"""Return first release in which this feature was recognized.
|
| 90 |
+
|
| 91 |
+
This is a 5-tuple, of the same form as sys.version_info.
|
| 92 |
+
"""
|
| 93 |
+
return self.optional
|
| 94 |
+
|
| 95 |
+
def getMandatoryRelease(self):
|
| 96 |
+
"""Return release in which this feature will become mandatory.
|
| 97 |
+
|
| 98 |
+
This is a 5-tuple, of the same form as sys.version_info, or, if
|
| 99 |
+
the feature was dropped, is None.
|
| 100 |
+
"""
|
| 101 |
+
return self.mandatory
|
| 102 |
+
|
| 103 |
+
def __repr__(self):
|
| 104 |
+
return "_Feature" + repr((self.optional,
|
| 105 |
+
self.mandatory,
|
| 106 |
+
self.compiler_flag))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
nested_scopes = _Feature((2, 1, 0, "beta", 1),
|
| 110 |
+
(2, 2, 0, "alpha", 0),
|
| 111 |
+
CO_NESTED)
|
| 112 |
+
|
| 113 |
+
generators = _Feature((2, 2, 0, "alpha", 1),
|
| 114 |
+
(2, 3, 0, "final", 0),
|
| 115 |
+
CO_GENERATOR_ALLOWED)
|
| 116 |
+
|
| 117 |
+
division = _Feature((2, 2, 0, "alpha", 2),
|
| 118 |
+
(3, 0, 0, "alpha", 0),
|
| 119 |
+
CO_FUTURE_DIVISION)
|
| 120 |
+
|
| 121 |
+
absolute_import = _Feature((2, 5, 0, "alpha", 1),
|
| 122 |
+
(3, 0, 0, "alpha", 0),
|
| 123 |
+
CO_FUTURE_ABSOLUTE_IMPORT)
|
| 124 |
+
|
| 125 |
+
with_statement = _Feature((2, 5, 0, "alpha", 1),
|
| 126 |
+
(2, 6, 0, "alpha", 0),
|
| 127 |
+
CO_FUTURE_WITH_STATEMENT)
|
| 128 |
+
|
| 129 |
+
print_function = _Feature((2, 6, 0, "alpha", 2),
|
| 130 |
+
(3, 0, 0, "alpha", 0),
|
| 131 |
+
CO_FUTURE_PRINT_FUNCTION)
|
| 132 |
+
|
| 133 |
+
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
|
| 134 |
+
(3, 0, 0, "alpha", 0),
|
| 135 |
+
CO_FUTURE_UNICODE_LITERALS)
|
| 136 |
+
|
| 137 |
+
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
|
| 138 |
+
(4, 0, 0, "alpha", 0),
|
| 139 |
+
CO_FUTURE_BARRY_AS_BDFL)
|
| 140 |
+
|
| 141 |
+
generator_stop = _Feature((3, 5, 0, "beta", 1),
|
| 142 |
+
(3, 7, 0, "alpha", 0),
|
| 143 |
+
CO_FUTURE_GENERATOR_STOP)
|
| 144 |
+
|
| 145 |
+
annotations = _Feature((3, 7, 0, "beta", 1),
|
| 146 |
+
(3, 11, 0, "alpha", 0),
|
| 147 |
+
CO_FUTURE_ANNOTATIONS)
|
llava/lib/python3.10/__phello__.foo.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# This file exists as a helper for the test.test_frozen module.
|
llava/lib/python3.10/_markupbase.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared support for scanning document type declarations in HTML and XHTML.
|
| 2 |
+
|
| 3 |
+
This module is used as a foundation for the html.parser module. It has no
|
| 4 |
+
documented public API and should not be used directly.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
|
| 11 |
+
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
|
| 12 |
+
_commentclose = re.compile(r'--\s*>')
|
| 13 |
+
_markedsectionclose = re.compile(r']\s*]\s*>')
|
| 14 |
+
|
| 15 |
+
# An analysis of the MS-Word extensions is available at
|
| 16 |
+
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
|
| 17 |
+
|
| 18 |
+
_msmarkedsectionclose = re.compile(r']\s*>')
|
| 19 |
+
|
| 20 |
+
del re
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ParserBase:
|
| 24 |
+
"""Parser base class which provides some common support methods used
|
| 25 |
+
by the SGML/HTML and XHTML parsers."""
|
| 26 |
+
|
| 27 |
+
def __init__(self):
|
| 28 |
+
if self.__class__ is ParserBase:
|
| 29 |
+
raise RuntimeError(
|
| 30 |
+
"_markupbase.ParserBase must be subclassed")
|
| 31 |
+
|
| 32 |
+
def reset(self):
|
| 33 |
+
self.lineno = 1
|
| 34 |
+
self.offset = 0
|
| 35 |
+
|
| 36 |
+
def getpos(self):
|
| 37 |
+
"""Return current line number and offset."""
|
| 38 |
+
return self.lineno, self.offset
|
| 39 |
+
|
| 40 |
+
# Internal -- update line number and offset. This should be
|
| 41 |
+
# called for each piece of data exactly once, in order -- in other
|
| 42 |
+
# words the concatenation of all the input strings to this
|
| 43 |
+
# function should be exactly the entire input.
|
| 44 |
+
def updatepos(self, i, j):
|
| 45 |
+
if i >= j:
|
| 46 |
+
return j
|
| 47 |
+
rawdata = self.rawdata
|
| 48 |
+
nlines = rawdata.count("\n", i, j)
|
| 49 |
+
if nlines:
|
| 50 |
+
self.lineno = self.lineno + nlines
|
| 51 |
+
pos = rawdata.rindex("\n", i, j) # Should not fail
|
| 52 |
+
self.offset = j-(pos+1)
|
| 53 |
+
else:
|
| 54 |
+
self.offset = self.offset + j-i
|
| 55 |
+
return j
|
| 56 |
+
|
| 57 |
+
_decl_otherchars = ''
|
| 58 |
+
|
| 59 |
+
# Internal -- parse declaration (for use by subclasses).
|
| 60 |
+
def parse_declaration(self, i):
|
| 61 |
+
# This is some sort of declaration; in "HTML as
|
| 62 |
+
# deployed," this should only be the document type
|
| 63 |
+
# declaration ("<!DOCTYPE html...>").
|
| 64 |
+
# ISO 8879:1986, however, has more complex
|
| 65 |
+
# declaration syntax for elements in <!...>, including:
|
| 66 |
+
# --comment--
|
| 67 |
+
# [marked section]
|
| 68 |
+
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
|
| 69 |
+
# ATTLIST, NOTATION, SHORTREF, USEMAP,
|
| 70 |
+
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
|
| 71 |
+
rawdata = self.rawdata
|
| 72 |
+
j = i + 2
|
| 73 |
+
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
|
| 74 |
+
if rawdata[j:j+1] == ">":
|
| 75 |
+
# the empty comment <!>
|
| 76 |
+
return j + 1
|
| 77 |
+
if rawdata[j:j+1] in ("-", ""):
|
| 78 |
+
# Start of comment followed by buffer boundary,
|
| 79 |
+
# or just a buffer boundary.
|
| 80 |
+
return -1
|
| 81 |
+
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
|
| 82 |
+
n = len(rawdata)
|
| 83 |
+
if rawdata[j:j+2] == '--': #comment
|
| 84 |
+
# Locate --.*-- as the body of the comment
|
| 85 |
+
return self.parse_comment(i)
|
| 86 |
+
elif rawdata[j] == '[': #marked section
|
| 87 |
+
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
|
| 88 |
+
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
|
| 89 |
+
# Note that this is extended by Microsoft Office "Save as Web" function
|
| 90 |
+
# to include [if...] and [endif].
|
| 91 |
+
return self.parse_marked_section(i)
|
| 92 |
+
else: #all other declaration elements
|
| 93 |
+
decltype, j = self._scan_name(j, i)
|
| 94 |
+
if j < 0:
|
| 95 |
+
return j
|
| 96 |
+
if decltype == "doctype":
|
| 97 |
+
self._decl_otherchars = ''
|
| 98 |
+
while j < n:
|
| 99 |
+
c = rawdata[j]
|
| 100 |
+
if c == ">":
|
| 101 |
+
# end of declaration syntax
|
| 102 |
+
data = rawdata[i+2:j]
|
| 103 |
+
if decltype == "doctype":
|
| 104 |
+
self.handle_decl(data)
|
| 105 |
+
else:
|
| 106 |
+
# According to the HTML5 specs sections "8.2.4.44 Bogus
|
| 107 |
+
# comment state" and "8.2.4.45 Markup declaration open
|
| 108 |
+
# state", a comment token should be emitted.
|
| 109 |
+
# Calling unknown_decl provides more flexibility though.
|
| 110 |
+
self.unknown_decl(data)
|
| 111 |
+
return j + 1
|
| 112 |
+
if c in "\"'":
|
| 113 |
+
m = _declstringlit_match(rawdata, j)
|
| 114 |
+
if not m:
|
| 115 |
+
return -1 # incomplete
|
| 116 |
+
j = m.end()
|
| 117 |
+
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
|
| 118 |
+
name, j = self._scan_name(j, i)
|
| 119 |
+
elif c in self._decl_otherchars:
|
| 120 |
+
j = j + 1
|
| 121 |
+
elif c == "[":
|
| 122 |
+
# this could be handled in a separate doctype parser
|
| 123 |
+
if decltype == "doctype":
|
| 124 |
+
j = self._parse_doctype_subset(j + 1, i)
|
| 125 |
+
elif decltype in {"attlist", "linktype", "link", "element"}:
|
| 126 |
+
# must tolerate []'d groups in a content model in an element declaration
|
| 127 |
+
# also in data attribute specifications of attlist declaration
|
| 128 |
+
# also link type declaration subsets in linktype declarations
|
| 129 |
+
# also link attribute specification lists in link declarations
|
| 130 |
+
raise AssertionError("unsupported '[' char in %s declaration" % decltype)
|
| 131 |
+
else:
|
| 132 |
+
raise AssertionError("unexpected '[' char in declaration")
|
| 133 |
+
else:
|
| 134 |
+
raise AssertionError("unexpected %r char in declaration" % rawdata[j])
|
| 135 |
+
if j < 0:
|
| 136 |
+
return j
|
| 137 |
+
return -1 # incomplete
|
| 138 |
+
|
| 139 |
+
# Internal -- parse a marked section
|
| 140 |
+
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
|
| 141 |
+
def parse_marked_section(self, i, report=1):
|
| 142 |
+
rawdata= self.rawdata
|
| 143 |
+
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
|
| 144 |
+
sectName, j = self._scan_name( i+3, i )
|
| 145 |
+
if j < 0:
|
| 146 |
+
return j
|
| 147 |
+
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
|
| 148 |
+
# look for standard ]]> ending
|
| 149 |
+
match= _markedsectionclose.search(rawdata, i+3)
|
| 150 |
+
elif sectName in {"if", "else", "endif"}:
|
| 151 |
+
# look for MS Office ]> ending
|
| 152 |
+
match= _msmarkedsectionclose.search(rawdata, i+3)
|
| 153 |
+
else:
|
| 154 |
+
raise AssertionError(
|
| 155 |
+
'unknown status keyword %r in marked section' % rawdata[i+3:j]
|
| 156 |
+
)
|
| 157 |
+
if not match:
|
| 158 |
+
return -1
|
| 159 |
+
if report:
|
| 160 |
+
j = match.start(0)
|
| 161 |
+
self.unknown_decl(rawdata[i+3: j])
|
| 162 |
+
return match.end(0)
|
| 163 |
+
|
| 164 |
+
# Internal -- parse comment, return length or -1 if not terminated
|
| 165 |
+
def parse_comment(self, i, report=1):
|
| 166 |
+
rawdata = self.rawdata
|
| 167 |
+
if rawdata[i:i+4] != '<!--':
|
| 168 |
+
raise AssertionError('unexpected call to parse_comment()')
|
| 169 |
+
match = _commentclose.search(rawdata, i+4)
|
| 170 |
+
if not match:
|
| 171 |
+
return -1
|
| 172 |
+
if report:
|
| 173 |
+
j = match.start(0)
|
| 174 |
+
self.handle_comment(rawdata[i+4: j])
|
| 175 |
+
return match.end(0)
|
| 176 |
+
|
| 177 |
+
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
|
| 178 |
+
# returning the index just past any whitespace following the trailing ']'.
|
| 179 |
+
def _parse_doctype_subset(self, i, declstartpos):
|
| 180 |
+
rawdata = self.rawdata
|
| 181 |
+
n = len(rawdata)
|
| 182 |
+
j = i
|
| 183 |
+
while j < n:
|
| 184 |
+
c = rawdata[j]
|
| 185 |
+
if c == "<":
|
| 186 |
+
s = rawdata[j:j+2]
|
| 187 |
+
if s == "<":
|
| 188 |
+
# end of buffer; incomplete
|
| 189 |
+
return -1
|
| 190 |
+
if s != "<!":
|
| 191 |
+
self.updatepos(declstartpos, j + 1)
|
| 192 |
+
raise AssertionError(
|
| 193 |
+
"unexpected char in internal subset (in %r)" % s
|
| 194 |
+
)
|
| 195 |
+
if (j + 2) == n:
|
| 196 |
+
# end of buffer; incomplete
|
| 197 |
+
return -1
|
| 198 |
+
if (j + 4) > n:
|
| 199 |
+
# end of buffer; incomplete
|
| 200 |
+
return -1
|
| 201 |
+
if rawdata[j:j+4] == "<!--":
|
| 202 |
+
j = self.parse_comment(j, report=0)
|
| 203 |
+
if j < 0:
|
| 204 |
+
return j
|
| 205 |
+
continue
|
| 206 |
+
name, j = self._scan_name(j + 2, declstartpos)
|
| 207 |
+
if j == -1:
|
| 208 |
+
return -1
|
| 209 |
+
if name not in {"attlist", "element", "entity", "notation"}:
|
| 210 |
+
self.updatepos(declstartpos, j + 2)
|
| 211 |
+
raise AssertionError(
|
| 212 |
+
"unknown declaration %r in internal subset" % name
|
| 213 |
+
)
|
| 214 |
+
# handle the individual names
|
| 215 |
+
meth = getattr(self, "_parse_doctype_" + name)
|
| 216 |
+
j = meth(j, declstartpos)
|
| 217 |
+
if j < 0:
|
| 218 |
+
return j
|
| 219 |
+
elif c == "%":
|
| 220 |
+
# parameter entity reference
|
| 221 |
+
if (j + 1) == n:
|
| 222 |
+
# end of buffer; incomplete
|
| 223 |
+
return -1
|
| 224 |
+
s, j = self._scan_name(j + 1, declstartpos)
|
| 225 |
+
if j < 0:
|
| 226 |
+
return j
|
| 227 |
+
if rawdata[j] == ";":
|
| 228 |
+
j = j + 1
|
| 229 |
+
elif c == "]":
|
| 230 |
+
j = j + 1
|
| 231 |
+
while j < n and rawdata[j].isspace():
|
| 232 |
+
j = j + 1
|
| 233 |
+
if j < n:
|
| 234 |
+
if rawdata[j] == ">":
|
| 235 |
+
return j
|
| 236 |
+
self.updatepos(declstartpos, j)
|
| 237 |
+
raise AssertionError("unexpected char after internal subset")
|
| 238 |
+
else:
|
| 239 |
+
return -1
|
| 240 |
+
elif c.isspace():
|
| 241 |
+
j = j + 1
|
| 242 |
+
else:
|
| 243 |
+
self.updatepos(declstartpos, j)
|
| 244 |
+
raise AssertionError("unexpected char %r in internal subset" % c)
|
| 245 |
+
# end of buffer reached
|
| 246 |
+
return -1
|
| 247 |
+
|
| 248 |
+
# Internal -- scan past <!ELEMENT declarations
|
| 249 |
+
def _parse_doctype_element(self, i, declstartpos):
|
| 250 |
+
name, j = self._scan_name(i, declstartpos)
|
| 251 |
+
if j == -1:
|
| 252 |
+
return -1
|
| 253 |
+
# style content model; just skip until '>'
|
| 254 |
+
rawdata = self.rawdata
|
| 255 |
+
if '>' in rawdata[j:]:
|
| 256 |
+
return rawdata.find(">", j) + 1
|
| 257 |
+
return -1
|
| 258 |
+
|
| 259 |
+
# Internal -- scan past <!ATTLIST declarations
|
| 260 |
+
def _parse_doctype_attlist(self, i, declstartpos):
|
| 261 |
+
rawdata = self.rawdata
|
| 262 |
+
name, j = self._scan_name(i, declstartpos)
|
| 263 |
+
c = rawdata[j:j+1]
|
| 264 |
+
if c == "":
|
| 265 |
+
return -1
|
| 266 |
+
if c == ">":
|
| 267 |
+
return j + 1
|
| 268 |
+
while 1:
|
| 269 |
+
# scan a series of attribute descriptions; simplified:
|
| 270 |
+
# name type [value] [#constraint]
|
| 271 |
+
name, j = self._scan_name(j, declstartpos)
|
| 272 |
+
if j < 0:
|
| 273 |
+
return j
|
| 274 |
+
c = rawdata[j:j+1]
|
| 275 |
+
if c == "":
|
| 276 |
+
return -1
|
| 277 |
+
if c == "(":
|
| 278 |
+
# an enumerated type; look for ')'
|
| 279 |
+
if ")" in rawdata[j:]:
|
| 280 |
+
j = rawdata.find(")", j) + 1
|
| 281 |
+
else:
|
| 282 |
+
return -1
|
| 283 |
+
while rawdata[j:j+1].isspace():
|
| 284 |
+
j = j + 1
|
| 285 |
+
if not rawdata[j:]:
|
| 286 |
+
# end of buffer, incomplete
|
| 287 |
+
return -1
|
| 288 |
+
else:
|
| 289 |
+
name, j = self._scan_name(j, declstartpos)
|
| 290 |
+
c = rawdata[j:j+1]
|
| 291 |
+
if not c:
|
| 292 |
+
return -1
|
| 293 |
+
if c in "'\"":
|
| 294 |
+
m = _declstringlit_match(rawdata, j)
|
| 295 |
+
if m:
|
| 296 |
+
j = m.end()
|
| 297 |
+
else:
|
| 298 |
+
return -1
|
| 299 |
+
c = rawdata[j:j+1]
|
| 300 |
+
if not c:
|
| 301 |
+
return -1
|
| 302 |
+
if c == "#":
|
| 303 |
+
if rawdata[j:] == "#":
|
| 304 |
+
# end of buffer
|
| 305 |
+
return -1
|
| 306 |
+
name, j = self._scan_name(j + 1, declstartpos)
|
| 307 |
+
if j < 0:
|
| 308 |
+
return j
|
| 309 |
+
c = rawdata[j:j+1]
|
| 310 |
+
if not c:
|
| 311 |
+
return -1
|
| 312 |
+
if c == '>':
|
| 313 |
+
# all done
|
| 314 |
+
return j + 1
|
| 315 |
+
|
| 316 |
+
# Internal -- scan past <!NOTATION declarations
|
| 317 |
+
def _parse_doctype_notation(self, i, declstartpos):
|
| 318 |
+
name, j = self._scan_name(i, declstartpos)
|
| 319 |
+
if j < 0:
|
| 320 |
+
return j
|
| 321 |
+
rawdata = self.rawdata
|
| 322 |
+
while 1:
|
| 323 |
+
c = rawdata[j:j+1]
|
| 324 |
+
if not c:
|
| 325 |
+
# end of buffer; incomplete
|
| 326 |
+
return -1
|
| 327 |
+
if c == '>':
|
| 328 |
+
return j + 1
|
| 329 |
+
if c in "'\"":
|
| 330 |
+
m = _declstringlit_match(rawdata, j)
|
| 331 |
+
if not m:
|
| 332 |
+
return -1
|
| 333 |
+
j = m.end()
|
| 334 |
+
else:
|
| 335 |
+
name, j = self._scan_name(j, declstartpos)
|
| 336 |
+
if j < 0:
|
| 337 |
+
return j
|
| 338 |
+
|
| 339 |
+
# Internal -- scan past <!ENTITY declarations
|
| 340 |
+
def _parse_doctype_entity(self, i, declstartpos):
|
| 341 |
+
rawdata = self.rawdata
|
| 342 |
+
if rawdata[i:i+1] == "%":
|
| 343 |
+
j = i + 1
|
| 344 |
+
while 1:
|
| 345 |
+
c = rawdata[j:j+1]
|
| 346 |
+
if not c:
|
| 347 |
+
return -1
|
| 348 |
+
if c.isspace():
|
| 349 |
+
j = j + 1
|
| 350 |
+
else:
|
| 351 |
+
break
|
| 352 |
+
else:
|
| 353 |
+
j = i
|
| 354 |
+
name, j = self._scan_name(j, declstartpos)
|
| 355 |
+
if j < 0:
|
| 356 |
+
return j
|
| 357 |
+
while 1:
|
| 358 |
+
c = self.rawdata[j:j+1]
|
| 359 |
+
if not c:
|
| 360 |
+
return -1
|
| 361 |
+
if c in "'\"":
|
| 362 |
+
m = _declstringlit_match(rawdata, j)
|
| 363 |
+
if m:
|
| 364 |
+
j = m.end()
|
| 365 |
+
else:
|
| 366 |
+
return -1 # incomplete
|
| 367 |
+
elif c == ">":
|
| 368 |
+
return j + 1
|
| 369 |
+
else:
|
| 370 |
+
name, j = self._scan_name(j, declstartpos)
|
| 371 |
+
if j < 0:
|
| 372 |
+
return j
|
| 373 |
+
|
| 374 |
+
# Internal -- scan a name token and the new position and the token, or
|
| 375 |
+
# return -1 if we've reached the end of the buffer.
|
| 376 |
+
def _scan_name(self, i, declstartpos):
|
| 377 |
+
rawdata = self.rawdata
|
| 378 |
+
n = len(rawdata)
|
| 379 |
+
if i == n:
|
| 380 |
+
return None, -1
|
| 381 |
+
m = _declname_match(rawdata, i)
|
| 382 |
+
if m:
|
| 383 |
+
s = m.group()
|
| 384 |
+
name = s.strip()
|
| 385 |
+
if (i + len(s)) == n:
|
| 386 |
+
return None, -1 # end of buffer
|
| 387 |
+
return name.lower(), m.end()
|
| 388 |
+
else:
|
| 389 |
+
self.updatepos(declstartpos, i)
|
| 390 |
+
raise AssertionError(
|
| 391 |
+
"expected name token at %r" % rawdata[declstartpos:declstartpos+20]
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
# To be overridden -- handlers for unknown objects
|
| 395 |
+
def unknown_decl(self, data):
|
| 396 |
+
pass
|
llava/lib/python3.10/antigravity.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import webbrowser
|
| 3 |
+
import hashlib
|
| 4 |
+
|
| 5 |
+
webbrowser.open("https://xkcd.com/353/")
|
| 6 |
+
|
| 7 |
+
def geohash(latitude, longitude, datedow):
|
| 8 |
+
'''Compute geohash() using the Munroe algorithm.
|
| 9 |
+
|
| 10 |
+
>>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
|
| 11 |
+
37.857713 -122.544543
|
| 12 |
+
|
| 13 |
+
'''
|
| 14 |
+
# https://xkcd.com/426/
|
| 15 |
+
h = hashlib.md5(datedow, usedforsecurity=False).hexdigest()
|
| 16 |
+
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
|
| 17 |
+
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
|
llava/lib/python3.10/asynchat.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- Mode: Python; tab-width: 4 -*-
|
| 2 |
+
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
|
| 3 |
+
# Author: Sam Rushing <rushing@nightmare.com>
|
| 4 |
+
|
| 5 |
+
# ======================================================================
|
| 6 |
+
# Copyright 1996 by Sam Rushing
|
| 7 |
+
#
|
| 8 |
+
# All Rights Reserved
|
| 9 |
+
#
|
| 10 |
+
# Permission to use, copy, modify, and distribute this software and
|
| 11 |
+
# its documentation for any purpose and without fee is hereby
|
| 12 |
+
# granted, provided that the above copyright notice appear in all
|
| 13 |
+
# copies and that both that copyright notice and this permission
|
| 14 |
+
# notice appear in supporting documentation, and that the name of Sam
|
| 15 |
+
# Rushing not be used in advertising or publicity pertaining to
|
| 16 |
+
# distribution of the software without specific, written prior
|
| 17 |
+
# permission.
|
| 18 |
+
#
|
| 19 |
+
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
| 20 |
+
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
| 21 |
+
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
| 22 |
+
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 23 |
+
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
| 24 |
+
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
| 25 |
+
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 26 |
+
# ======================================================================
|
| 27 |
+
|
| 28 |
+
r"""A class supporting chat-style (command/response) protocols.
|
| 29 |
+
|
| 30 |
+
This class adds support for 'chat' style protocols - where one side
|
| 31 |
+
sends a 'command', and the other sends a response (examples would be
|
| 32 |
+
the common internet protocols - smtp, nntp, ftp, etc..).
|
| 33 |
+
|
| 34 |
+
The handle_read() method looks at the input stream for the current
|
| 35 |
+
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
|
| 36 |
+
for multi-line output), calling self.found_terminator() on its
|
| 37 |
+
receipt.
|
| 38 |
+
|
| 39 |
+
for example:
|
| 40 |
+
Say you build an async nntp client using this class. At the start
|
| 41 |
+
of the connection, you'll have self.terminator set to '\r\n', in
|
| 42 |
+
order to process the single-line greeting. Just before issuing a
|
| 43 |
+
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
|
| 44 |
+
command will be accumulated (using your own 'collect_incoming_data'
|
| 45 |
+
method) up to the terminator, and then control will be returned to
|
| 46 |
+
you - by calling your self.found_terminator() method.
|
| 47 |
+
"""
|
| 48 |
+
import asyncore
|
| 49 |
+
from collections import deque
|
| 50 |
+
|
| 51 |
+
from warnings import warn
|
| 52 |
+
# Emit the stdlib deprecation notice at import time (asynchat was removed
# in Python 3.12 by PEP 594); stacklevel=2 attributes the warning to the
# module that imported asynchat rather than to this file.
warn(
    'The asynchat module is deprecated and will be removed in Python 3.12. '
    'The recommended replacement is asyncio',
    DeprecationWarning,
    stacklevel=2)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently

    use_encoding = 0
    encoding = 'latin-1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than io.BytesIO for a few reasons...
        # del lst[:] is faster than bio.truncate(0)
        # lst = [] is faster than bio.truncate(0)
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        """Abstract hook: called with each chunk of received payload."""
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        # Default accumulation helper subclasses may plug in.
        self.incoming.append(data)

    def _get_data(self):
        # Join and reset the accumulated chunks.
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        """Abstract hook: called whenever the current terminator is seen."""
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        """Set the input delimiter.

        Can be a fixed string of any length, an integer, or None.
        """
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        elif isinstance(term, int) and term < 0:
            raise ValueError('the number of received bytes must be positive')
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):

        try:
            data = self.recv(self.ac_in_buffer_size)
        except BlockingIOError:
            return
        except OSError:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            # BUG FIX: the original called bytes(str, self.encoding), passing
            # the *type* str instead of the received data, which raised
            # TypeError whenever use_encoding was enabled.
            data = bytes(data, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator: collect exactly N bytes
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string
                        # (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator
                    # is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        """Queue bytes-like data for sending, split into buffer-sized pieces."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            # BUG FIX: the original passed the format string and type(data)
            # as two separate TypeError args, so '%r' was never interpolated.
            raise TypeError('data argument must be byte-ish (%r)'
                            % type(data))
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = first[:obs]
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except OSError:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!  Drop all pending input and output.
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class simple_producer:
    """Producer that hands out a bytes payload in fixed-size slices."""

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        """Return the next chunk (at most buffer_size bytes); b'' when exhausted."""
        if len(self.data) <= self.buffer_size:
            chunk, self.data = self.data, b''
        else:
            chunk = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
        return chunk
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
# Given 'haystack', see if any prefix of 'needle' is at its end. This
|
| 297 |
+
# assumes an exact match has already been checked. Return the number of
|
| 298 |
+
# characters matched.
|
| 299 |
+
# for example:
|
| 300 |
+
# f_p_a_e("qwerty\r", "\r\n") => 1
|
| 301 |
+
# f_p_a_e("qwertydkjf", "\r\n") => 0
|
| 302 |
+
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>
|
| 303 |
+
|
| 304 |
+
# this could maybe be made faster with a computed regex?
|
| 305 |
+
# [answer: no; circa Python-2.0, Jan 2001]
|
| 306 |
+
# new python: 28961/s
|
| 307 |
+
# old python: 18307/s
|
| 308 |
+
# re: 12820/s
|
| 309 |
+
# regex: 14035/s
|
| 310 |
+
|
| 311 |
+
def find_prefix_at_end(haystack, needle):
    """Return how many leading characters of needle appear at the end of haystack.

    Assumes an exact (full) match has already been ruled out by the caller.
    e.g. find_prefix_at_end(b"qwerty\r", b"\r\n") -> 1
    """
    candidate = len(needle) - 1
    while candidate:
        if haystack.endswith(needle[:candidate]):
            break
        candidate -= 1
    return candidate
|
llava/lib/python3.10/base64.py
ADDED
|
@@ -0,0 +1,603 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
|
| 4 |
+
|
| 5 |
+
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
|
| 6 |
+
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
|
| 7 |
+
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
|
| 8 |
+
|
| 9 |
+
import re
|
| 10 |
+
import struct
|
| 11 |
+
import binascii
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
# Legacy interface exports traditional RFC 2045 Base64 encodings
|
| 16 |
+
'encode', 'decode', 'encodebytes', 'decodebytes',
|
| 17 |
+
# Generalized interface for other encodings
|
| 18 |
+
'b64encode', 'b64decode', 'b32encode', 'b32decode',
|
| 19 |
+
'b32hexencode', 'b32hexdecode', 'b16encode', 'b16decode',
|
| 20 |
+
# Base85 and Ascii85 encodings
|
| 21 |
+
'b85encode', 'b85decode', 'a85encode', 'a85decode',
|
| 22 |
+
# Standard Base64 encoding
|
| 23 |
+
'standard_b64encode', 'standard_b64decode',
|
| 24 |
+
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
|
| 25 |
+
# starting at:
|
| 26 |
+
#
|
| 27 |
+
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
|
| 28 |
+
'urlsafe_b64encode', 'urlsafe_b64decode',
|
| 29 |
+
]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
bytes_types = (bytes, bytearray) # Types acceptable as binary data
|
| 33 |
+
|
| 34 |
+
def _bytes_from_decode_data(s):
    """Normalize decode input to bytes.

    Accepts bytes/bytearray unchanged, ASCII-only str (encoded to bytes),
    or any other bytes-like object (copied via memoryview).
    """
    if isinstance(s, bytes_types):
        return s
    if isinstance(s, str):
        try:
            return s.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError('string argument should contain only ASCII characters')
    try:
        return memoryview(s).tobytes()
    except TypeError:
        raise TypeError("argument should be a bytes-like object or ASCII "
                        "string, not %r" % s.__class__.__name__) from None
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Base64 encoding/decoding uses binascii
|
| 50 |
+
|
| 51 |
+
def b64encode(s, altchars=None):
    """Return the Base64 encoding of the bytes-like object s as bytes.

    altchars, when given, must be a length-2 byte string supplying
    replacements for '+' and '/' (e.g. to build URL- or filesystem-safe
    output).
    """
    out = binascii.b2a_base64(s, newline=False)
    if altchars is None:
        return out
    assert len(altchars) == 2, repr(altchars)
    return out.translate(bytes.maketrans(b'+/', altchars))
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def b64decode(s, altchars=None, validate=False):
    """Decode Base64-encoded bytes-like object or ASCII string s to bytes.

    altchars, when given, is a 2-character alternative alphabet standing in
    for '+' and '/'.  With validate=False (default) non-alphabet bytes are
    discarded before the padding check; with validate=True they raise
    binascii.Error.  binascii.Error is also raised on bad padding.
    """
    s = _bytes_from_decode_data(s)
    if altchars is not None:
        alt = _bytes_from_decode_data(altchars)
        assert len(alt) == 2, repr(alt)
        s = s.translate(bytes.maketrans(alt, b'+/'))
    if validate:
        # Strict mode: the whole input must be alphabet chars plus padding.
        if re.fullmatch(b'[A-Za-z0-9+/]*={0,2}', s) is None:
            raise binascii.Error('Non-base64 digit found')
    return binascii.a2b_base64(s)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def standard_b64encode(s):
    """Return the standard-alphabet Base64 encoding of s as a bytes object.

    Thin convenience wrapper: identical to b64encode(s) with no
    alternative characters.
    """
    return b64encode(s, altchars=None)
|
| 96 |
+
|
| 97 |
+
def standard_b64decode(s):
    """Decode s (bytes-like or ASCII str) using the standard Base64 alphabet.

    Returns bytes.  Non-alphabet characters are discarded before the
    padding check; binascii.Error is raised on incorrect padding.
    """
    return b64decode(s)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
|
| 109 |
+
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
|
| 110 |
+
|
| 111 |
+
def urlsafe_b64encode(s):
    """Base64-encode bytes-like s using the URL/filesystem-safe alphabet.

    Returns bytes; '-' replaces '+' and '_' replaces '/'.
    """
    encoded = b64encode(s)
    return encoded.translate(_urlsafe_encode_translation)
|
| 119 |
+
|
| 120 |
+
def urlsafe_b64decode(s):
    """Decode s (bytes-like or ASCII str) from the URL/filesystem-safe
    Base64 alphabet ('-' for '+', '_' for '/') and return bytes.

    Characters outside the alphabet (other than '+' and '/') are discarded
    before the padding check; binascii.Error is raised on bad padding.
    """
    raw = _bytes_from_decode_data(s)
    return b64decode(raw.translate(_urlsafe_decode_translation))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# Base32 encoding/decoding must be done in Python
|
| 138 |
+
_B32_ENCODE_DOCSTRING = '''
|
| 139 |
+
Encode the bytes-like objects using {encoding} and return a bytes object.
|
| 140 |
+
'''
|
| 141 |
+
_B32_DECODE_DOCSTRING = '''
|
| 142 |
+
Decode the {encoding} encoded bytes-like object or ASCII string s.
|
| 143 |
+
|
| 144 |
+
Optional casefold is a flag specifying whether a lowercase alphabet is
|
| 145 |
+
acceptable as input. For security purposes, the default is False.
|
| 146 |
+
{extra_args}
|
| 147 |
+
The result is returned as a bytes object. A binascii.Error is raised if
|
| 148 |
+
the input is incorrectly padded or if there are non-alphabet
|
| 149 |
+
characters present in the input.
|
| 150 |
+
'''
|
| 151 |
+
_B32_DECODE_MAP01_DOCSTRING = '''
|
| 152 |
+
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
|
| 153 |
+
letter O (oh), and for optional mapping of the digit 1 (one) to
|
| 154 |
+
either the letter I (eye) or letter L (el). The optional argument
|
| 155 |
+
map01 when not None, specifies which letter the digit 1 should be
|
| 156 |
+
mapped to (when map01 is not None, the digit 0 is always mapped to
|
| 157 |
+
the letter O). For security purposes the default is None, so that
|
| 158 |
+
0 and 1 are not allowed in the input.
|
| 159 |
+
'''
|
| 160 |
+
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
|
| 161 |
+
_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
|
| 162 |
+
_b32tab2 = {}
|
| 163 |
+
_b32rev = {}
|
| 164 |
+
|
| 165 |
+
def _b32encode(alphabet, s):
    """Shared Base32 encoder for b32encode/b32hexencode.

    alphabet is the 32-byte alphabet to use; s is the bytes-like payload.
    Returns the encoded value as bytes, padded with '=' per RFC 4648.
    """
    global _b32tab2
    # Delay the initialization of the table to not waste memory
    # if the function is never called.  The cached table maps every
    # 10-bit value to its two-character encoding.
    if alphabet not in _b32tab2:
        b32tab = [bytes((i,)) for i in alphabet]
        _b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab]
        b32tab = None

    if not isinstance(s, bytes_types):
        s = memoryview(s).tobytes()
    leftover = len(s) % 5
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s = s + b'\0' * (5 - leftover)  # Don't use += !
    encoded = bytearray()
    from_bytes = int.from_bytes
    b32tab2 = _b32tab2[alphabet]
    # Each 5-byte quantum (40 bits) becomes eight 5-bit symbols, emitted
    # two at a time via the 10-bit lookup table.
    for i in range(0, len(s), 5):
        c = from_bytes(s[i: i + 5], 'big')
        encoded += (b32tab2[c >> 30] +            # bits 1 - 10
                    b32tab2[(c >> 20) & 0x3ff] +  # bits 11 - 20
                    b32tab2[(c >> 10) & 0x3ff] +  # bits 21 - 30
                    b32tab2[c & 0x3ff]            # bits 31 - 40
                    )
    # Adjust for any leftover partial quanta: overwrite the symbols that
    # only encode the zero padding with '=' characters.
    if leftover == 1:
        encoded[-6:] = b'======'
    elif leftover == 2:
        encoded[-4:] = b'===='
    elif leftover == 3:
        encoded[-3:] = b'==='
    elif leftover == 4:
        encoded[-1:] = b'='
    return bytes(encoded)
|
| 200 |
+
|
| 201 |
+
def _b32decode(alphabet, s, casefold=False, map01=None):
    """Shared Base32 decoder for b32decode/b32hexdecode.

    alphabet is the 32-byte alphabet; s is bytes-like or ASCII str.
    casefold allows lowercase input; map01 (base32 only) maps '0'->'O'
    and '1'->map01.  Raises binascii.Error on bad padding or
    non-alphabet characters.
    """
    global _b32rev
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    if alphabet not in _b32rev:
        _b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)}
    s = _bytes_from_decode_data(s)
    if len(s) % 8:
        raise binascii.Error('Incorrect padding')
    # Handle section 2.4 zero and one mapping. The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01 is not None:
        map01 = _bytes_from_decode_data(map01)
        assert len(map01) == 1, repr(map01)
        s = s.translate(bytes.maketrans(b'01', b'O' + map01))
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    l = len(s)
    s = s.rstrip(b'=')
    padchars = l - len(s)
    # Now decode the full quanta: each run of 8 symbols packs into a
    # 40-bit accumulator that yields 5 bytes.
    decoded = bytearray()
    b32rev = _b32rev[alphabet]
    for i in range(0, len(s), 8):
        quanta = s[i: i + 8]
        acc = 0
        try:
            for c in quanta:
                acc = (acc << 5) + b32rev[c]
        except KeyError:
            raise binascii.Error('Non-base32 digit found') from None
        decoded += acc.to_bytes(5, 'big')
    # Process the last, partial quanta; only these pad counts are legal.
    if l % 8 or padchars not in {0, 1, 3, 4, 6}:
        raise binascii.Error('Incorrect padding')
    if padchars and decoded:
        acc <<= 5 * padchars
        last = acc.to_bytes(5, 'big')
        leftover = (43 - 5 * padchars) // 8  # 1: 4, 3: 3, 4: 2, 6: 1
        decoded[-5:] = last[:leftover]
    return bytes(decoded)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
# Public Base32 wrapper using the RFC 4648 base32 alphabet (A-Z, 2-7).
# The docstring is assigned afterwards from the shared template.
def b32encode(s):
    return _b32encode(_b32alphabet, s)
b32encode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32')
|
| 251 |
+
|
| 252 |
+
# Public Base32 decoder; forwards casefold/map01 to the shared helper.
# The docstring is assigned afterwards from the shared template.
def b32decode(s, casefold=False, map01=None):
    return _b32decode(_b32alphabet, s, casefold, map01)
b32decode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32',
                                                 extra_args=_B32_DECODE_MAP01_DOCSTRING)
|
| 256 |
+
|
| 257 |
+
# Public base32hex encoder (RFC 4648 extended-hex alphabet 0-9, A-V).
def b32hexencode(s):
    return _b32encode(_b32hexalphabet, s)
b32hexencode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32hex')
|
| 260 |
+
|
| 261 |
+
def b32hexdecode(s, casefold=False):
    # base32hex does not have the 01 mapping
    return _b32decode(_b32hexalphabet, s, casefold)
b32hexdecode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32hex',
                                                    extra_args='')
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
|
| 269 |
+
# lowercase. The RFC also recommends against accepting input case
|
| 270 |
+
# insensitively.
|
| 271 |
+
def b16encode(s):
    """Return the Base16 (uppercase hex) encoding of bytes-like s as bytes."""
    hexed = binascii.hexlify(s)
    return hexed.upper()
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def b16decode(s, casefold=False):
    """Decode Base16-encoded bytes-like object or ASCII string s to bytes.

    casefold=True accepts lowercase hex digits (rejected by default for
    security, per RFC 4648).  binascii.Error is raised for non-alphabet
    characters or odd-length input.
    """
    s = _bytes_from_decode_data(s)
    if casefold:
        s = s.upper()
    if re.search(b'[^0-9A-F]', s) is not None:
        raise binascii.Error('Non-base16 digit found')
    return binascii.unhexlify(s)
|
| 293 |
+
|
| 294 |
+
#
|
| 295 |
+
# Ascii85 encoding/decoding
|
| 296 |
+
#
|
| 297 |
+
|
| 298 |
+
_a85chars = None
|
| 299 |
+
_a85chars2 = None
|
| 300 |
+
_A85START = b"<~"
|
| 301 |
+
_A85END = b"~>"
|
| 302 |
+
|
| 303 |
+
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
    # Helper function for a85encode and b85encode.
    # chars maps a value 0-84 to one symbol; chars2 maps 0-7224 to a
    # symbol pair.  foldnuls/foldspaces enable the Ascii85 'z'/'y'
    # shorthand groups; pad keeps the zero padding in the output.
    if not isinstance(b, bytes_types):
        b = memoryview(b).tobytes()

    # Zero-pad the input up to a 4-byte boundary before unpacking it as
    # big-endian 32-bit words.
    padding = (-len(b)) % 4
    if padding:
        b = b + b'\0' * padding
    words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)

    # Each 32-bit word becomes five base-85 digits: two symbol pairs
    # (85**4 = 52200625; 614125 = 85**3, 7225 = 85**2) plus one symbol.
    chunks = [b'z' if foldnuls and not word else
              b'y' if foldspaces and word == 0x20202020 else
              (chars2[word // 614125] +
               chars2[word // 85 % 7225] +
               chars[word % 85])
              for word in words]

    if padding and not pad:
        # Trim the symbols that only encode the zero padding; a folded
        # 'z' group must first be expanded back to five explicit zeros.
        if chunks[-1] == b'z':
            chunks[-1] = chars[0] * 5
        chunks[-1] = chunks[-1][:-padding]

    return b''.join(chunks)
|
| 326 |
+
|
| 327 |
+
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
    """Encode bytes-like object b using Ascii85 and return a bytes object.

    foldspaces is an optional flag that uses the special short sequence 'y'
    instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
    feature is not supported by the "standard" Adobe encoding.

    wrapcol controls whether the output should have newline (b'\\n') characters
    added to it. If this is non-zero, each output line will be at most this
    many characters long.

    pad controls whether the input is padded to a multiple of 4 before
    encoding. Note that the btoa implementation always pads.

    adobe controls whether the encoded byte sequence is framed with <~ and ~>,
    which is used by the Adobe implementation.
    """
    global _a85chars, _a85chars2
    # Delay the initialization of tables to not waste memory
    # if the function is never called
    if _a85chars2 is None:
        _a85chars = [bytes((i,)) for i in range(33, 118)]
        _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]

    result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)

    if adobe:
        result = _A85START + result
    if wrapcol:
        # Reserve room for the frame markers so a wrapped Adobe stream
        # never produces an over-long line.
        wrapcol = max(2 if adobe else 1, wrapcol)
        chunks = [result[i: i + wrapcol]
                  for i in range(0, len(result), wrapcol)]
        if adobe:
            if len(chunks[-1]) + 2 > wrapcol:
                chunks.append(b'')
        result = b'\n'.join(chunks)
    if adobe:
        result += _A85END

    return result
|
| 367 |
+
|
| 368 |
+
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
    """Decode the Ascii85 encoded bytes-like object or ASCII string b.

    foldspaces is a flag that specifies whether the 'y' short sequence should be
    accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
    not supported by the "standard" Adobe encoding.

    adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
    is framed with <~ and ~>).

    ignorechars should be a byte string containing characters to ignore from the
    input. This should only contain whitespace characters, and by default
    contains all whitespace characters in ASCII.

    The result is returned as a bytes object.
    """
    b = _bytes_from_decode_data(b)
    if adobe:
        if not b.endswith(_A85END):
            raise ValueError(
                "Ascii85 encoded byte sequences must end "
                "with {!r}".format(_A85END)
                )
        if b.startswith(_A85START):
            b = b[2:-2]  # Strip off start/end markers
        else:
            b = b[:-2]
    #
    # We have to go through this stepwise, so as to ignore spaces and handle
    # special short sequences
    #
    packI = struct.Struct('!I').pack
    decoded = []
    decoded_append = decoded.append
    curr = []
    curr_append = curr.append
    curr_clear = curr.clear
    # Appending b'u' * 4 flushes a trailing partial group; the extras are
    # trimmed from the result afterwards via `padding`.
    for x in b + b'u' * 4:
        if b'!'[0] <= x <= b'u'[0]:
            curr_append(x)
            if len(curr) == 5:
                # A complete 5-symbol group packs into one 32-bit word.
                acc = 0
                for x in curr:
                    acc = 85 * acc + (x - 33)
                try:
                    decoded_append(packI(acc))
                except struct.error:
                    raise ValueError('Ascii85 overflow') from None
                curr_clear()
        elif x == b'z'[0]:
            if curr:
                raise ValueError('z inside Ascii85 5-tuple')
            decoded_append(b'\0\0\0\0')
        elif foldspaces and x == b'y'[0]:
            if curr:
                raise ValueError('y inside Ascii85 5-tuple')
            decoded_append(b'\x20\x20\x20\x20')
        elif x in ignorechars:
            # Skip whitespace
            continue
        else:
            raise ValueError('Non-Ascii85 digit found: %c' % x)

    result = b''.join(decoded)
    padding = 4 - len(curr)
    if padding:
        # Throw away the extra padding
        result = result[:-padding]
    return result
|
| 437 |
+
|
| 438 |
+
# The following code is originally taken (with permission) from Mercurial
|
| 439 |
+
|
| 440 |
+
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
| 441 |
+
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
|
| 442 |
+
_b85chars = None
|
| 443 |
+
_b85chars2 = None
|
| 444 |
+
_b85dec = None
|
| 445 |
+
|
| 446 |
+
def b85encode(b, pad=False):
    """Encode bytes-like object b in base85 format and return a bytes object.

    If pad is true, the input is padded with b'\\0' so its length is a multiple of
    4 bytes before encoding.
    """
    global _b85chars, _b85chars2
    # Build the single- and double-character lookup tables lazily so the
    # cost is only paid if base85 encoding is actually used.
    if _b85chars2 is None:
        _b85chars = [_b85alphabet[i:i + 1] for i in range(len(_b85alphabet))]
        _b85chars2 = [first + second
                      for first in _b85chars
                      for second in _b85chars]
    return _85encode(b, _b85chars, _b85chars2, pad)
| 459 |
+
|
| 460 |
+
def b85decode(b):
    """Decode the base85-encoded bytes-like object or ASCII string b

    The result is returned as a bytes object.
    """
    global _b85dec
    # Delay the initialization of tables to not waste memory
    # if the function is never called
    if _b85dec is None:
        # 256-entry table: byte value -> base85 digit, None for invalid bytes.
        _b85dec = [None] * 256
        for i, c in enumerate(_b85alphabet):
            _b85dec[c] = i

    b = _bytes_from_decode_data(b)
    # Pad with b'~' (digit 84, the largest) so len is a multiple of 5;
    # the corresponding decoded bytes are stripped off at the end.
    padding = (-len(b)) % 5
    b = b + b'~' * padding
    out = []
    packI = struct.Struct('!I').pack
    for i in range(0, len(b), 5):
        chunk = b[i:i + 5]
        acc = 0
        try:
            # Fast path: accumulate 5 digits into one 32-bit group.
            for c in chunk:
                acc = acc * 85 + _b85dec[c]
        except TypeError:
            # An invalid byte maps to None; rescan to report its position.
            for j, c in enumerate(chunk):
                if _b85dec[c] is None:
                    raise ValueError('bad base85 character at position %d'
                                     % (i + j)) from None
            raise
        try:
            out.append(packI(acc))
        except struct.error:
            # acc exceeded 2**32-1, i.e. the 5-digit group was out of range.
            raise ValueError('base85 overflow in hunk starting at byte %d'
                             % i) from None

    result = b''.join(out)
    if padding:
        # Throw away the bytes that came from our '~' padding.
        result = result[:-padding]
    return result
|
| 500 |
+
|
| 501 |
+
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.

MAXLINESIZE = 76 # Excluding the CRLF
# 3 raw bytes encode to 4 base64 characters, so this many input bytes
# fill exactly one MAXLINESIZE-character output line.
MAXBINSIZE = (MAXLINESIZE//4)*3
| 507 |
+
|
| 508 |
+
def encode(input, output):
    """Encode a file; input and output are binary files."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top up short reads so every output line except possibly the
        # last is a full MAXLINESIZE characters.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk += more
        output.write(binascii.b2a_base64(chunk))
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def decode(input, output):
    """Decode a file; input and output are binary files."""
    # Decode line by line until readline() returns the empty bytes object.
    for line in iter(input.readline, b''):
        output.write(binascii.a2b_base64(line))
|
| 531 |
+
|
| 532 |
+
def _input_type_check(s):
|
| 533 |
+
try:
|
| 534 |
+
m = memoryview(s)
|
| 535 |
+
except TypeError as err:
|
| 536 |
+
msg = "expected bytes-like object, not %s" % s.__class__.__name__
|
| 537 |
+
raise TypeError(msg) from err
|
| 538 |
+
if m.format not in ('c', 'b', 'B'):
|
| 539 |
+
msg = ("expected single byte elements, not %r from %s" %
|
| 540 |
+
(m.format, s.__class__.__name__))
|
| 541 |
+
raise TypeError(msg)
|
| 542 |
+
if m.ndim != 1:
|
| 543 |
+
msg = ("expected 1-D data, not %d-D data from %s" %
|
| 544 |
+
(m.ndim, s.__class__.__name__))
|
| 545 |
+
raise TypeError(msg)
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def encodebytes(s):
    """Encode a bytestring into a bytes object containing multiple lines
    of base-64 data."""
    _input_type_check(s)
    # One b2a_base64 call per MAXBINSIZE-byte slice yields one output line.
    return b"".join(binascii.b2a_base64(s[i:i + MAXBINSIZE])
                    for i in range(0, len(s), MAXBINSIZE))
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def decodebytes(s):
    """Decode a bytestring of base-64 data into a bytes object."""
    # Validate the argument is bytes-like before handing it to binascii.
    _input_type_check(s)
    return binascii.a2b_base64(s)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
# Usable as a script...
|
| 566 |
+
def main():
    """Small main program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error as msg:
        # Report usage on stderr and bail out.
        sys.stdout = sys.stderr
        print(msg)
        print("""usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
        sys.exit(2)
    func = encode
    for opt, _ in opts:
        if opt == '-t':
            test()
            return
        elif opt == '-e':
            func = encode
        elif opt in ('-d', '-u'):
            func = decode
    # Read from the named file, or stdin when no file (or '-') is given.
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout.buffer)
    else:
        func(sys.stdin.buffer, sys.stdout.buffer)
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
def test():
    """Round-trip a known string through encodebytes/decodebytes."""
    original = b"Aladdin:open sesame"
    print(repr(original))
    encoded = encodebytes(original)
    print(repr(encoded))
    decoded = decodebytes(encoded)
    print(repr(decoded))
    assert original == decoded
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
# Run the command-line interface when executed as a script.
if __name__ == '__main__':
    main()
|
llava/lib/python3.10/bdb.py
ADDED
|
@@ -0,0 +1,891 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Debugger basics"""
|
| 2 |
+
|
| 3 |
+
import fnmatch
|
| 4 |
+
import sys
|
| 5 |
+
import os
|
| 6 |
+
from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR
|
| 7 |
+
|
| 8 |
+
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]

# Code objects carrying any of these flags belong to generators or
# coroutines; the dispatch_* methods treat their events specially.
GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Raised by the dispatch_* methods (after self.quitting is set) to
# unwind out of the debugged code entirely.
class BdbQuit(Exception):
    """Exception to give up completely."""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Bdb:
|
| 18 |
+
"""Generic Python debugger base class.
|
| 19 |
+
|
| 20 |
+
This class takes care of details of the trace facility;
|
| 21 |
+
a derived class should implement user interaction.
|
| 22 |
+
The standard debugger class (pdb.Pdb) is an example.
|
| 23 |
+
|
| 24 |
+
The optional skip argument must be an iterable of glob-style
|
| 25 |
+
module name patterns. The debugger will not step into frames
|
| 26 |
+
that originate in a module that matches one of these patterns.
|
| 27 |
+
Whether a frame is considered to originate in a certain module
|
| 28 |
+
is determined by the __name__ in the frame globals.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def __init__(self, skip=None):
|
| 32 |
+
self.skip = set(skip) if skip else None
|
| 33 |
+
self.breaks = {}
|
| 34 |
+
self.fncache = {}
|
| 35 |
+
self.frame_returning = None
|
| 36 |
+
|
| 37 |
+
self._load_breaks()
|
| 38 |
+
|
| 39 |
+
def canonic(self, filename):
|
| 40 |
+
"""Return canonical form of filename.
|
| 41 |
+
|
| 42 |
+
For real filenames, the canonical form is a case-normalized (on
|
| 43 |
+
case insensitive filesystems) absolute path. 'Filenames' with
|
| 44 |
+
angle brackets, such as "<stdin>", generated in interactive
|
| 45 |
+
mode, are returned unchanged.
|
| 46 |
+
"""
|
| 47 |
+
if filename == "<" + filename[1:-1] + ">":
|
| 48 |
+
return filename
|
| 49 |
+
canonic = self.fncache.get(filename)
|
| 50 |
+
if not canonic:
|
| 51 |
+
canonic = os.path.abspath(filename)
|
| 52 |
+
canonic = os.path.normcase(canonic)
|
| 53 |
+
self.fncache[filename] = canonic
|
| 54 |
+
return canonic
|
| 55 |
+
|
| 56 |
+
def reset(self):
|
| 57 |
+
"""Set values of attributes as ready to start debugging."""
|
| 58 |
+
import linecache
|
| 59 |
+
linecache.checkcache()
|
| 60 |
+
self.botframe = None
|
| 61 |
+
self._set_stopinfo(None, None)
|
| 62 |
+
|
| 63 |
+
    def trace_dispatch(self, frame, event, arg):
        """Dispatch a trace function for debugged frames based on the event.

        This function is installed as the trace function for debugged
        frames. Its return value is the new trace function, which is
        usually itself. The default implementation decides how to
        dispatch a frame, depending on the type of event (passed in as a
        string) that is about to be executed.

        The event can be one of the following:
            line: A new line of code is going to be executed.
            call: A function is about to be called or another code block
                  is entered.
            return: A function or other code block is about to return.
            exception: An exception has occurred.
            c_call: A C function is about to be called.
            c_return: A C function has returned.
            c_exception: A C function has raised an exception.

        For the Python events, specialized functions (see the dispatch_*()
        methods) are called. For the C events, no action is taken.

        The arg parameter depends on the previous event.
        """
        # After set_quit(), stop tracing by returning None everywhere.
        if self.quitting:
            return # None
        if event == 'line':
            return self.dispatch_line(frame)
        if event == 'call':
            return self.dispatch_call(frame, arg)
        if event == 'return':
            return self.dispatch_return(frame, arg)
        if event == 'exception':
            return self.dispatch_exception(frame, arg)
        # C-level events take no action but keep tracing enabled.
        if event == 'c_call':
            return self.trace_dispatch
        if event == 'c_exception':
            return self.trace_dispatch
        if event == 'c_return':
            return self.trace_dispatch
        print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))
        return self.trace_dispatch
|
| 105 |
+
|
| 106 |
+
    def dispatch_line(self, frame):
        """Invoke user function and return trace function for line event.

        If the debugger stops on the current line, invoke
        self.user_line(). Raise BdbQuit if self.quitting is set.
        Return self.trace_dispatch to continue tracing in this scope.
        """
        if self.stop_here(frame) or self.break_here(frame):
            self.user_line(frame)
            # user_line() may have called set_quit().
            if self.quitting: raise BdbQuit
        return self.trace_dispatch
|
| 117 |
+
|
| 118 |
+
    def dispatch_call(self, frame, arg):
        """Invoke user function and return trace function for call event.

        If the debugger stops on this function call, invoke
        self.user_call(). Raise BdbQuit if self.quitting is set.
        Return self.trace_dispatch to continue tracing in this scope.
        """
        # XXX 'arg' is no longer used
        if self.botframe is None:
            # First call of dispatch since reset()
            self.botframe = frame.f_back # (CT) Note that this may also be None!
            return self.trace_dispatch
        if not (self.stop_here(frame) or self.break_anywhere(frame)):
            # No need to trace this function
            return # None
        # Ignore call events in generator except when stepping.
        if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
            return self.trace_dispatch
        self.user_call(frame, arg)
        # user_call() may have called set_quit().
        if self.quitting: raise BdbQuit
        return self.trace_dispatch
|
| 139 |
+
|
| 140 |
+
    def dispatch_return(self, frame, arg):
        """Invoke user function and return trace function for return event.

        If the debugger stops on this function return, invoke
        self.user_return(). Raise BdbQuit if self.quitting is set.
        Return self.trace_dispatch to continue tracing in this scope.
        """
        if self.stop_here(frame) or frame == self.returnframe:
            # Ignore return events in generator except when stepping.
            if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
                return self.trace_dispatch
            try:
                # Record the frame so set_step() can restore the caller's
                # trace function (see issue #13183).
                self.frame_returning = frame
                self.user_return(frame, arg)
            finally:
                self.frame_returning = None
            if self.quitting: raise BdbQuit
            # The user issued a 'next' or 'until' command.
            if self.stopframe is frame and self.stoplineno != -1:
                self._set_stopinfo(None, None)
        return self.trace_dispatch
|
| 161 |
+
|
| 162 |
+
    def dispatch_exception(self, frame, arg):
        """Invoke user function and return trace function for exception event.

        If the debugger stops on this exception, invoke
        self.user_exception(). Raise BdbQuit if self.quitting is set.
        Return self.trace_dispatch to continue tracing in this scope.
        """
        if self.stop_here(frame):
            # When stepping with next/until/return in a generator frame, skip
            # the internal StopIteration exception (with no traceback)
            # triggered by a subiterator run with the 'yield from' statement.
            if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
                    and arg[0] is StopIteration and arg[2] is None):
                self.user_exception(frame, arg)
                if self.quitting: raise BdbQuit
        # Stop at the StopIteration or GeneratorExit exception when the user
        # has set stopframe in a generator by issuing a return command, or a
        # next/until command at the last statement in the generator before the
        # exception.
        elif (self.stopframe and frame is not self.stopframe
                and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
                and arg[0] in (StopIteration, GeneratorExit)):
            self.user_exception(frame, arg)
            if self.quitting: raise BdbQuit

        return self.trace_dispatch
|
| 188 |
+
|
| 189 |
+
# Normally derived classes don't override the following
|
| 190 |
+
# methods, but they may if they want to redefine the
|
| 191 |
+
# definition of stopping and breakpoints.
|
| 192 |
+
|
| 193 |
+
def is_skipped_module(self, module_name):
|
| 194 |
+
"Return True if module_name matches any skip pattern."
|
| 195 |
+
if module_name is None: # some modules do not have names
|
| 196 |
+
return False
|
| 197 |
+
for pattern in self.skip:
|
| 198 |
+
if fnmatch.fnmatch(module_name, pattern):
|
| 199 |
+
return True
|
| 200 |
+
return False
|
| 201 |
+
|
| 202 |
+
def stop_here(self, frame):
|
| 203 |
+
"Return True if frame is below the starting frame in the stack."
|
| 204 |
+
# (CT) stopframe may now also be None, see dispatch_call.
|
| 205 |
+
# (CT) the former test for None is therefore removed from here.
|
| 206 |
+
if self.skip and \
|
| 207 |
+
self.is_skipped_module(frame.f_globals.get('__name__')):
|
| 208 |
+
return False
|
| 209 |
+
if frame is self.stopframe:
|
| 210 |
+
if self.stoplineno == -1:
|
| 211 |
+
return False
|
| 212 |
+
return frame.f_lineno >= self.stoplineno
|
| 213 |
+
if not self.stopframe:
|
| 214 |
+
return True
|
| 215 |
+
return False
|
| 216 |
+
|
| 217 |
+
    def break_here(self, frame):
        """Return True if there is an effective breakpoint for this line.

        Check for line or function breakpoint and if in effect.
        Delete temporary breakpoints if effective() says to.
        """
        filename = self.canonic(frame.f_code.co_filename)
        if filename not in self.breaks:
            return False
        lineno = frame.f_lineno
        if lineno not in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = frame.f_code.co_firstlineno
            if lineno not in self.breaks[filename]:
                return False

        # flag says ok to delete temp. bp
        # NOTE: effective() is a module-level helper defined elsewhere in
        # this file; it evaluates conditions/ignore counts for the line.
        (bp, flag) = effective(filename, lineno, frame)
        if bp:
            self.currentbp = bp.number
            if (flag and bp.temporary):
                self.do_clear(str(bp.number))
            return True
        else:
            return False
|
| 243 |
+
|
| 244 |
+
    def do_clear(self, arg):
        """Remove temporary breakpoint.

        Must implement in derived classes or get NotImplementedError.
        """
        # Called by break_here() when a temporary breakpoint fires.
        raise NotImplementedError("subclass of bdb must implement do_clear()")
|
| 250 |
+
|
| 251 |
+
def break_anywhere(self, frame):
|
| 252 |
+
"""Return True if there is any breakpoint for frame's filename.
|
| 253 |
+
"""
|
| 254 |
+
return self.canonic(frame.f_code.co_filename) in self.breaks
|
| 255 |
+
|
| 256 |
+
# Derived classes should override the user_* methods
|
| 257 |
+
# to gain control.
|
| 258 |
+
|
| 259 |
+
    def user_call(self, frame, argument_list):
        """Called if we might stop in a function."""
        # Hook for derived classes; default does nothing.
        pass
|
| 262 |
+
|
| 263 |
+
    def user_line(self, frame):
        """Called when we stop or break at a line."""
        # Hook for derived classes; default does nothing.
        pass
|
| 266 |
+
|
| 267 |
+
    def user_return(self, frame, return_value):
        """Called when a return trap is set here."""
        # Hook for derived classes; default does nothing.
        pass
|
| 270 |
+
|
| 271 |
+
    def user_exception(self, frame, exc_info):
        """Called when we stop on an exception."""
        # Hook for derived classes; default does nothing.
        pass
|
| 274 |
+
|
| 275 |
+
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
|
| 276 |
+
"""Set the attributes for stopping.
|
| 277 |
+
|
| 278 |
+
If stoplineno is greater than or equal to 0, then stop at line
|
| 279 |
+
greater than or equal to the stopline. If stoplineno is -1, then
|
| 280 |
+
don't stop at all.
|
| 281 |
+
"""
|
| 282 |
+
self.stopframe = stopframe
|
| 283 |
+
self.returnframe = returnframe
|
| 284 |
+
self.quitting = False
|
| 285 |
+
# stoplineno >= 0 means: stop at line >= the stoplineno
|
| 286 |
+
# stoplineno -1 means: don't stop at all
|
| 287 |
+
self.stoplineno = stoplineno
|
| 288 |
+
|
| 289 |
+
# Derived classes and clients can call the following methods
|
| 290 |
+
# to affect the stepping state.
|
| 291 |
+
|
| 292 |
+
def set_until(self, frame, lineno=None):
|
| 293 |
+
"""Stop when the line with the lineno greater than the current one is
|
| 294 |
+
reached or when returning from current frame."""
|
| 295 |
+
# the name "until" is borrowed from gdb
|
| 296 |
+
if lineno is None:
|
| 297 |
+
lineno = frame.f_lineno + 1
|
| 298 |
+
self._set_stopinfo(frame, frame, lineno)
|
| 299 |
+
|
| 300 |
+
def set_step(self):
|
| 301 |
+
"""Stop after one line of code."""
|
| 302 |
+
# Issue #13183: pdb skips frames after hitting a breakpoint and running
|
| 303 |
+
# step commands.
|
| 304 |
+
# Restore the trace function in the caller (that may not have been set
|
| 305 |
+
# for performance reasons) when returning from the current frame.
|
| 306 |
+
if self.frame_returning:
|
| 307 |
+
caller_frame = self.frame_returning.f_back
|
| 308 |
+
if caller_frame and not caller_frame.f_trace:
|
| 309 |
+
caller_frame.f_trace = self.trace_dispatch
|
| 310 |
+
self._set_stopinfo(None, None)
|
| 311 |
+
|
| 312 |
+
    def set_next(self, frame):
        """Called to stop on the next line in or below the given frame."""
        self._set_stopinfo(frame, None)
|
| 315 |
+
|
| 316 |
+
    def set_return(self, frame):
        """Stop when returning from the given frame."""
        if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
            # For generators/coroutines, stop in the frame itself but never
            # on a line (-1); dispatch_exception catches StopIteration.
            self._set_stopinfo(frame, None, -1)
        else:
            # Stop in the caller when this frame returns.
            self._set_stopinfo(frame.f_back, frame)
|
| 322 |
+
|
| 323 |
+
    def set_trace(self, frame=None):
        """Start debugging from frame.

        If frame is not specified, debugging starts from caller's frame.
        """
        if frame is None:
            frame = sys._getframe().f_back
        self.reset()
        # Install the trace function on every frame up the stack so
        # stepping works in already-running callers; the outermost frame
        # seen becomes botframe.
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)
|
| 337 |
+
|
| 338 |
+
    def set_continue(self):
        """Stop only at breakpoints or when finished.

        If there are no breakpoints, set the system trace function to None.
        """
        # Don't stop except at breakpoints or when finished
        self._set_stopinfo(self.botframe, None, -1)
        if not self.breaks:
            # no breakpoints; run without debugger overhead
            sys.settrace(None)
            # Remove the per-frame trace functions installed by set_trace().
            frame = sys._getframe().f_back
            while frame and frame is not self.botframe:
                del frame.f_trace
                frame = frame.f_back
|
| 352 |
+
|
| 353 |
+
def set_quit(self):
|
| 354 |
+
"""Set quitting attribute to True.
|
| 355 |
+
|
| 356 |
+
Raises BdbQuit exception in the next call to a dispatch_*() method.
|
| 357 |
+
"""
|
| 358 |
+
self.stopframe = self.botframe
|
| 359 |
+
self.returnframe = None
|
| 360 |
+
self.quitting = True
|
| 361 |
+
sys.settrace(None)
|
| 362 |
+
|
| 363 |
+
# Derived classes and clients can call the following methods
|
| 364 |
+
# to manipulate breakpoints. These methods return an
|
| 365 |
+
# error message if something went wrong, None if all is well.
|
| 366 |
+
# Set_break prints out the breakpoint line and file:lineno.
|
| 367 |
+
# Call self.get_*break*() to see the breakpoints or better
|
| 368 |
+
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
|
| 369 |
+
|
| 370 |
+
def _add_to_breaks(self, filename, lineno):
|
| 371 |
+
"""Add breakpoint to breaks, if not already there."""
|
| 372 |
+
bp_linenos = self.breaks.setdefault(filename, [])
|
| 373 |
+
if lineno not in bp_linenos:
|
| 374 |
+
bp_linenos.append(lineno)
|
| 375 |
+
|
| 376 |
+
def set_break(self, filename, lineno, temporary=False, cond=None,
|
| 377 |
+
funcname=None):
|
| 378 |
+
"""Set a new breakpoint for filename:lineno.
|
| 379 |
+
|
| 380 |
+
If lineno doesn't exist for the filename, return an error message.
|
| 381 |
+
The filename should be in canonical form.
|
| 382 |
+
"""
|
| 383 |
+
filename = self.canonic(filename)
|
| 384 |
+
import linecache # Import as late as possible
|
| 385 |
+
line = linecache.getline(filename, lineno)
|
| 386 |
+
if not line:
|
| 387 |
+
return 'Line %s:%d does not exist' % (filename, lineno)
|
| 388 |
+
self._add_to_breaks(filename, lineno)
|
| 389 |
+
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
|
| 390 |
+
return None
|
| 391 |
+
|
| 392 |
+
def _load_breaks(self):
|
| 393 |
+
"""Apply all breakpoints (set in other instances) to this one.
|
| 394 |
+
|
| 395 |
+
Populates this instance's breaks list from the Breakpoint class's
|
| 396 |
+
list, which can have breakpoints set by another Bdb instance. This
|
| 397 |
+
is necessary for interactive sessions to keep the breakpoints
|
| 398 |
+
active across multiple calls to run().
|
| 399 |
+
"""
|
| 400 |
+
for (filename, lineno) in Breakpoint.bplist.keys():
|
| 401 |
+
self._add_to_breaks(filename, lineno)
|
| 402 |
+
|
| 403 |
+
def _prune_breaks(self, filename, lineno):
|
| 404 |
+
"""Prune breakpoints for filename:lineno.
|
| 405 |
+
|
| 406 |
+
A list of breakpoints is maintained in the Bdb instance and in
|
| 407 |
+
the Breakpoint class. If a breakpoint in the Bdb instance no
|
| 408 |
+
longer exists in the Breakpoint class, then it's removed from the
|
| 409 |
+
Bdb instance.
|
| 410 |
+
"""
|
| 411 |
+
if (filename, lineno) not in Breakpoint.bplist:
|
| 412 |
+
self.breaks[filename].remove(lineno)
|
| 413 |
+
if not self.breaks[filename]:
|
| 414 |
+
del self.breaks[filename]
|
| 415 |
+
|
| 416 |
+
def clear_break(self, filename, lineno):
|
| 417 |
+
"""Delete breakpoints for filename:lineno.
|
| 418 |
+
|
| 419 |
+
If no breakpoints were set, return an error message.
|
| 420 |
+
"""
|
| 421 |
+
filename = self.canonic(filename)
|
| 422 |
+
if filename not in self.breaks:
|
| 423 |
+
return 'There are no breakpoints in %s' % filename
|
| 424 |
+
if lineno not in self.breaks[filename]:
|
| 425 |
+
return 'There is no breakpoint at %s:%d' % (filename, lineno)
|
| 426 |
+
# If there's only one bp in the list for that file,line
|
| 427 |
+
# pair, then remove the breaks entry
|
| 428 |
+
for bp in Breakpoint.bplist[filename, lineno][:]:
|
| 429 |
+
bp.deleteMe()
|
| 430 |
+
self._prune_breaks(filename, lineno)
|
| 431 |
+
return None
|
| 432 |
+
|
| 433 |
+
def clear_bpbynumber(self, arg):
|
| 434 |
+
"""Delete a breakpoint by its index in Breakpoint.bpbynumber.
|
| 435 |
+
|
| 436 |
+
If arg is invalid, return an error message.
|
| 437 |
+
"""
|
| 438 |
+
try:
|
| 439 |
+
bp = self.get_bpbynumber(arg)
|
| 440 |
+
except ValueError as err:
|
| 441 |
+
return str(err)
|
| 442 |
+
bp.deleteMe()
|
| 443 |
+
self._prune_breaks(bp.file, bp.line)
|
| 444 |
+
return None
|
| 445 |
+
|
| 446 |
+
def clear_all_file_breaks(self, filename):
|
| 447 |
+
"""Delete all breakpoints in filename.
|
| 448 |
+
|
| 449 |
+
If none were set, return an error message.
|
| 450 |
+
"""
|
| 451 |
+
filename = self.canonic(filename)
|
| 452 |
+
if filename not in self.breaks:
|
| 453 |
+
return 'There are no breakpoints in %s' % filename
|
| 454 |
+
for line in self.breaks[filename]:
|
| 455 |
+
blist = Breakpoint.bplist[filename, line]
|
| 456 |
+
for bp in blist:
|
| 457 |
+
bp.deleteMe()
|
| 458 |
+
del self.breaks[filename]
|
| 459 |
+
return None
|
| 460 |
+
|
| 461 |
+
def clear_all_breaks(self):
|
| 462 |
+
"""Delete all existing breakpoints.
|
| 463 |
+
|
| 464 |
+
If none were set, return an error message.
|
| 465 |
+
"""
|
| 466 |
+
if not self.breaks:
|
| 467 |
+
return 'There are no breakpoints'
|
| 468 |
+
for bp in Breakpoint.bpbynumber:
|
| 469 |
+
if bp:
|
| 470 |
+
bp.deleteMe()
|
| 471 |
+
self.breaks = {}
|
| 472 |
+
return None
|
| 473 |
+
|
| 474 |
+
def get_bpbynumber(self, arg):
|
| 475 |
+
"""Return a breakpoint by its index in Breakpoint.bybpnumber.
|
| 476 |
+
|
| 477 |
+
For invalid arg values or if the breakpoint doesn't exist,
|
| 478 |
+
raise a ValueError.
|
| 479 |
+
"""
|
| 480 |
+
if not arg:
|
| 481 |
+
raise ValueError('Breakpoint number expected')
|
| 482 |
+
try:
|
| 483 |
+
number = int(arg)
|
| 484 |
+
except ValueError:
|
| 485 |
+
raise ValueError('Non-numeric breakpoint number %s' % arg) from None
|
| 486 |
+
try:
|
| 487 |
+
bp = Breakpoint.bpbynumber[number]
|
| 488 |
+
except IndexError:
|
| 489 |
+
raise ValueError('Breakpoint number %d out of range' % number) from None
|
| 490 |
+
if bp is None:
|
| 491 |
+
raise ValueError('Breakpoint %d already deleted' % number)
|
| 492 |
+
return bp
|
| 493 |
+
|
| 494 |
+
def get_break(self, filename, lineno):
    """Report whether a breakpoint is set at filename:lineno."""
    canonical = self.canonic(filename)
    lines = self.breaks.get(canonical)
    return lines is not None and lineno in lines
|
| 499 |
+
|
| 500 |
+
def get_breaks(self, filename, lineno):
    """Return the list of breakpoints set at filename:lineno.

    An empty list is returned when none are set there.
    """
    canonical = self.canonic(filename)
    if canonical in self.breaks and lineno in self.breaks[canonical]:
        return Breakpoint.bplist[canonical, lineno]
    return []
|
| 509 |
+
|
| 510 |
+
def get_file_breaks(self, filename):
    """Return the line numbers carrying breakpoints in filename.

    An empty list is returned when the file has none.
    """
    return self.breaks.get(self.canonic(filename), [])
|
| 520 |
+
|
| 521 |
+
def get_all_breaks(self):
    """Return all breakpoints that are set."""
    # Maps canonical filename -> list of line numbers.  The caller
    # receives the live dict, not a copy.
    return self.breaks
|
| 524 |
+
|
| 525 |
+
# Derived classes and clients can call the following method
|
| 526 |
+
# to get a data structure representing a stack trace.
|
| 527 |
+
|
| 528 |
+
def get_stack(self, f, t):
    """Return a list of (frame, lineno) in a stack trace and a size.

    f is the newest frame, t an optional traceback.  List starts with
    original calling frame, if there is one.  Size may be number of
    frames above or below f.
    """
    stack = []
    # If the traceback begins at the current frame, drop that entry so
    # the frame is not listed twice.
    if t and t.tb_frame is f:
        t = t.tb_next
    # Walk caller frames from f back to botframe (inclusive).
    while f is not None:
        stack.append((f, f.f_lineno))
        if f is self.botframe:
            break
        f = f.f_back
    # Oldest frame first.
    stack.reverse()
    # i indexes the entry for f's frame after the reversal.
    i = max(0, len(stack) - 1)
    # Append the exception-propagation frames from the traceback, if any.
    while t is not None:
        stack.append((t.tb_frame, t.tb_lineno))
        t = t.tb_next
    # If the walk never reached botframe, point at the newest entry instead.
    if f is None:
        i = max(0, len(stack) - 1)
    return stack, i
|
| 550 |
+
|
| 551 |
+
def format_stack_entry(self, frame_lineno, lprefix=': '):
    """Build a one-line description of a stack entry.

    frame_lineno is a (frame, lineno) pair.  The result contains the
    canonical filename, the function name (or '<lambda>' when the code
    object has no name), the return value when '__return__' is present
    in the frame locals, and the stripped source line when available.
    """
    import linecache, reprlib
    frame, lineno = frame_lineno
    filename = self.canonic(frame.f_code.co_filename)
    parts = ['%s(%r)' % (filename, lineno)]
    parts.append(frame.f_code.co_name or "<lambda>")
    parts.append('()')
    if '__return__' in frame.f_locals:
        # A pending return value was stashed by the tracer.
        parts.append('->')
        parts.append(reprlib.repr(frame.f_locals['__return__']))
    if lineno is not None:
        line = linecache.getline(filename, lineno, frame.f_globals)
        if line:
            parts.append(lprefix + line.strip())
    return ''.join(parts)
|
| 578 |
+
|
| 579 |
+
# The following methods can be called by clients to use
|
| 580 |
+
# a debugger to debug a statement or an expression.
|
| 581 |
+
# Both can be given as a string, or a code object.
|
| 582 |
+
|
| 583 |
+
def run(self, cmd, globals=None, locals=None):
    """Debug a statement executed via the exec() function.

    globals defaults to the __main__ module's namespace; locals
    defaults to globals.  cmd may be a string or a code object.
    """
    if globals is None:
        import __main__
        globals = __main__.__dict__
    if locals is None:
        locals = globals
    self.reset()
    code = compile(cmd, "<string>", "exec") if isinstance(cmd, str) else cmd
    sys.settrace(self.trace_dispatch)
    try:
        exec(code, globals, locals)
    except BdbQuit:
        pass
    finally:
        # Always detach the tracer, even when the debugged code raised.
        self.quitting = True
        sys.settrace(None)
|
| 604 |
+
|
| 605 |
+
def runeval(self, expr, globals=None, locals=None):
    """Debug an expression executed via the eval() function.

    globals defaults to the __main__ module's namespace; locals
    defaults to globals.  Returns the value of the expression.
    """
    if globals is None:
        import __main__
        globals = __main__.__dict__
    if locals is None:
        locals = globals
    self.reset()
    sys.settrace(self.trace_dispatch)
    try:
        return eval(expr, globals, locals)
    except BdbQuit:
        pass
    finally:
        # Always detach the tracer, even when the expression raised.
        self.quitting = True
        sys.settrace(None)
|
| 624 |
+
|
| 625 |
+
def runctx(self, cmd, globals, locals):
    """Backward-compatibility wrapper: defers to run() unchanged."""
    # B/W compatibility -- kept so old callers with explicit
    # globals/locals keep working.
    self.run(cmd, globals, locals)
|
| 629 |
+
|
| 630 |
+
# This method is more useful to debug a single function call.
|
| 631 |
+
|
| 632 |
+
def runcall(self, func, /, *args, **kwds):
    """Debug a single function call.

    Returns the function's result, or None when the debugger quit
    before the call completed.
    """
    self.reset()
    sys.settrace(self.trace_dispatch)
    result = None
    try:
        result = func(*args, **kwds)
    except BdbQuit:
        pass
    finally:
        # Detach the tracer no matter how the call ended.
        self.quitting = True
        sys.settrace(None)
    return result
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
def set_trace():
    """Start debugging with a Bdb instance from the caller's frame."""
    # Convenience entry point: constructs a throwaway Bdb and starts
    # tracing at the frame of whoever called us.
    Bdb().set_trace()
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
class Breakpoint:
    """Breakpoint class.

    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.

    Breakpoints are indexed by number through bpbynumber and by
    the (file, line) tuple using bplist.  The former points to a
    single instance of class Breakpoint.  The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.

    When creating a breakpoint, its associated filename should be
    in canonical form.  If funcname is defined, a breakpoint hit will be
    counted when the first line of that function is executed.  A
    conditional breakpoint always counts a hit.
    """

    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.

    next = 1        # Next bp number to be assigned
    bplist = {}     # indexed by (file, lineno) tuple
    bpbynumber = [None]  # Each entry is None or an instance of Bpt
                         # index 0 is unused, except for marking an
                         # effective break .... see effective()

    def __init__(self, file, line, temporary=False, cond=None, funcname=None):
        self.funcname = funcname
        # Needed if funcname is not None: filled in lazily on the first
        # hit (see checkfuncname()).
        self.func_first_executable_line = None
        self.file = file    # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond     # optional expression string, eval'd per hit
        self.enabled = True
        self.ignore = 0      # number of hits to skip before stopping
        self.hits = 0        # number of times this breakpoint was hit
        self.number = Breakpoint.next
        Breakpoint.next += 1
        # Build the two lists: by number, and by (file, line).
        self.bpbynumber.append(self)
        if (file, line) in self.bplist:
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]

    @staticmethod
    def clearBreakpoints():
        # Reset the class-level registries to their pristine state.
        Breakpoint.next = 1
        Breakpoint.bplist = {}
        Breakpoint.bpbynumber = [None]

    def deleteMe(self):
        """Delete the breakpoint from the list associated to a file:line.

        If it is the last breakpoint in that position, it also deletes
        the entry for the file:line.
        """

        index = (self.file, self.line)
        self.bpbynumber[self.number] = None   # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]

    def enable(self):
        """Mark the breakpoint as enabled."""
        self.enabled = True

    def disable(self):
        """Mark the breakpoint as disabled."""
        self.enabled = False

    def bpprint(self, out=None):
        """Print the output of bpformat().

        The optional out argument directs where the output is sent
        and defaults to standard output.
        """
        if out is None:
            out = sys.stdout
        print(self.bpformat(), file=out)

    def bpformat(self):
        """Return a string with information about the breakpoint.

        The information includes the breakpoint number, temporary
        status, file:line position, break condition, number of times to
        ignore, and number of times hit.
        """
        # disp encodes disposition (temporary vs kept) and enabled state.
        if self.temporary:
            disp = 'del '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes '
        else:
            disp = disp + 'no '
        ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
                                             self.file, self.line)
        if self.cond:
            ret += '\n\tstop only if %s' % (self.cond,)
        if self.ignore:
            ret += '\n\tignore next %d hits' % (self.ignore,)
        if self.hits:
            if self.hits > 1:
                ss = 's'
            else:
                ss = ''
            ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss)
        return ret

    def __str__(self):
        "Return a condensed description of the breakpoint."
        return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line)
|
| 773 |
+
|
| 774 |
+
# -----------end of Breakpoint class----------
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def checkfuncname(b, frame):
    """Decide whether breakpoint b should fire in frame.

    A line-number breakpoint fires only on its exact line (so a
    breakpoint placed on a def statement does not fire when the defined
    function is merely called).  A function-name breakpoint fires only
    inside the named function, and only on its first executable line,
    which is recorded the first time the function is entered.
    """
    if not b.funcname:
        # Breakpoint was set via line number: fire on an exact match only.
        return b.line == frame.f_lineno

    # Breakpoint was set via function name.
    if frame.f_code.co_name != b.funcname:
        # Not a call of the target function (e.g. execution of its def).
        return False

    # We are in the right frame; remember the entry line on first visit.
    if not b.func_first_executable_line:
        b.func_first_executable_line = frame.f_lineno

    return b.func_first_executable_line == frame.f_lineno
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
def effective(file, line, frame):
    """Return (active breakpoint, delete temporary flag) or (None, None) as
    breakpoint to act upon.

    The "active breakpoint" is the first entry in bplist[line, file] (which
    must exist) that is enabled, for which checkfuncname is True, and that
    has neither a False condition nor a positive ignore count.  The flag,
    meaning that a temporary breakpoint should be deleted, is False only
    when the condition cannot be evaluated (in which case, ignore count is
    ignored).

    If no such entry exists, then (None, None) is returned.
    """
    possibles = Breakpoint.bplist[file, line]
    for b in possibles:
        if not b.enabled:
            continue
        if not checkfuncname(b, frame):
            continue
        # Count every hit when bp is enabled
        b.hits += 1
        if not b.cond:
            # If unconditional, and ignoring go on to next, else break
            if b.ignore > 0:
                b.ignore -= 1
                continue
            else:
                # breakpoint and marker that it's ok to delete if temporary
                return (b, True)
        else:
            # Conditional bp.
            # Ignore count applies only to those bpt hits where the
            # condition evaluates to true.
            try:
                val = eval(b.cond, frame.f_globals, frame.f_locals)
                if val:
                    if b.ignore > 0:
                        b.ignore -= 1
                        # continue
                    else:
                        return (b, True)
                # else:
                #   continue
            except:
                # if eval fails, most conservative thing is to stop on
                # breakpoint regardless of ignore count.  Don't delete
                # temporary, as another hint to user.
                return (b, False)
    return (None, None)
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
# -------------------- testing --------------------
|
| 861 |
+
|
| 862 |
+
class Tdb(Bdb):
    # Minimal Bdb subclass used by this module's self-test: each
    # callback simply prints the event it receives.
    def user_call(self, frame, args):
        # Report a function call event.
        name = frame.f_code.co_name
        if not name: name = '???'
        print('+++ call', name, args)
    def user_line(self, frame):
        # Report a line event, including the source text when available.
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
        print('+++', fn, frame.f_lineno, name, ':', line.strip())
    def user_return(self, frame, retval):
        # Report a return event with the returned value.
        print('+++ return', retval)
    def user_exception(self, frame, exc_stuff):
        # Report an exception event, then resume execution.
        print('+++ exception', exc_stuff)
        self.set_continue()
|
| 879 |
+
|
| 880 |
+
def foo(n):
    """Demo helper for the self-test: calls bar() with ten times n."""
    print('foo(', n, ')')
    result = bar(n * 10)
    print('bar returned', result)
|
| 884 |
+
|
| 885 |
+
def bar(a):
    """Demo helper for the self-test: prints a and returns half of it."""
    print('bar(', a, ')')
    return a / 2
|
| 888 |
+
|
| 889 |
+
def test():
    # Self-test: run a tiny script under the printing tracer above.
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')
|
llava/lib/python3.10/datetime.py
ADDED
|
@@ -0,0 +1,2524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Concrete date/time and related types.
|
| 2 |
+
|
| 3 |
+
See http://www.iana.org/time-zones/repository/tz-link.html for
|
| 4 |
+
time zone and DST data sources.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo",
|
| 8 |
+
"MINYEAR", "MAXYEAR")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import time as _time
|
| 12 |
+
import math as _math
|
| 13 |
+
import sys
|
| 14 |
+
from operator import index as _index
|
| 15 |
+
|
| 16 |
+
def _cmp(x, y):
|
| 17 |
+
return 0 if x == y else 1 if x > y else -1
|
| 18 |
+
|
| 19 |
+
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059  # date.max.toordinal()

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions.  Difference: Dates.py calls January 1 of year 0 day
# number 1.  The code here calls January 1 of year 1 day number 1.  This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations.  See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

_DAYS_BEFORE_MONTH = [-1]  # -1 is a placeholder for indexing purposes.
# Build cumulative sums: _DAYS_BEFORE_MONTH[m] is the number of days in a
# non-leap year that precede the first day of month m.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
|
| 41 |
+
|
| 42 |
+
def _is_leap(year):
|
| 43 |
+
"year -> 1 if leap year, else 0."
|
| 44 |
+
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
| 45 |
+
|
| 46 |
+
def _days_before_year(year):
|
| 47 |
+
"year -> number of days before January 1st of year."
|
| 48 |
+
y = year - 1
|
| 49 |
+
return y*365 + y//4 - y//100 + y//400
|
| 50 |
+
|
| 51 |
+
def _days_in_month(year, month):
    "year, month -> number of days in that month in that year."
    assert 1 <= month <= 12, month
    # Only February varies with the year.
    if month != 2:
        return _DAYS_IN_MONTH[month]
    return 29 if _is_leap(year) else _DAYS_IN_MONTH[2]
|
| 57 |
+
|
| 58 |
+
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    # March through December shift by one day in leap years.
    leap_adjust = month > 2 and _is_leap(year)
    return _DAYS_BEFORE_MONTH[month] + leap_adjust
|
| 62 |
+
|
| 63 |
+
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Days in all prior years, plus days in this year's prior months,
    # plus the (1-based) day of the month.
    return _days_before_year(year) + _days_before_month(year, month) + day
|
| 71 |
+
|
| 72 |
+
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    # number of days in 100 years
_DI4Y = _days_before_year(5)        # number of days in   4 years

# A 4-year cycle has one extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has one extra leap day over what we'd get
# from pasting together 4 100-year cycles (year 400 IS a leap year).
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one FEWER leap day than we'd get from
# pasting together 25 4-year cycles (century years are not leap years).
assert _DI100Y == 25 * _DI4Y - 1
|
| 87 |
+
|
| 88 |
+
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # The leap-year pattern repeats exactly every 400 years, so first
    # locate the 400-year cycle containing day n.  Working with n-1
    # makes the cycle boundaries land exactly on multiples of _DI400Y
    # (1-Jan-0001 maps to 0, 1-Jan-0401 maps to _DI400Y, and so on):
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399        -_DI400Y +1    -_DI400Y      400-year boundary
    #     ...
    #      1 Jan 001                  1              0    400-year boundary
    #     ...
    #      1 Jan 401         _DI400Y +1     _DI400Y       400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # n is now the non-negative day offset from January 1 of `year`.
    # Peel off whole 100-year cycles.  Note that n100 can equal 4: that
    # means the target day is December 31 closing out a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Then whole 4-year cycles ...
    n4, n = divmod(n, _DI4Y)

    # ... and single years.  As with n100, n1 == 4 means December 31 at
    # the end of a 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        assert n == 0
        return year - 1, 12, 31

    # The year is now correct and n is the offset from January 1.
    # Estimate the month; the estimate is either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:   # estimate was one month too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Year and month are correct; n is the zero-based day of the month.
    return year, month, n + 1
|
| 149 |
+
|
| 150 |
+
# Month and day names. For localized versions, see the calendar module.
|
| 151 |
+
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
|
| 152 |
+
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
|
| 153 |
+
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    # Weekday with Monday == 0; ordinal day 1 (1-Jan-0001) was a Monday,
    # hence the +6 rotation before the mod.
    wday = (_ymd2ord(y, m, d) + 6) % 7
    # 1-based day number within the year.
    dnum = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
|
| 160 |
+
|
| 161 |
+
def _format_time(hh, mm, ss, us, timespec='auto'):
|
| 162 |
+
specs = {
|
| 163 |
+
'hours': '{:02d}',
|
| 164 |
+
'minutes': '{:02d}:{:02d}',
|
| 165 |
+
'seconds': '{:02d}:{:02d}:{:02d}',
|
| 166 |
+
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
|
| 167 |
+
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
if timespec == 'auto':
|
| 171 |
+
# Skip trailing microseconds when us==0.
|
| 172 |
+
timespec = 'microseconds' if us else 'seconds'
|
| 173 |
+
elif timespec == 'milliseconds':
|
| 174 |
+
us //= 1000
|
| 175 |
+
try:
|
| 176 |
+
fmt = specs[timespec]
|
| 177 |
+
except KeyError:
|
| 178 |
+
raise ValueError('Unknown timespec value')
|
| 179 |
+
else:
|
| 180 |
+
return fmt.format(hh, mm, ss, us)
|
| 181 |
+
|
| 182 |
+
def _format_offset(off):
    # Render a utcoffset() result as "+HH:MM[:SS[.ffffff]]";
    # an offset of None yields the empty string.
    if off is None:
        return ''
    if off.days < 0:
        sign = "-"
        off = -off
    else:
        sign = "+"
    hh, mm = divmod(off, timedelta(hours=1))
    mm, ss = divmod(mm, timedelta(minutes=1))
    result = "%s%02d:%02d" % (sign, hh, mm)
    if ss or ss.microseconds:
        result += ":%02d" % ss.seconds
        if ss.microseconds:
            result += '.%06d' % ss.microseconds
    return result
|
| 199 |
+
|
| 200 |
+
# Correctly substitute for the %f, %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    # Replacement strings are computed lazily so that utcoffset() and
    # tzname() are only invoked when the matching escape appears.
    freplace = None  # the string to use for %f
    zreplace = None  # the string to use for %z
    Zreplace = None  # the string to use for %Z

    # Scan `format`, expanding %f/%z/%Z ourselves and passing every
    # other escape through to time.strftime untouched.
    newformat = []
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch != '%':
            newformat.append(ch)
            continue
        if i >= n:
            # A lone trailing '%' is passed through unchanged.
            newformat.append('%')
            continue
        ch = format[i]
        i += 1
        if ch == 'f':
            if freplace is None:
                freplace = '%06d' % getattr(object, 'microsecond', 0)
            newformat.append(freplace)
        elif ch == 'z':
            if zreplace is None:
                zreplace = ""
                if hasattr(object, "utcoffset"):
                    offset = object.utcoffset()
                    if offset is not None:
                        sign = '+'
                        if offset.days < 0:
                            offset = -offset
                            sign = '-'
                        h, rest = divmod(offset, timedelta(hours=1))
                        m, rest = divmod(rest, timedelta(minutes=1))
                        s = rest.seconds
                        u = offset.microseconds
                        # Emit the shortest form that loses nothing.
                        if u:
                            zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u)
                        elif s:
                            zreplace = '%c%02d%02d%02d' % (sign, h, m, s)
                        else:
                            zreplace = '%c%02d%02d' % (sign, h, m)
            assert '%' not in zreplace
            newformat.append(zreplace)
        elif ch == 'Z':
            if Zreplace is None:
                Zreplace = ""
                if hasattr(object, "tzname"):
                    s = object.tzname()
                    if s is not None:
                        # strftime is going to have at this: escape %
                        Zreplace = s.replace('%', '%%')
            newformat.append(Zreplace)
        else:
            # Unknown escape: leave '%<ch>' for time.strftime.
            newformat.append('%')
            newformat.append(ch)
    return _time.strftime("".join(newformat), timetuple)
|
| 263 |
+
|
| 264 |
+
# Helpers for parsing the result of isoformat()
|
| 265 |
+
def _parse_isoformat_date(dtstr):
|
| 266 |
+
# It is assumed that this function will only be called with a
|
| 267 |
+
# string of length exactly 10, and (though this is not used) ASCII-only
|
| 268 |
+
year = int(dtstr[0:4])
|
| 269 |
+
if dtstr[4] != '-':
|
| 270 |
+
raise ValueError('Invalid date separator: %s' % dtstr[4])
|
| 271 |
+
|
| 272 |
+
month = int(dtstr[5:7])
|
| 273 |
+
|
| 274 |
+
if dtstr[7] != '-':
|
| 275 |
+
raise ValueError('Invalid date separator')
|
| 276 |
+
|
| 277 |
+
day = int(dtstr[8:10])
|
| 278 |
+
|
| 279 |
+
return [year, month, day]
|
| 280 |
+
|
| 281 |
+
def _parse_hh_mm_ss_ff(tstr):
|
| 282 |
+
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
|
| 283 |
+
len_str = len(tstr)
|
| 284 |
+
|
| 285 |
+
time_comps = [0, 0, 0, 0]
|
| 286 |
+
pos = 0
|
| 287 |
+
for comp in range(0, 3):
|
| 288 |
+
if (len_str - pos) < 2:
|
| 289 |
+
raise ValueError('Incomplete time component')
|
| 290 |
+
|
| 291 |
+
time_comps[comp] = int(tstr[pos:pos+2])
|
| 292 |
+
|
| 293 |
+
pos += 2
|
| 294 |
+
next_char = tstr[pos:pos+1]
|
| 295 |
+
|
| 296 |
+
if not next_char or comp >= 2:
|
| 297 |
+
break
|
| 298 |
+
|
| 299 |
+
if next_char != ':':
|
| 300 |
+
raise ValueError('Invalid time separator: %c' % next_char)
|
| 301 |
+
|
| 302 |
+
pos += 1
|
| 303 |
+
|
| 304 |
+
if pos < len_str:
|
| 305 |
+
if tstr[pos] != '.':
|
| 306 |
+
raise ValueError('Invalid microsecond component')
|
| 307 |
+
else:
|
| 308 |
+
pos += 1
|
| 309 |
+
|
| 310 |
+
len_remainder = len_str - pos
|
| 311 |
+
if len_remainder not in (3, 6):
|
| 312 |
+
raise ValueError('Invalid microsecond component')
|
| 313 |
+
|
| 314 |
+
time_comps[3] = int(tstr[pos:])
|
| 315 |
+
if len_remainder == 3:
|
| 316 |
+
time_comps[3] *= 1000
|
| 317 |
+
|
| 318 |
+
return time_comps
|
| 319 |
+
|
| 320 |
+
def _parse_isoformat_time(tstr):
    # Format supported: HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
    len_str = len(tstr)
    if len_str < 2:
        raise ValueError('Isoformat time too short')

    # Locate a '+' or '-' introducing a timezone offset; equivalent to
    # re.search('[+-]', tstr), but faster.
    tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
    timestr = tstr[:tz_pos - 1] if tz_pos > 0 else tstr

    time_comps = _parse_hh_mm_ss_ff(timestr)

    tzi = None
    if tz_pos > 0:
        tzstr = tstr[tz_pos:]
        # Valid offset strings are exactly:
        #   HH:MM               len 5
        #   HH:MM:SS            len 8
        #   HH:MM:SS.ffffff     len 15
        if len(tzstr) not in (5, 8, 15):
            raise ValueError('Malformed time zone string')

        tz_comps = _parse_hh_mm_ss_ff(tzstr)
        if all(x == 0 for x in tz_comps):
            tzi = timezone.utc
        else:
            tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
            td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
                           seconds=tz_comps[2], microseconds=tz_comps[3])
            tzi = timezone(tzsign * td)

    time_comps.append(tzi)
    return time_comps
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# Just raise TypeError if the arg isn't None or a string.
|
| 361 |
+
def _check_tzname(name):
|
| 362 |
+
if name is not None and not isinstance(name, str):
|
| 363 |
+
raise TypeError("tzinfo.tzname() must return None or string, "
|
| 364 |
+
"not '%s'" % type(name))
|
| 365 |
+
|
| 366 |
+
# name is the offset-producing method, "utcoffset" or "dst"; offset is
# what it returned.
# None passes through and returns None; a non-timedelta raises
# TypeError; a timedelta outside the open interval (-24h, +24h) raises
# ValueError.  Otherwise the offset is acceptable.
def _check_utc_offset(name, offset):
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    if not -timedelta(1) < offset < timedelta(1):
        raise ValueError("%s()=%s, must be strictly between "
                         "-timedelta(hours=24) and timedelta(hours=24)" %
                         (name, offset))
|
| 383 |
+
|
| 384 |
+
def _check_date_fields(year, month, day):
    # Coerce each field through operator.index (so any integral type is
    # accepted), then range-check.  Returns the canonicalized triple.
    year = _index(year)
    month = _index(month)
    day = _index(day)
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        raise ValueError('day must be in 1..%d' % dim, day)
    return year, month, day
|
| 396 |
+
|
| 397 |
+
def _check_time_fields(hour, minute, second, microsecond, fold):
|
| 398 |
+
hour = _index(hour)
|
| 399 |
+
minute = _index(minute)
|
| 400 |
+
second = _index(second)
|
| 401 |
+
microsecond = _index(microsecond)
|
| 402 |
+
if not 0 <= hour <= 23:
|
| 403 |
+
raise ValueError('hour must be in 0..23', hour)
|
| 404 |
+
if not 0 <= minute <= 59:
|
| 405 |
+
raise ValueError('minute must be in 0..59', minute)
|
| 406 |
+
if not 0 <= second <= 59:
|
| 407 |
+
raise ValueError('second must be in 0..59', second)
|
| 408 |
+
if not 0 <= microsecond <= 999999:
|
| 409 |
+
raise ValueError('microsecond must be in 0..999999', microsecond)
|
| 410 |
+
if fold not in (0, 1):
|
| 411 |
+
raise ValueError('fold must be either 0 or 1', fold)
|
| 412 |
+
return hour, minute, second, microsecond, fold
|
| 413 |
+
|
| 414 |
+
def _check_tzinfo_arg(tz):
    """Raise TypeError unless tz is None or a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
|
| 417 |
+
|
| 418 |
+
def _cmperror(x, y):
|
| 419 |
+
raise TypeError("can't compare '%s' to '%s'" % (
|
| 420 |
+
type(x).__name__, type(y).__name__))
|
| 421 |
+
|
| 422 |
+
def _divide_and_round(a, b):
|
| 423 |
+
"""divide a by b and round result to the nearest integer
|
| 424 |
+
|
| 425 |
+
When the ratio is exactly half-way between two integers,
|
| 426 |
+
the even integer is returned.
|
| 427 |
+
"""
|
| 428 |
+
# Based on the reference implementation for divmod_near
|
| 429 |
+
# in Objects/longobject.c.
|
| 430 |
+
q, r = divmod(a, b)
|
| 431 |
+
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
|
| 432 |
+
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
|
| 433 |
+
# positive, 2 * r < b if b negative.
|
| 434 |
+
r *= 2
|
| 435 |
+
greater_than_half = r > b if b > 0 else r < b
|
| 436 |
+
if greater_than_half or r == b and q % 2 == 1:
|
| 437 |
+
q += 1
|
| 438 |
+
|
| 439 |
+
return q
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
class timedelta:
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds).  Why?  Because I
    felt like it.
    """
    # Normalized invariant: 0 <= _seconds < 24*3600 and
    # 0 <= _microseconds < 1000000; _days carries the sign.
    __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'

    def __new__(cls, days=0, seconds=0, microseconds=0,
                milliseconds=0, minutes=0, hours=0, weeks=0):
        # Accepts ints or floats for every argument and normalizes the
        # total duration to exact integer (days, seconds, microseconds).
        # The flow is deliberately convoluted: it makes explicit where
        # go-fast assumptions hold, to guide the C implementation, where
        # overflow and double-precision loss must be handled by hand.

        # Final values, all integer.
        # s and us fit in 32-bit signed ints; d isn't bounded.
        d = s = us = 0

        # Fold the redundant units into days, seconds, microseconds.
        days += weeks*7
        seconds += minutes*60 + hours*3600
        microseconds += milliseconds*1000

        # Strip fractional parts, carrying them down into smaller units
        # so no precision is silently dropped.  Take a deep breath.
        if isinstance(days, float):
            dayfrac, days = _math.modf(days)
            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
            assert daysecondswhole == int(daysecondswhole)  # can't overflow
            s = int(daysecondswhole)
            assert days == int(days)
            d = int(days)
        else:
            daysecondsfrac = 0.0
            d = days
        assert isinstance(daysecondsfrac, float)
        assert abs(daysecondsfrac) <= 1.0
        assert isinstance(d, int)
        assert abs(s) <= 24 * 3600
        # days isn't referenced again before redefinition

        if isinstance(seconds, float):
            secondsfrac, seconds = _math.modf(seconds)
            assert seconds == int(seconds)
            seconds = int(seconds)
            secondsfrac += daysecondsfrac
            assert abs(secondsfrac) <= 2.0
        else:
            secondsfrac = daysecondsfrac
        # daysecondsfrac isn't referenced again
        assert isinstance(secondsfrac, float)
        assert abs(secondsfrac) <= 2.0

        assert isinstance(seconds, int)
        days, seconds = divmod(seconds, 24*3600)
        d += days
        s += int(seconds)    # can't overflow
        assert isinstance(s, int)
        assert abs(s) <= 2 * 24 * 3600
        # seconds isn't referenced again before redefinition

        usdouble = secondsfrac * 1e6
        assert abs(usdouble) < 2.1e6    # exact value not critical
        # secondsfrac isn't referenced again

        if isinstance(microseconds, float):
            microseconds = round(microseconds + usdouble)
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += seconds
        else:
            microseconds = int(microseconds)
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += seconds
            microseconds = round(microseconds + usdouble)
        assert isinstance(s, int)
        assert isinstance(microseconds, int)
        assert abs(s) <= 3 * 24 * 3600
        assert abs(microseconds) < 3.1e6

        # Just a little bit of carrying possible for microseconds and
        # seconds.
        seconds, us = divmod(microseconds, 1000000)
        s += seconds
        days, s = divmod(s, 24*3600)
        d += days

        assert isinstance(d, int)
        assert isinstance(s, int) and 0 <= s < 24*3600
        assert isinstance(us, int) and 0 <= us < 1000000

        if abs(d) > 999999999:
            raise OverflowError("timedelta # of days is too large: %d" % d)

        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        self._hashcode = -1   # hash is computed lazily
        return self

    def __repr__(self):
        # Only non-zero fields are shown, e.g. timedelta(days=1).
        pieces = ["%s=%d" % (label, value)
                  for label, value in (("days", self._days),
                                       ("seconds", self._seconds),
                                       ("microseconds", self._microseconds))
                  if value]
        if not pieces:
            pieces = ['0']
        return "%s.%s(%s)" % (self.__class__.__module__,
                              self.__class__.__qualname__,
                              ', '.join(pieces))

    def __str__(self):
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        text = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            suffix = "s" if abs(self._days) != 1 else ""
            text = "%d day%s, %s" % (self._days, suffix, text)
        if self._microseconds:
            text += ".%06d" % self._microseconds
        return text

    def total_seconds(self):
        """Total seconds in the duration."""
        return ((self.days * 86400 + self.seconds) * 10**6 +
                self.microseconds) / 10**6

    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days

    @property
    def seconds(self):
        """seconds"""
        return self._seconds

    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds

    def __add__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        # For CPython compatibility we must return a real timedelta,
        # not self.__class__.
        return timedelta(self._days + other._days,
                         self._seconds + other._seconds,
                         self._microseconds + other._microseconds)

    __radd__ = __add__

    def __sub__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        # For CPython compatibility we must return a real timedelta,
        # not self.__class__.
        return timedelta(self._days - other._days,
                         self._seconds - other._seconds,
                         self._microseconds - other._microseconds)

    def __rsub__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return -self + other

    def __neg__(self):
        # The constructor re-normalizes the negated fields.
        return timedelta(-self._days, -self._seconds, -self._microseconds)

    def __pos__(self):
        return self

    def __abs__(self):
        # The sign of the whole value lives in _days.
        return -self if self._days < 0 else self

    def __mul__(self, other):
        if isinstance(other, int):
            return timedelta(self._days * other,
                             self._seconds * other,
                             self._microseconds * other)
        if isinstance(other, float):
            # Exact rational arithmetic via as_integer_ratio, with
            # round-half-to-even on the microsecond result.
            usec = self._to_microseconds()
            a, b = other.as_integer_ratio()
            return timedelta(0, 0, _divide_and_round(usec * a, b))
        return NotImplemented

    __rmul__ = __mul__

    def _to_microseconds(self):
        # Collapse the normalized triple into one microsecond count.
        return ((self._days * (24*3600) + self._seconds) * 1000000 +
                self._microseconds)

    def __floordiv__(self, other):
        if not isinstance(other, (int, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec // other._to_microseconds()
        return timedelta(0, 0, usec // other)

    def __truediv__(self, other):
        if not isinstance(other, (int, float, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec / other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, _divide_and_round(usec, other))
        # float divisor: exact rational arithmetic, rounded half-even.
        a, b = other.as_integer_ratio()
        return timedelta(0, 0, _divide_and_round(b * usec, a))

    def __mod__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return timedelta(
            0, 0, self._to_microseconds() % other._to_microseconds())

    def __divmod__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        q, r = divmod(self._to_microseconds(),
                      other._to_microseconds())
        return q, timedelta(0, 0, r)

    # Comparisons of timedelta objects with other.

    def __eq__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._cmp(other) == 0

    def __le__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._cmp(other) <= 0

    def __lt__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._cmp(other) < 0

    def __ge__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._cmp(other) > 0

    def _cmp(self, other):
        # Lexicographic comparison of the normalized state tuples.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())

    def __hash__(self):
        # Cached lazily; -1 marks "not yet computed".
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode

    def __bool__(self):
        return bool(self._days or self._seconds or self._microseconds)

    # Pickle support.

    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)

    def __reduce__(self):
        return (self.__class__, self._getstate())
|
| 756 |
+
|
| 757 |
+
# Class-level bounds: the smallest and largest representable timedelta,
# and the smallest nonzero difference between two unequal timedeltas.
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
|
| 761 |
+
|
| 762 |
+
class date:
|
| 763 |
+
"""Concrete date type.
|
| 764 |
+
|
| 765 |
+
Constructors:
|
| 766 |
+
|
| 767 |
+
__new__()
|
| 768 |
+
fromtimestamp()
|
| 769 |
+
today()
|
| 770 |
+
fromordinal()
|
| 771 |
+
|
| 772 |
+
Operators:
|
| 773 |
+
|
| 774 |
+
__repr__, __str__
|
| 775 |
+
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
|
| 776 |
+
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
|
| 777 |
+
|
| 778 |
+
Methods:
|
| 779 |
+
|
| 780 |
+
timetuple()
|
| 781 |
+
toordinal()
|
| 782 |
+
weekday()
|
| 783 |
+
isoweekday(), isocalendar(), isoformat()
|
| 784 |
+
ctime()
|
| 785 |
+
strftime()
|
| 786 |
+
|
| 787 |
+
Properties (readonly):
|
| 788 |
+
year, month, day
|
| 789 |
+
"""
|
| 790 |
+
__slots__ = '_year', '_month', '_day', '_hashcode'
|
| 791 |
+
|
| 792 |
+
def __new__(cls, year, month=None, day=None):
|
| 793 |
+
"""Constructor.
|
| 794 |
+
|
| 795 |
+
Arguments:
|
| 796 |
+
|
| 797 |
+
year, month, day (required, base 1)
|
| 798 |
+
"""
|
| 799 |
+
if (month is None and
|
| 800 |
+
isinstance(year, (bytes, str)) and len(year) == 4 and
|
| 801 |
+
1 <= ord(year[2:3]) <= 12):
|
| 802 |
+
# Pickle support
|
| 803 |
+
if isinstance(year, str):
|
| 804 |
+
try:
|
| 805 |
+
year = year.encode('latin1')
|
| 806 |
+
except UnicodeEncodeError:
|
| 807 |
+
# More informative error message.
|
| 808 |
+
raise ValueError(
|
| 809 |
+
"Failed to encode latin1 string when unpickling "
|
| 810 |
+
"a date object. "
|
| 811 |
+
"pickle.load(data, encoding='latin1') is assumed.")
|
| 812 |
+
self = object.__new__(cls)
|
| 813 |
+
self.__setstate(year)
|
| 814 |
+
self._hashcode = -1
|
| 815 |
+
return self
|
| 816 |
+
year, month, day = _check_date_fields(year, month, day)
|
| 817 |
+
self = object.__new__(cls)
|
| 818 |
+
self._year = year
|
| 819 |
+
self._month = month
|
| 820 |
+
self._day = day
|
| 821 |
+
self._hashcode = -1
|
| 822 |
+
return self
|
| 823 |
+
|
| 824 |
+
# Additional constructors
|
| 825 |
+
|
| 826 |
+
@classmethod
|
| 827 |
+
def fromtimestamp(cls, t):
|
| 828 |
+
"Construct a date from a POSIX timestamp (like time.time())."
|
| 829 |
+
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
|
| 830 |
+
return cls(y, m, d)
|
| 831 |
+
|
| 832 |
+
@classmethod
|
| 833 |
+
def today(cls):
|
| 834 |
+
"Construct a date from time.time()."
|
| 835 |
+
t = _time.time()
|
| 836 |
+
return cls.fromtimestamp(t)
|
| 837 |
+
|
| 838 |
+
@classmethod
|
| 839 |
+
def fromordinal(cls, n):
|
| 840 |
+
"""Construct a date from a proleptic Gregorian ordinal.
|
| 841 |
+
|
| 842 |
+
January 1 of year 1 is day 1. Only the year, month and day are
|
| 843 |
+
non-zero in the result.
|
| 844 |
+
"""
|
| 845 |
+
y, m, d = _ord2ymd(n)
|
| 846 |
+
return cls(y, m, d)
|
| 847 |
+
|
| 848 |
+
@classmethod
|
| 849 |
+
def fromisoformat(cls, date_string):
|
| 850 |
+
"""Construct a date from the output of date.isoformat()."""
|
| 851 |
+
if not isinstance(date_string, str):
|
| 852 |
+
raise TypeError('fromisoformat: argument must be str')
|
| 853 |
+
|
| 854 |
+
try:
|
| 855 |
+
assert len(date_string) == 10
|
| 856 |
+
return cls(*_parse_isoformat_date(date_string))
|
| 857 |
+
except Exception:
|
| 858 |
+
raise ValueError(f'Invalid isoformat string: {date_string!r}')
|
| 859 |
+
|
| 860 |
+
@classmethod
|
| 861 |
+
def fromisocalendar(cls, year, week, day):
|
| 862 |
+
"""Construct a date from the ISO year, week number and weekday.
|
| 863 |
+
|
| 864 |
+
This is the inverse of the date.isocalendar() function"""
|
| 865 |
+
# Year is bounded this way because 9999-12-31 is (9999, 52, 5)
|
| 866 |
+
if not MINYEAR <= year <= MAXYEAR:
|
| 867 |
+
raise ValueError(f"Year is out of range: {year}")
|
| 868 |
+
|
| 869 |
+
if not 0 < week < 53:
|
| 870 |
+
out_of_range = True
|
| 871 |
+
|
| 872 |
+
if week == 53:
|
| 873 |
+
# ISO years have 53 weeks in them on years starting with a
|
| 874 |
+
# Thursday and leap years starting on a Wednesday
|
| 875 |
+
first_weekday = _ymd2ord(year, 1, 1) % 7
|
| 876 |
+
if (first_weekday == 4 or (first_weekday == 3 and
|
| 877 |
+
_is_leap(year))):
|
| 878 |
+
out_of_range = False
|
| 879 |
+
|
| 880 |
+
if out_of_range:
|
| 881 |
+
raise ValueError(f"Invalid week: {week}")
|
| 882 |
+
|
| 883 |
+
if not 0 < day < 8:
|
| 884 |
+
raise ValueError(f"Invalid weekday: {day} (range is [1, 7])")
|
| 885 |
+
|
| 886 |
+
# Now compute the offset from (Y, 1, 1) in days:
|
| 887 |
+
day_offset = (week - 1) * 7 + (day - 1)
|
| 888 |
+
|
| 889 |
+
# Calculate the ordinal day for monday, week 1
|
| 890 |
+
day_1 = _isoweek1monday(year)
|
| 891 |
+
ord_day = day_1 + day_offset
|
| 892 |
+
|
| 893 |
+
return cls(*_ord2ymd(ord_day))
|
| 894 |
+
|
| 895 |
+
# Conversions to string
|
| 896 |
+
|
| 897 |
+
def __repr__(self):
|
| 898 |
+
"""Convert to formal string, for repr().
|
| 899 |
+
|
| 900 |
+
>>> dt = datetime(2010, 1, 1)
|
| 901 |
+
>>> repr(dt)
|
| 902 |
+
'datetime.datetime(2010, 1, 1, 0, 0)'
|
| 903 |
+
|
| 904 |
+
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
|
| 905 |
+
>>> repr(dt)
|
| 906 |
+
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
|
| 907 |
+
"""
|
| 908 |
+
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
|
| 909 |
+
self.__class__.__qualname__,
|
| 910 |
+
self._year,
|
| 911 |
+
self._month,
|
| 912 |
+
self._day)
|
| 913 |
+
# XXX These shouldn't depend on time.localtime(), because that
|
| 914 |
+
# clips the usable dates to [1970 .. 2038). At least ctime() is
|
| 915 |
+
# easily done without using strftime() -- that's better too because
|
| 916 |
+
# strftime("%c", ...) is locale specific.
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
def ctime(self):
|
| 920 |
+
"Return ctime() style string."
|
| 921 |
+
weekday = self.toordinal() % 7 or 7
|
| 922 |
+
return "%s %s %2d 00:00:00 %04d" % (
|
| 923 |
+
_DAYNAMES[weekday],
|
| 924 |
+
_MONTHNAMES[self._month],
|
| 925 |
+
self._day, self._year)
|
| 926 |
+
|
| 927 |
+
def strftime(self, fmt):
|
| 928 |
+
"Format using strftime()."
|
| 929 |
+
return _wrap_strftime(self, fmt, self.timetuple())
|
| 930 |
+
|
| 931 |
+
def __format__(self, fmt):
|
| 932 |
+
if not isinstance(fmt, str):
|
| 933 |
+
raise TypeError("must be str, not %s" % type(fmt).__name__)
|
| 934 |
+
if len(fmt) != 0:
|
| 935 |
+
return self.strftime(fmt)
|
| 936 |
+
return str(self)
|
| 937 |
+
|
| 938 |
+
def isoformat(self):
|
| 939 |
+
"""Return the date formatted according to ISO.
|
| 940 |
+
|
| 941 |
+
This is 'YYYY-MM-DD'.
|
| 942 |
+
|
| 943 |
+
References:
|
| 944 |
+
- http://www.w3.org/TR/NOTE-datetime
|
| 945 |
+
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
|
| 946 |
+
"""
|
| 947 |
+
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
|
| 948 |
+
|
| 949 |
+
__str__ = isoformat
|
| 950 |
+
|
| 951 |
+
# Read-only field accessors
|
| 952 |
+
@property
|
| 953 |
+
def year(self):
|
| 954 |
+
"""year (1-9999)"""
|
| 955 |
+
return self._year
|
| 956 |
+
|
| 957 |
+
@property
|
| 958 |
+
def month(self):
|
| 959 |
+
"""month (1-12)"""
|
| 960 |
+
return self._month
|
| 961 |
+
|
| 962 |
+
@property
|
| 963 |
+
def day(self):
|
| 964 |
+
"""day (1-31)"""
|
| 965 |
+
return self._day
|
| 966 |
+
|
| 967 |
+
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
|
| 968 |
+
# __hash__ (and helpers)
|
| 969 |
+
|
| 970 |
+
def timetuple(self):
|
| 971 |
+
"Return local time tuple compatible with time.localtime()."
|
| 972 |
+
return _build_struct_time(self._year, self._month, self._day,
|
| 973 |
+
0, 0, 0, -1)
|
| 974 |
+
|
| 975 |
+
def toordinal(self):
|
| 976 |
+
"""Return proleptic Gregorian ordinal for the year, month and day.
|
| 977 |
+
|
| 978 |
+
January 1 of year 1 is day 1. Only the year, month and day values
|
| 979 |
+
contribute to the result.
|
| 980 |
+
"""
|
| 981 |
+
return _ymd2ord(self._year, self._month, self._day)
|
| 982 |
+
|
| 983 |
+
def replace(self, year=None, month=None, day=None):
|
| 984 |
+
"""Return a new date with new values for the specified fields."""
|
| 985 |
+
if year is None:
|
| 986 |
+
year = self._year
|
| 987 |
+
if month is None:
|
| 988 |
+
month = self._month
|
| 989 |
+
if day is None:
|
| 990 |
+
day = self._day
|
| 991 |
+
return type(self)(year, month, day)
|
| 992 |
+
|
| 993 |
+
# Comparisons of date objects with other.
|
| 994 |
+
|
| 995 |
+
def __eq__(self, other):
|
| 996 |
+
if isinstance(other, date):
|
| 997 |
+
return self._cmp(other) == 0
|
| 998 |
+
return NotImplemented
|
| 999 |
+
|
| 1000 |
+
def __le__(self, other):
|
| 1001 |
+
if isinstance(other, date):
|
| 1002 |
+
return self._cmp(other) <= 0
|
| 1003 |
+
return NotImplemented
|
| 1004 |
+
|
| 1005 |
+
def __lt__(self, other):
|
| 1006 |
+
if isinstance(other, date):
|
| 1007 |
+
return self._cmp(other) < 0
|
| 1008 |
+
return NotImplemented
|
| 1009 |
+
|
| 1010 |
+
def __ge__(self, other):
|
| 1011 |
+
if isinstance(other, date):
|
| 1012 |
+
return self._cmp(other) >= 0
|
| 1013 |
+
return NotImplemented
|
| 1014 |
+
|
| 1015 |
+
def __gt__(self, other):
|
| 1016 |
+
if isinstance(other, date):
|
| 1017 |
+
return self._cmp(other) > 0
|
| 1018 |
+
return NotImplemented
|
| 1019 |
+
|
| 1020 |
+
def _cmp(self, other):
|
| 1021 |
+
assert isinstance(other, date)
|
| 1022 |
+
y, m, d = self._year, self._month, self._day
|
| 1023 |
+
y2, m2, d2 = other._year, other._month, other._day
|
| 1024 |
+
return _cmp((y, m, d), (y2, m2, d2))
|
| 1025 |
+
|
| 1026 |
+
def __hash__(self):
|
| 1027 |
+
"Hash."
|
| 1028 |
+
if self._hashcode == -1:
|
| 1029 |
+
self._hashcode = hash(self._getstate())
|
| 1030 |
+
return self._hashcode
|
| 1031 |
+
|
| 1032 |
+
# Computations
|
| 1033 |
+
|
| 1034 |
+
def __add__(self, other):
|
| 1035 |
+
"Add a date to a timedelta."
|
| 1036 |
+
if isinstance(other, timedelta):
|
| 1037 |
+
o = self.toordinal() + other.days
|
| 1038 |
+
if 0 < o <= _MAXORDINAL:
|
| 1039 |
+
return type(self).fromordinal(o)
|
| 1040 |
+
raise OverflowError("result out of range")
|
| 1041 |
+
return NotImplemented
|
| 1042 |
+
|
| 1043 |
+
__radd__ = __add__
|
| 1044 |
+
|
| 1045 |
+
def __sub__(self, other):
|
| 1046 |
+
"""Subtract two dates, or a date and a timedelta."""
|
| 1047 |
+
if isinstance(other, timedelta):
|
| 1048 |
+
return self + timedelta(-other.days)
|
| 1049 |
+
if isinstance(other, date):
|
| 1050 |
+
days1 = self.toordinal()
|
| 1051 |
+
days2 = other.toordinal()
|
| 1052 |
+
return timedelta(days1 - days2)
|
| 1053 |
+
return NotImplemented
|
| 1054 |
+
|
| 1055 |
+
def weekday(self):
|
| 1056 |
+
"Return day of the week, where Monday == 0 ... Sunday == 6."
|
| 1057 |
+
return (self.toordinal() + 6) % 7
|
| 1058 |
+
|
| 1059 |
+
# Day-of-the-week and week-of-the-year, according to ISO
|
| 1060 |
+
|
| 1061 |
+
def isoweekday(self):
|
| 1062 |
+
"Return day of the week, where Monday == 1 ... Sunday == 7."
|
| 1063 |
+
# 1-Jan-0001 is a Monday
|
| 1064 |
+
return self.toordinal() % 7 or 7
|
| 1065 |
+
|
| 1066 |
+
def isocalendar(self):
|
| 1067 |
+
"""Return a named tuple containing ISO year, week number, and weekday.
|
| 1068 |
+
|
| 1069 |
+
The first ISO week of the year is the (Mon-Sun) week
|
| 1070 |
+
containing the year's first Thursday; everything else derives
|
| 1071 |
+
from that.
|
| 1072 |
+
|
| 1073 |
+
The first week is 1; Monday is 1 ... Sunday is 7.
|
| 1074 |
+
|
| 1075 |
+
ISO calendar algorithm taken from
|
| 1076 |
+
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
|
| 1077 |
+
(used with permission)
|
| 1078 |
+
"""
|
| 1079 |
+
year = self._year
|
| 1080 |
+
week1monday = _isoweek1monday(year)
|
| 1081 |
+
today = _ymd2ord(self._year, self._month, self._day)
|
| 1082 |
+
# Internally, week and day have origin 0
|
| 1083 |
+
week, day = divmod(today - week1monday, 7)
|
| 1084 |
+
if week < 0:
|
| 1085 |
+
year -= 1
|
| 1086 |
+
week1monday = _isoweek1monday(year)
|
| 1087 |
+
week, day = divmod(today - week1monday, 7)
|
| 1088 |
+
elif week >= 52:
|
| 1089 |
+
if today >= _isoweek1monday(year+1):
|
| 1090 |
+
year += 1
|
| 1091 |
+
week = 0
|
| 1092 |
+
return _IsoCalendarDate(year, week+1, day+1)
|
| 1093 |
+
|
| 1094 |
+
# Pickle support.
|
| 1095 |
+
|
| 1096 |
+
def _getstate(self):
|
| 1097 |
+
yhi, ylo = divmod(self._year, 256)
|
| 1098 |
+
return bytes([yhi, ylo, self._month, self._day]),
|
| 1099 |
+
|
| 1100 |
+
def __setstate(self, string):
|
| 1101 |
+
yhi, ylo, self._month, self._day = string
|
| 1102 |
+
self._year = yhi * 256 + ylo
|
| 1103 |
+
|
| 1104 |
+
def __reduce__(self):
|
| 1105 |
+
return (self.__class__, self._getstate())
|
| 1106 |
+
|
| 1107 |
+
_date_class = date # so functions w/ args named "date" can get at the class
|
| 1108 |
+
|
| 1109 |
+
date.min = date(1, 1, 1)
|
| 1110 |
+
date.max = date(9999, 12, 31)
|
| 1111 |
+
date.resolution = timedelta(days=1)
|
| 1112 |
+
|
| 1113 |
+
|
| 1114 |
+
class tzinfo:
|
| 1115 |
+
"""Abstract base class for time zone info classes.
|
| 1116 |
+
|
| 1117 |
+
Subclasses must override the name(), utcoffset() and dst() methods.
|
| 1118 |
+
"""
|
| 1119 |
+
__slots__ = ()
|
| 1120 |
+
|
| 1121 |
+
def tzname(self, dt):
|
| 1122 |
+
"datetime -> string name of time zone."
|
| 1123 |
+
raise NotImplementedError("tzinfo subclass must override tzname()")
|
| 1124 |
+
|
| 1125 |
+
def utcoffset(self, dt):
|
| 1126 |
+
"datetime -> timedelta, positive for east of UTC, negative for west of UTC"
|
| 1127 |
+
raise NotImplementedError("tzinfo subclass must override utcoffset()")
|
| 1128 |
+
|
| 1129 |
+
def dst(self, dt):
|
| 1130 |
+
"""datetime -> DST offset as timedelta, positive for east of UTC.
|
| 1131 |
+
|
| 1132 |
+
Return 0 if DST not in effect. utcoffset() must include the DST
|
| 1133 |
+
offset.
|
| 1134 |
+
"""
|
| 1135 |
+
raise NotImplementedError("tzinfo subclass must override dst()")
|
| 1136 |
+
|
| 1137 |
+
def fromutc(self, dt):
|
| 1138 |
+
"datetime in UTC -> datetime in local time."
|
| 1139 |
+
|
| 1140 |
+
if not isinstance(dt, datetime):
|
| 1141 |
+
raise TypeError("fromutc() requires a datetime argument")
|
| 1142 |
+
if dt.tzinfo is not self:
|
| 1143 |
+
raise ValueError("dt.tzinfo is not self")
|
| 1144 |
+
|
| 1145 |
+
dtoff = dt.utcoffset()
|
| 1146 |
+
if dtoff is None:
|
| 1147 |
+
raise ValueError("fromutc() requires a non-None utcoffset() "
|
| 1148 |
+
"result")
|
| 1149 |
+
|
| 1150 |
+
# See the long comment block at the end of this file for an
|
| 1151 |
+
# explanation of this algorithm.
|
| 1152 |
+
dtdst = dt.dst()
|
| 1153 |
+
if dtdst is None:
|
| 1154 |
+
raise ValueError("fromutc() requires a non-None dst() result")
|
| 1155 |
+
delta = dtoff - dtdst
|
| 1156 |
+
if delta:
|
| 1157 |
+
dt += delta
|
| 1158 |
+
dtdst = dt.dst()
|
| 1159 |
+
if dtdst is None:
|
| 1160 |
+
raise ValueError("fromutc(): dt.dst gave inconsistent "
|
| 1161 |
+
"results; cannot convert")
|
| 1162 |
+
return dt + dtdst
|
| 1163 |
+
|
| 1164 |
+
# Pickle support.
|
| 1165 |
+
|
| 1166 |
+
def __reduce__(self):
|
| 1167 |
+
getinitargs = getattr(self, "__getinitargs__", None)
|
| 1168 |
+
if getinitargs:
|
| 1169 |
+
args = getinitargs()
|
| 1170 |
+
else:
|
| 1171 |
+
args = ()
|
| 1172 |
+
getstate = getattr(self, "__getstate__", None)
|
| 1173 |
+
if getstate:
|
| 1174 |
+
state = getstate()
|
| 1175 |
+
else:
|
| 1176 |
+
state = getattr(self, "__dict__", None) or None
|
| 1177 |
+
if state is None:
|
| 1178 |
+
return (self.__class__, args)
|
| 1179 |
+
else:
|
| 1180 |
+
return (self.__class__, args, state)
|
| 1181 |
+
|
| 1182 |
+
|
| 1183 |
+
class IsoCalendarDate(tuple):
|
| 1184 |
+
|
| 1185 |
+
def __new__(cls, year, week, weekday, /):
|
| 1186 |
+
return super().__new__(cls, (year, week, weekday))
|
| 1187 |
+
|
| 1188 |
+
@property
|
| 1189 |
+
def year(self):
|
| 1190 |
+
return self[0]
|
| 1191 |
+
|
| 1192 |
+
@property
|
| 1193 |
+
def week(self):
|
| 1194 |
+
return self[1]
|
| 1195 |
+
|
| 1196 |
+
@property
|
| 1197 |
+
def weekday(self):
|
| 1198 |
+
return self[2]
|
| 1199 |
+
|
| 1200 |
+
def __reduce__(self):
|
| 1201 |
+
# This code is intended to pickle the object without making the
|
| 1202 |
+
# class public. See https://bugs.python.org/msg352381
|
| 1203 |
+
return (tuple, (tuple(self),))
|
| 1204 |
+
|
| 1205 |
+
def __repr__(self):
|
| 1206 |
+
return (f'{self.__class__.__name__}'
|
| 1207 |
+
f'(year={self[0]}, week={self[1]}, weekday={self[2]})')
|
| 1208 |
+
|
| 1209 |
+
|
| 1210 |
+
_IsoCalendarDate = IsoCalendarDate
|
| 1211 |
+
del IsoCalendarDate
|
| 1212 |
+
_tzinfo_class = tzinfo
|
| 1213 |
+
|
| 1214 |
+
class time:
|
| 1215 |
+
"""Time with time zone.
|
| 1216 |
+
|
| 1217 |
+
Constructors:
|
| 1218 |
+
|
| 1219 |
+
__new__()
|
| 1220 |
+
|
| 1221 |
+
Operators:
|
| 1222 |
+
|
| 1223 |
+
__repr__, __str__
|
| 1224 |
+
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
|
| 1225 |
+
|
| 1226 |
+
Methods:
|
| 1227 |
+
|
| 1228 |
+
strftime()
|
| 1229 |
+
isoformat()
|
| 1230 |
+
utcoffset()
|
| 1231 |
+
tzname()
|
| 1232 |
+
dst()
|
| 1233 |
+
|
| 1234 |
+
Properties (readonly):
|
| 1235 |
+
hour, minute, second, microsecond, tzinfo, fold
|
| 1236 |
+
"""
|
| 1237 |
+
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
|
| 1238 |
+
|
| 1239 |
+
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
|
| 1240 |
+
"""Constructor.
|
| 1241 |
+
|
| 1242 |
+
Arguments:
|
| 1243 |
+
|
| 1244 |
+
hour, minute (required)
|
| 1245 |
+
second, microsecond (default to zero)
|
| 1246 |
+
tzinfo (default to None)
|
| 1247 |
+
fold (keyword only, default to zero)
|
| 1248 |
+
"""
|
| 1249 |
+
if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
|
| 1250 |
+
ord(hour[0:1])&0x7F < 24):
|
| 1251 |
+
# Pickle support
|
| 1252 |
+
if isinstance(hour, str):
|
| 1253 |
+
try:
|
| 1254 |
+
hour = hour.encode('latin1')
|
| 1255 |
+
except UnicodeEncodeError:
|
| 1256 |
+
# More informative error message.
|
| 1257 |
+
raise ValueError(
|
| 1258 |
+
"Failed to encode latin1 string when unpickling "
|
| 1259 |
+
"a time object. "
|
| 1260 |
+
"pickle.load(data, encoding='latin1') is assumed.")
|
| 1261 |
+
self = object.__new__(cls)
|
| 1262 |
+
self.__setstate(hour, minute or None)
|
| 1263 |
+
self._hashcode = -1
|
| 1264 |
+
return self
|
| 1265 |
+
hour, minute, second, microsecond, fold = _check_time_fields(
|
| 1266 |
+
hour, minute, second, microsecond, fold)
|
| 1267 |
+
_check_tzinfo_arg(tzinfo)
|
| 1268 |
+
self = object.__new__(cls)
|
| 1269 |
+
self._hour = hour
|
| 1270 |
+
self._minute = minute
|
| 1271 |
+
self._second = second
|
| 1272 |
+
self._microsecond = microsecond
|
| 1273 |
+
self._tzinfo = tzinfo
|
| 1274 |
+
self._hashcode = -1
|
| 1275 |
+
self._fold = fold
|
| 1276 |
+
return self
|
| 1277 |
+
|
| 1278 |
+
# Read-only field accessors
|
| 1279 |
+
@property
|
| 1280 |
+
def hour(self):
|
| 1281 |
+
"""hour (0-23)"""
|
| 1282 |
+
return self._hour
|
| 1283 |
+
|
| 1284 |
+
@property
|
| 1285 |
+
def minute(self):
|
| 1286 |
+
"""minute (0-59)"""
|
| 1287 |
+
return self._minute
|
| 1288 |
+
|
| 1289 |
+
@property
|
| 1290 |
+
def second(self):
|
| 1291 |
+
"""second (0-59)"""
|
| 1292 |
+
return self._second
|
| 1293 |
+
|
| 1294 |
+
@property
|
| 1295 |
+
def microsecond(self):
|
| 1296 |
+
"""microsecond (0-999999)"""
|
| 1297 |
+
return self._microsecond
|
| 1298 |
+
|
| 1299 |
+
@property
|
| 1300 |
+
def tzinfo(self):
|
| 1301 |
+
"""timezone info object"""
|
| 1302 |
+
return self._tzinfo
|
| 1303 |
+
|
| 1304 |
+
@property
|
| 1305 |
+
def fold(self):
|
| 1306 |
+
return self._fold
|
| 1307 |
+
|
| 1308 |
+
# Standard conversions, __hash__ (and helpers)
|
| 1309 |
+
|
| 1310 |
+
# Comparisons of time objects with other.
|
| 1311 |
+
|
| 1312 |
+
def __eq__(self, other):
|
| 1313 |
+
if isinstance(other, time):
|
| 1314 |
+
return self._cmp(other, allow_mixed=True) == 0
|
| 1315 |
+
else:
|
| 1316 |
+
return NotImplemented
|
| 1317 |
+
|
| 1318 |
+
def __le__(self, other):
|
| 1319 |
+
if isinstance(other, time):
|
| 1320 |
+
return self._cmp(other) <= 0
|
| 1321 |
+
else:
|
| 1322 |
+
return NotImplemented
|
| 1323 |
+
|
| 1324 |
+
def __lt__(self, other):
|
| 1325 |
+
if isinstance(other, time):
|
| 1326 |
+
return self._cmp(other) < 0
|
| 1327 |
+
else:
|
| 1328 |
+
return NotImplemented
|
| 1329 |
+
|
| 1330 |
+
def __ge__(self, other):
|
| 1331 |
+
if isinstance(other, time):
|
| 1332 |
+
return self._cmp(other) >= 0
|
| 1333 |
+
else:
|
| 1334 |
+
return NotImplemented
|
| 1335 |
+
|
| 1336 |
+
def __gt__(self, other):
|
| 1337 |
+
if isinstance(other, time):
|
| 1338 |
+
return self._cmp(other) > 0
|
| 1339 |
+
else:
|
| 1340 |
+
return NotImplemented
|
| 1341 |
+
|
| 1342 |
+
def _cmp(self, other, allow_mixed=False):
|
| 1343 |
+
assert isinstance(other, time)
|
| 1344 |
+
mytz = self._tzinfo
|
| 1345 |
+
ottz = other._tzinfo
|
| 1346 |
+
myoff = otoff = None
|
| 1347 |
+
|
| 1348 |
+
if mytz is ottz:
|
| 1349 |
+
base_compare = True
|
| 1350 |
+
else:
|
| 1351 |
+
myoff = self.utcoffset()
|
| 1352 |
+
otoff = other.utcoffset()
|
| 1353 |
+
base_compare = myoff == otoff
|
| 1354 |
+
|
| 1355 |
+
if base_compare:
|
| 1356 |
+
return _cmp((self._hour, self._minute, self._second,
|
| 1357 |
+
self._microsecond),
|
| 1358 |
+
(other._hour, other._minute, other._second,
|
| 1359 |
+
other._microsecond))
|
| 1360 |
+
if myoff is None or otoff is None:
|
| 1361 |
+
if allow_mixed:
|
| 1362 |
+
return 2 # arbitrary non-zero value
|
| 1363 |
+
else:
|
| 1364 |
+
raise TypeError("cannot compare naive and aware times")
|
| 1365 |
+
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
|
| 1366 |
+
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
|
| 1367 |
+
return _cmp((myhhmm, self._second, self._microsecond),
|
| 1368 |
+
(othhmm, other._second, other._microsecond))
|
| 1369 |
+
|
| 1370 |
+
def __hash__(self):
|
| 1371 |
+
"""Hash."""
|
| 1372 |
+
if self._hashcode == -1:
|
| 1373 |
+
if self.fold:
|
| 1374 |
+
t = self.replace(fold=0)
|
| 1375 |
+
else:
|
| 1376 |
+
t = self
|
| 1377 |
+
tzoff = t.utcoffset()
|
| 1378 |
+
if not tzoff: # zero or None
|
| 1379 |
+
self._hashcode = hash(t._getstate()[0])
|
| 1380 |
+
else:
|
| 1381 |
+
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
|
| 1382 |
+
timedelta(hours=1))
|
| 1383 |
+
assert not m % timedelta(minutes=1), "whole minute"
|
| 1384 |
+
m //= timedelta(minutes=1)
|
| 1385 |
+
if 0 <= h < 24:
|
| 1386 |
+
self._hashcode = hash(time(h, m, self.second, self.microsecond))
|
| 1387 |
+
else:
|
| 1388 |
+
self._hashcode = hash((h, m, self.second, self.microsecond))
|
| 1389 |
+
return self._hashcode
|
| 1390 |
+
|
| 1391 |
+
# Conversion to string
|
| 1392 |
+
|
| 1393 |
+
def _tzstr(self):
|
| 1394 |
+
"""Return formatted timezone offset (+xx:xx) or an empty string."""
|
| 1395 |
+
off = self.utcoffset()
|
| 1396 |
+
return _format_offset(off)
|
| 1397 |
+
|
| 1398 |
+
def __repr__(self):
|
| 1399 |
+
"""Convert to formal string, for repr()."""
|
| 1400 |
+
if self._microsecond != 0:
|
| 1401 |
+
s = ", %d, %d" % (self._second, self._microsecond)
|
| 1402 |
+
elif self._second != 0:
|
| 1403 |
+
s = ", %d" % self._second
|
| 1404 |
+
else:
|
| 1405 |
+
s = ""
|
| 1406 |
+
s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
|
| 1407 |
+
self.__class__.__qualname__,
|
| 1408 |
+
self._hour, self._minute, s)
|
| 1409 |
+
if self._tzinfo is not None:
|
| 1410 |
+
assert s[-1:] == ")"
|
| 1411 |
+
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
|
| 1412 |
+
if self._fold:
|
| 1413 |
+
assert s[-1:] == ")"
|
| 1414 |
+
s = s[:-1] + ", fold=1)"
|
| 1415 |
+
return s
|
| 1416 |
+
|
| 1417 |
+
def isoformat(self, timespec='auto'):
|
| 1418 |
+
"""Return the time formatted according to ISO.
|
| 1419 |
+
|
| 1420 |
+
The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
|
| 1421 |
+
part is omitted if self.microsecond == 0.
|
| 1422 |
+
|
| 1423 |
+
The optional argument timespec specifies the number of additional
|
| 1424 |
+
terms of the time to include. Valid options are 'auto', 'hours',
|
| 1425 |
+
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
|
| 1426 |
+
"""
|
| 1427 |
+
s = _format_time(self._hour, self._minute, self._second,
|
| 1428 |
+
self._microsecond, timespec)
|
| 1429 |
+
tz = self._tzstr()
|
| 1430 |
+
if tz:
|
| 1431 |
+
s += tz
|
| 1432 |
+
return s
|
| 1433 |
+
|
| 1434 |
+
__str__ = isoformat
|
| 1435 |
+
|
| 1436 |
+
@classmethod
|
| 1437 |
+
def fromisoformat(cls, time_string):
|
| 1438 |
+
"""Construct a time from the output of isoformat()."""
|
| 1439 |
+
if not isinstance(time_string, str):
|
| 1440 |
+
raise TypeError('fromisoformat: argument must be str')
|
| 1441 |
+
|
| 1442 |
+
try:
|
| 1443 |
+
return cls(*_parse_isoformat_time(time_string))
|
| 1444 |
+
except Exception:
|
| 1445 |
+
raise ValueError(f'Invalid isoformat string: {time_string!r}')
|
| 1446 |
+
|
| 1447 |
+
|
| 1448 |
+
def strftime(self, fmt):
|
| 1449 |
+
"""Format using strftime(). The date part of the timestamp passed
|
| 1450 |
+
to underlying strftime should not be used.
|
| 1451 |
+
"""
|
| 1452 |
+
# The year must be >= 1000 else Python's strftime implementation
|
| 1453 |
+
# can raise a bogus exception.
|
| 1454 |
+
timetuple = (1900, 1, 1,
|
| 1455 |
+
self._hour, self._minute, self._second,
|
| 1456 |
+
0, 1, -1)
|
| 1457 |
+
return _wrap_strftime(self, fmt, timetuple)
|
| 1458 |
+
|
| 1459 |
+
def __format__(self, fmt):
|
| 1460 |
+
if not isinstance(fmt, str):
|
| 1461 |
+
raise TypeError("must be str, not %s" % type(fmt).__name__)
|
| 1462 |
+
if len(fmt) != 0:
|
| 1463 |
+
return self.strftime(fmt)
|
| 1464 |
+
return str(self)
|
| 1465 |
+
|
| 1466 |
+
# Timezone functions
|
| 1467 |
+
|
| 1468 |
+
def utcoffset(self):
|
| 1469 |
+
"""Return the timezone offset as timedelta, positive east of UTC
|
| 1470 |
+
(negative west of UTC)."""
|
| 1471 |
+
if self._tzinfo is None:
|
| 1472 |
+
return None
|
| 1473 |
+
offset = self._tzinfo.utcoffset(None)
|
| 1474 |
+
_check_utc_offset("utcoffset", offset)
|
| 1475 |
+
return offset
|
| 1476 |
+
|
| 1477 |
+
def tzname(self):
|
| 1478 |
+
"""Return the timezone name.
|
| 1479 |
+
|
| 1480 |
+
Note that the name is 100% informational -- there's no requirement that
|
| 1481 |
+
it mean anything in particular. For example, "GMT", "UTC", "-500",
|
| 1482 |
+
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
|
| 1483 |
+
"""
|
| 1484 |
+
if self._tzinfo is None:
|
| 1485 |
+
return None
|
| 1486 |
+
name = self._tzinfo.tzname(None)
|
| 1487 |
+
_check_tzname(name)
|
| 1488 |
+
return name
|
| 1489 |
+
|
| 1490 |
+
def dst(self):
|
| 1491 |
+
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
|
| 1492 |
+
positive eastward) if DST is in effect.
|
| 1493 |
+
|
| 1494 |
+
This is purely informational; the DST offset has already been added to
|
| 1495 |
+
the UTC offset returned by utcoffset() if applicable, so there's no
|
| 1496 |
+
need to consult dst() unless you're interested in displaying the DST
|
| 1497 |
+
info.
|
| 1498 |
+
"""
|
| 1499 |
+
if self._tzinfo is None:
|
| 1500 |
+
return None
|
| 1501 |
+
offset = self._tzinfo.dst(None)
|
| 1502 |
+
_check_utc_offset("dst", offset)
|
| 1503 |
+
return offset
|
| 1504 |
+
|
| 1505 |
+
def replace(self, hour=None, minute=None, second=None, microsecond=None,
|
| 1506 |
+
tzinfo=True, *, fold=None):
|
| 1507 |
+
"""Return a new time with new values for the specified fields."""
|
| 1508 |
+
if hour is None:
|
| 1509 |
+
hour = self.hour
|
| 1510 |
+
if minute is None:
|
| 1511 |
+
minute = self.minute
|
| 1512 |
+
if second is None:
|
| 1513 |
+
second = self.second
|
| 1514 |
+
if microsecond is None:
|
| 1515 |
+
microsecond = self.microsecond
|
| 1516 |
+
if tzinfo is True:
|
| 1517 |
+
tzinfo = self.tzinfo
|
| 1518 |
+
if fold is None:
|
| 1519 |
+
fold = self._fold
|
| 1520 |
+
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
|
| 1521 |
+
|
| 1522 |
+
# Pickle support.
|
| 1523 |
+
|
| 1524 |
+
def _getstate(self, protocol=3):
|
| 1525 |
+
us2, us3 = divmod(self._microsecond, 256)
|
| 1526 |
+
us1, us2 = divmod(us2, 256)
|
| 1527 |
+
h = self._hour
|
| 1528 |
+
if self._fold and protocol > 3:
|
| 1529 |
+
h += 128
|
| 1530 |
+
basestate = bytes([h, self._minute, self._second,
|
| 1531 |
+
us1, us2, us3])
|
| 1532 |
+
if self._tzinfo is None:
|
| 1533 |
+
return (basestate,)
|
| 1534 |
+
else:
|
| 1535 |
+
return (basestate, self._tzinfo)
|
| 1536 |
+
|
| 1537 |
+
def __setstate(self, string, tzinfo):
|
| 1538 |
+
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
|
| 1539 |
+
raise TypeError("bad tzinfo state arg")
|
| 1540 |
+
h, self._minute, self._second, us1, us2, us3 = string
|
| 1541 |
+
if h > 127:
|
| 1542 |
+
self._fold = 1
|
| 1543 |
+
self._hour = h - 128
|
| 1544 |
+
else:
|
| 1545 |
+
self._fold = 0
|
| 1546 |
+
self._hour = h
|
| 1547 |
+
self._microsecond = (((us1 << 8) | us2) << 8) | us3
|
| 1548 |
+
self._tzinfo = tzinfo
|
| 1549 |
+
|
| 1550 |
+
def __reduce_ex__(self, protocol):
|
| 1551 |
+
return (self.__class__, self._getstate(protocol))
|
| 1552 |
+
|
| 1553 |
+
def __reduce__(self):
|
| 1554 |
+
return self.__reduce_ex__(2)
|
| 1555 |
+
|
| 1556 |
+
_time_class = time # so functions w/ args named "time" can get at the class
|
| 1557 |
+
|
| 1558 |
+
time.min = time(0, 0, 0)
|
| 1559 |
+
time.max = time(23, 59, 59, 999999)
|
| 1560 |
+
time.resolution = timedelta(microseconds=1)
|
| 1561 |
+
|
| 1562 |
+
|
| 1563 |
+
class datetime(date):
|
| 1564 |
+
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
|
| 1565 |
+
|
| 1566 |
+
The year, month and day arguments are required. tzinfo may be None, or an
|
| 1567 |
+
instance of a tzinfo subclass. The remaining arguments may be ints.
|
| 1568 |
+
"""
|
| 1569 |
+
__slots__ = date.__slots__ + time.__slots__
|
| 1570 |
+
|
| 1571 |
+
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
|
| 1572 |
+
microsecond=0, tzinfo=None, *, fold=0):
|
| 1573 |
+
if (isinstance(year, (bytes, str)) and len(year) == 10 and
|
| 1574 |
+
1 <= ord(year[2:3])&0x7F <= 12):
|
| 1575 |
+
# Pickle support
|
| 1576 |
+
if isinstance(year, str):
|
| 1577 |
+
try:
|
| 1578 |
+
year = bytes(year, 'latin1')
|
| 1579 |
+
except UnicodeEncodeError:
|
| 1580 |
+
# More informative error message.
|
| 1581 |
+
raise ValueError(
|
| 1582 |
+
"Failed to encode latin1 string when unpickling "
|
| 1583 |
+
"a datetime object. "
|
| 1584 |
+
"pickle.load(data, encoding='latin1') is assumed.")
|
| 1585 |
+
self = object.__new__(cls)
|
| 1586 |
+
self.__setstate(year, month)
|
| 1587 |
+
self._hashcode = -1
|
| 1588 |
+
return self
|
| 1589 |
+
year, month, day = _check_date_fields(year, month, day)
|
| 1590 |
+
hour, minute, second, microsecond, fold = _check_time_fields(
|
| 1591 |
+
hour, minute, second, microsecond, fold)
|
| 1592 |
+
_check_tzinfo_arg(tzinfo)
|
| 1593 |
+
self = object.__new__(cls)
|
| 1594 |
+
self._year = year
|
| 1595 |
+
self._month = month
|
| 1596 |
+
self._day = day
|
| 1597 |
+
self._hour = hour
|
| 1598 |
+
self._minute = minute
|
| 1599 |
+
self._second = second
|
| 1600 |
+
self._microsecond = microsecond
|
| 1601 |
+
self._tzinfo = tzinfo
|
| 1602 |
+
self._hashcode = -1
|
| 1603 |
+
self._fold = fold
|
| 1604 |
+
return self
|
| 1605 |
+
|
| 1606 |
+
# Read-only field accessors
|
| 1607 |
+
@property
|
| 1608 |
+
def hour(self):
|
| 1609 |
+
"""hour (0-23)"""
|
| 1610 |
+
return self._hour
|
| 1611 |
+
|
| 1612 |
+
@property
|
| 1613 |
+
def minute(self):
|
| 1614 |
+
"""minute (0-59)"""
|
| 1615 |
+
return self._minute
|
| 1616 |
+
|
| 1617 |
+
@property
|
| 1618 |
+
def second(self):
|
| 1619 |
+
"""second (0-59)"""
|
| 1620 |
+
return self._second
|
| 1621 |
+
|
| 1622 |
+
@property
|
| 1623 |
+
def microsecond(self):
|
| 1624 |
+
"""microsecond (0-999999)"""
|
| 1625 |
+
return self._microsecond
|
| 1626 |
+
|
| 1627 |
+
@property
|
| 1628 |
+
def tzinfo(self):
|
| 1629 |
+
"""timezone info object"""
|
| 1630 |
+
return self._tzinfo
|
| 1631 |
+
|
| 1632 |
+
@property
|
| 1633 |
+
def fold(self):
|
| 1634 |
+
return self._fold
|
| 1635 |
+
|
| 1636 |
+
@classmethod
|
| 1637 |
+
def _fromtimestamp(cls, t, utc, tz):
|
| 1638 |
+
"""Construct a datetime from a POSIX timestamp (like time.time()).
|
| 1639 |
+
|
| 1640 |
+
A timezone info object may be passed in as well.
|
| 1641 |
+
"""
|
| 1642 |
+
frac, t = _math.modf(t)
|
| 1643 |
+
us = round(frac * 1e6)
|
| 1644 |
+
if us >= 1000000:
|
| 1645 |
+
t += 1
|
| 1646 |
+
us -= 1000000
|
| 1647 |
+
elif us < 0:
|
| 1648 |
+
t -= 1
|
| 1649 |
+
us += 1000000
|
| 1650 |
+
|
| 1651 |
+
converter = _time.gmtime if utc else _time.localtime
|
| 1652 |
+
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
|
| 1653 |
+
ss = min(ss, 59) # clamp out leap seconds if the platform has them
|
| 1654 |
+
result = cls(y, m, d, hh, mm, ss, us, tz)
|
| 1655 |
+
if tz is None and not utc:
|
| 1656 |
+
# As of version 2015f max fold in IANA database is
|
| 1657 |
+
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
|
| 1658 |
+
# Let's probe 24 hours in the past to detect a transition:
|
| 1659 |
+
max_fold_seconds = 24 * 3600
|
| 1660 |
+
|
| 1661 |
+
# On Windows localtime_s throws an OSError for negative values,
|
| 1662 |
+
# thus we can't perform fold detection for values of time less
|
| 1663 |
+
# than the max time fold. See comments in _datetimemodule's
|
| 1664 |
+
# version of this method for more details.
|
| 1665 |
+
if t < max_fold_seconds and sys.platform.startswith("win"):
|
| 1666 |
+
return result
|
| 1667 |
+
|
| 1668 |
+
y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
|
| 1669 |
+
probe1 = cls(y, m, d, hh, mm, ss, us, tz)
|
| 1670 |
+
trans = result - probe1 - timedelta(0, max_fold_seconds)
|
| 1671 |
+
if trans.days < 0:
|
| 1672 |
+
y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
|
| 1673 |
+
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
|
| 1674 |
+
if probe2 == result:
|
| 1675 |
+
result._fold = 1
|
| 1676 |
+
elif tz is not None:
|
| 1677 |
+
result = tz.fromutc(result)
|
| 1678 |
+
return result
|
| 1679 |
+
|
| 1680 |
+
@classmethod
|
| 1681 |
+
def fromtimestamp(cls, t, tz=None):
|
| 1682 |
+
"""Construct a datetime from a POSIX timestamp (like time.time()).
|
| 1683 |
+
|
| 1684 |
+
A timezone info object may be passed in as well.
|
| 1685 |
+
"""
|
| 1686 |
+
_check_tzinfo_arg(tz)
|
| 1687 |
+
|
| 1688 |
+
return cls._fromtimestamp(t, tz is not None, tz)
|
| 1689 |
+
|
| 1690 |
+
@classmethod
|
| 1691 |
+
def utcfromtimestamp(cls, t):
|
| 1692 |
+
"""Construct a naive UTC datetime from a POSIX timestamp."""
|
| 1693 |
+
return cls._fromtimestamp(t, True, None)
|
| 1694 |
+
|
| 1695 |
+
@classmethod
|
| 1696 |
+
def now(cls, tz=None):
|
| 1697 |
+
"Construct a datetime from time.time() and optional time zone info."
|
| 1698 |
+
t = _time.time()
|
| 1699 |
+
return cls.fromtimestamp(t, tz)
|
| 1700 |
+
|
| 1701 |
+
@classmethod
|
| 1702 |
+
def utcnow(cls):
|
| 1703 |
+
"Construct a UTC datetime from time.time()."
|
| 1704 |
+
t = _time.time()
|
| 1705 |
+
return cls.utcfromtimestamp(t)
|
| 1706 |
+
|
| 1707 |
+
@classmethod
|
| 1708 |
+
def combine(cls, date, time, tzinfo=True):
|
| 1709 |
+
"Construct a datetime from a given date and a given time."
|
| 1710 |
+
if not isinstance(date, _date_class):
|
| 1711 |
+
raise TypeError("date argument must be a date instance")
|
| 1712 |
+
if not isinstance(time, _time_class):
|
| 1713 |
+
raise TypeError("time argument must be a time instance")
|
| 1714 |
+
if tzinfo is True:
|
| 1715 |
+
tzinfo = time.tzinfo
|
| 1716 |
+
return cls(date.year, date.month, date.day,
|
| 1717 |
+
time.hour, time.minute, time.second, time.microsecond,
|
| 1718 |
+
tzinfo, fold=time.fold)
|
| 1719 |
+
|
| 1720 |
+
@classmethod
|
| 1721 |
+
def fromisoformat(cls, date_string):
|
| 1722 |
+
"""Construct a datetime from the output of datetime.isoformat()."""
|
| 1723 |
+
if not isinstance(date_string, str):
|
| 1724 |
+
raise TypeError('fromisoformat: argument must be str')
|
| 1725 |
+
|
| 1726 |
+
# Split this at the separator
|
| 1727 |
+
dstr = date_string[0:10]
|
| 1728 |
+
tstr = date_string[11:]
|
| 1729 |
+
|
| 1730 |
+
try:
|
| 1731 |
+
date_components = _parse_isoformat_date(dstr)
|
| 1732 |
+
except ValueError:
|
| 1733 |
+
raise ValueError(f'Invalid isoformat string: {date_string!r}')
|
| 1734 |
+
|
| 1735 |
+
if tstr:
|
| 1736 |
+
try:
|
| 1737 |
+
time_components = _parse_isoformat_time(tstr)
|
| 1738 |
+
except ValueError:
|
| 1739 |
+
raise ValueError(f'Invalid isoformat string: {date_string!r}')
|
| 1740 |
+
else:
|
| 1741 |
+
time_components = [0, 0, 0, 0, None]
|
| 1742 |
+
|
| 1743 |
+
return cls(*(date_components + time_components))
|
| 1744 |
+
|
| 1745 |
+
def timetuple(self):
|
| 1746 |
+
"Return local time tuple compatible with time.localtime()."
|
| 1747 |
+
dst = self.dst()
|
| 1748 |
+
if dst is None:
|
| 1749 |
+
dst = -1
|
| 1750 |
+
elif dst:
|
| 1751 |
+
dst = 1
|
| 1752 |
+
else:
|
| 1753 |
+
dst = 0
|
| 1754 |
+
return _build_struct_time(self.year, self.month, self.day,
|
| 1755 |
+
self.hour, self.minute, self.second,
|
| 1756 |
+
dst)
|
| 1757 |
+
|
| 1758 |
+
def _mktime(self):
|
| 1759 |
+
"""Return integer POSIX timestamp."""
|
| 1760 |
+
epoch = datetime(1970, 1, 1)
|
| 1761 |
+
max_fold_seconds = 24 * 3600
|
| 1762 |
+
t = (self - epoch) // timedelta(0, 1)
|
| 1763 |
+
def local(u):
|
| 1764 |
+
y, m, d, hh, mm, ss = _time.localtime(u)[:6]
|
| 1765 |
+
return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
|
| 1766 |
+
|
| 1767 |
+
# Our goal is to solve t = local(u) for u.
|
| 1768 |
+
a = local(t) - t
|
| 1769 |
+
u1 = t - a
|
| 1770 |
+
t1 = local(u1)
|
| 1771 |
+
if t1 == t:
|
| 1772 |
+
# We found one solution, but it may not be the one we need.
|
| 1773 |
+
# Look for an earlier solution (if `fold` is 0), or a
|
| 1774 |
+
# later one (if `fold` is 1).
|
| 1775 |
+
u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
|
| 1776 |
+
b = local(u2) - u2
|
| 1777 |
+
if a == b:
|
| 1778 |
+
return u1
|
| 1779 |
+
else:
|
| 1780 |
+
b = t1 - u1
|
| 1781 |
+
assert a != b
|
| 1782 |
+
u2 = t - b
|
| 1783 |
+
t2 = local(u2)
|
| 1784 |
+
if t2 == t:
|
| 1785 |
+
return u2
|
| 1786 |
+
if t1 == t:
|
| 1787 |
+
return u1
|
| 1788 |
+
# We have found both offsets a and b, but neither t - a nor t - b is
|
| 1789 |
+
# a solution. This means t is in the gap.
|
| 1790 |
+
return (max, min)[self.fold](u1, u2)
|
| 1791 |
+
|
| 1792 |
+
|
| 1793 |
+
def timestamp(self):
|
| 1794 |
+
"Return POSIX timestamp as float"
|
| 1795 |
+
if self._tzinfo is None:
|
| 1796 |
+
s = self._mktime()
|
| 1797 |
+
return s + self.microsecond / 1e6
|
| 1798 |
+
else:
|
| 1799 |
+
return (self - _EPOCH).total_seconds()
|
| 1800 |
+
|
| 1801 |
+
def utctimetuple(self):
|
| 1802 |
+
"Return UTC time tuple compatible with time.gmtime()."
|
| 1803 |
+
offset = self.utcoffset()
|
| 1804 |
+
if offset:
|
| 1805 |
+
self -= offset
|
| 1806 |
+
y, m, d = self.year, self.month, self.day
|
| 1807 |
+
hh, mm, ss = self.hour, self.minute, self.second
|
| 1808 |
+
return _build_struct_time(y, m, d, hh, mm, ss, 0)
|
| 1809 |
+
|
| 1810 |
+
def date(self):
|
| 1811 |
+
"Return the date part."
|
| 1812 |
+
return date(self._year, self._month, self._day)
|
| 1813 |
+
|
| 1814 |
+
def time(self):
|
| 1815 |
+
"Return the time part, with tzinfo None."
|
| 1816 |
+
return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
|
| 1817 |
+
|
| 1818 |
+
def timetz(self):
|
| 1819 |
+
"Return the time part, with same tzinfo."
|
| 1820 |
+
return time(self.hour, self.minute, self.second, self.microsecond,
|
| 1821 |
+
self._tzinfo, fold=self.fold)
|
| 1822 |
+
|
| 1823 |
+
def replace(self, year=None, month=None, day=None, hour=None,
|
| 1824 |
+
minute=None, second=None, microsecond=None, tzinfo=True,
|
| 1825 |
+
*, fold=None):
|
| 1826 |
+
"""Return a new datetime with new values for the specified fields."""
|
| 1827 |
+
if year is None:
|
| 1828 |
+
year = self.year
|
| 1829 |
+
if month is None:
|
| 1830 |
+
month = self.month
|
| 1831 |
+
if day is None:
|
| 1832 |
+
day = self.day
|
| 1833 |
+
if hour is None:
|
| 1834 |
+
hour = self.hour
|
| 1835 |
+
if minute is None:
|
| 1836 |
+
minute = self.minute
|
| 1837 |
+
if second is None:
|
| 1838 |
+
second = self.second
|
| 1839 |
+
if microsecond is None:
|
| 1840 |
+
microsecond = self.microsecond
|
| 1841 |
+
if tzinfo is True:
|
| 1842 |
+
tzinfo = self.tzinfo
|
| 1843 |
+
if fold is None:
|
| 1844 |
+
fold = self.fold
|
| 1845 |
+
return type(self)(year, month, day, hour, minute, second,
|
| 1846 |
+
microsecond, tzinfo, fold=fold)
|
| 1847 |
+
|
| 1848 |
+
def _local_timezone(self):
|
| 1849 |
+
if self.tzinfo is None:
|
| 1850 |
+
ts = self._mktime()
|
| 1851 |
+
else:
|
| 1852 |
+
ts = (self - _EPOCH) // timedelta(seconds=1)
|
| 1853 |
+
localtm = _time.localtime(ts)
|
| 1854 |
+
local = datetime(*localtm[:6])
|
| 1855 |
+
# Extract TZ data
|
| 1856 |
+
gmtoff = localtm.tm_gmtoff
|
| 1857 |
+
zone = localtm.tm_zone
|
| 1858 |
+
return timezone(timedelta(seconds=gmtoff), zone)
|
| 1859 |
+
|
| 1860 |
+
def astimezone(self, tz=None):
|
| 1861 |
+
if tz is None:
|
| 1862 |
+
tz = self._local_timezone()
|
| 1863 |
+
elif not isinstance(tz, tzinfo):
|
| 1864 |
+
raise TypeError("tz argument must be an instance of tzinfo")
|
| 1865 |
+
|
| 1866 |
+
mytz = self.tzinfo
|
| 1867 |
+
if mytz is None:
|
| 1868 |
+
mytz = self._local_timezone()
|
| 1869 |
+
myoffset = mytz.utcoffset(self)
|
| 1870 |
+
else:
|
| 1871 |
+
myoffset = mytz.utcoffset(self)
|
| 1872 |
+
if myoffset is None:
|
| 1873 |
+
mytz = self.replace(tzinfo=None)._local_timezone()
|
| 1874 |
+
myoffset = mytz.utcoffset(self)
|
| 1875 |
+
|
| 1876 |
+
if tz is mytz:
|
| 1877 |
+
return self
|
| 1878 |
+
|
| 1879 |
+
# Convert self to UTC, and attach the new time zone object.
|
| 1880 |
+
utc = (self - myoffset).replace(tzinfo=tz)
|
| 1881 |
+
|
| 1882 |
+
# Convert from UTC to tz's local time.
|
| 1883 |
+
return tz.fromutc(utc)
|
| 1884 |
+
|
| 1885 |
+
# Ways to produce a string.
|
| 1886 |
+
|
| 1887 |
+
def ctime(self):
|
| 1888 |
+
"Return ctime() style string."
|
| 1889 |
+
weekday = self.toordinal() % 7 or 7
|
| 1890 |
+
return "%s %s %2d %02d:%02d:%02d %04d" % (
|
| 1891 |
+
_DAYNAMES[weekday],
|
| 1892 |
+
_MONTHNAMES[self._month],
|
| 1893 |
+
self._day,
|
| 1894 |
+
self._hour, self._minute, self._second,
|
| 1895 |
+
self._year)
|
| 1896 |
+
|
| 1897 |
+
def isoformat(self, sep='T', timespec='auto'):
|
| 1898 |
+
"""Return the time formatted according to ISO.
|
| 1899 |
+
|
| 1900 |
+
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
|
| 1901 |
+
By default, the fractional part is omitted if self.microsecond == 0.
|
| 1902 |
+
|
| 1903 |
+
If self.tzinfo is not None, the UTC offset is also attached, giving
|
| 1904 |
+
giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
|
| 1905 |
+
|
| 1906 |
+
Optional argument sep specifies the separator between date and
|
| 1907 |
+
time, default 'T'.
|
| 1908 |
+
|
| 1909 |
+
The optional argument timespec specifies the number of additional
|
| 1910 |
+
terms of the time to include. Valid options are 'auto', 'hours',
|
| 1911 |
+
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
|
| 1912 |
+
"""
|
| 1913 |
+
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
|
| 1914 |
+
_format_time(self._hour, self._minute, self._second,
|
| 1915 |
+
self._microsecond, timespec))
|
| 1916 |
+
|
| 1917 |
+
off = self.utcoffset()
|
| 1918 |
+
tz = _format_offset(off)
|
| 1919 |
+
if tz:
|
| 1920 |
+
s += tz
|
| 1921 |
+
|
| 1922 |
+
return s
|
| 1923 |
+
|
| 1924 |
+
def __repr__(self):
|
| 1925 |
+
"""Convert to formal string, for repr()."""
|
| 1926 |
+
L = [self._year, self._month, self._day, # These are never zero
|
| 1927 |
+
self._hour, self._minute, self._second, self._microsecond]
|
| 1928 |
+
if L[-1] == 0:
|
| 1929 |
+
del L[-1]
|
| 1930 |
+
if L[-1] == 0:
|
| 1931 |
+
del L[-1]
|
| 1932 |
+
s = "%s.%s(%s)" % (self.__class__.__module__,
|
| 1933 |
+
self.__class__.__qualname__,
|
| 1934 |
+
", ".join(map(str, L)))
|
| 1935 |
+
if self._tzinfo is not None:
|
| 1936 |
+
assert s[-1:] == ")"
|
| 1937 |
+
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
|
| 1938 |
+
if self._fold:
|
| 1939 |
+
assert s[-1:] == ")"
|
| 1940 |
+
s = s[:-1] + ", fold=1)"
|
| 1941 |
+
return s
|
| 1942 |
+
|
| 1943 |
+
def __str__(self):
|
| 1944 |
+
"Convert to string, for str()."
|
| 1945 |
+
return self.isoformat(sep=' ')
|
| 1946 |
+
|
| 1947 |
+
@classmethod
|
| 1948 |
+
def strptime(cls, date_string, format):
|
| 1949 |
+
'string, format -> new datetime parsed from a string (like time.strptime()).'
|
| 1950 |
+
import _strptime
|
| 1951 |
+
return _strptime._strptime_datetime(cls, date_string, format)
|
| 1952 |
+
|
| 1953 |
+
def utcoffset(self):
|
| 1954 |
+
"""Return the timezone offset as timedelta positive east of UTC (negative west of
|
| 1955 |
+
UTC)."""
|
| 1956 |
+
if self._tzinfo is None:
|
| 1957 |
+
return None
|
| 1958 |
+
offset = self._tzinfo.utcoffset(self)
|
| 1959 |
+
_check_utc_offset("utcoffset", offset)
|
| 1960 |
+
return offset
|
| 1961 |
+
|
| 1962 |
+
def tzname(self):
|
| 1963 |
+
"""Return the timezone name.
|
| 1964 |
+
|
| 1965 |
+
Note that the name is 100% informational -- there's no requirement that
|
| 1966 |
+
it mean anything in particular. For example, "GMT", "UTC", "-500",
|
| 1967 |
+
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
|
| 1968 |
+
"""
|
| 1969 |
+
if self._tzinfo is None:
|
| 1970 |
+
return None
|
| 1971 |
+
name = self._tzinfo.tzname(self)
|
| 1972 |
+
_check_tzname(name)
|
| 1973 |
+
return name
|
| 1974 |
+
|
| 1975 |
+
def dst(self):
|
| 1976 |
+
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
|
| 1977 |
+
positive eastward) if DST is in effect.
|
| 1978 |
+
|
| 1979 |
+
This is purely informational; the DST offset has already been added to
|
| 1980 |
+
the UTC offset returned by utcoffset() if applicable, so there's no
|
| 1981 |
+
need to consult dst() unless you're interested in displaying the DST
|
| 1982 |
+
info.
|
| 1983 |
+
"""
|
| 1984 |
+
if self._tzinfo is None:
|
| 1985 |
+
return None
|
| 1986 |
+
offset = self._tzinfo.dst(self)
|
| 1987 |
+
_check_utc_offset("dst", offset)
|
| 1988 |
+
return offset
|
| 1989 |
+
|
| 1990 |
+
# Comparisons of datetime objects with other.
|
| 1991 |
+
|
| 1992 |
+
def __eq__(self, other):
|
| 1993 |
+
if isinstance(other, datetime):
|
| 1994 |
+
return self._cmp(other, allow_mixed=True) == 0
|
| 1995 |
+
elif not isinstance(other, date):
|
| 1996 |
+
return NotImplemented
|
| 1997 |
+
else:
|
| 1998 |
+
return False
|
| 1999 |
+
|
| 2000 |
+
def __le__(self, other):
|
| 2001 |
+
if isinstance(other, datetime):
|
| 2002 |
+
return self._cmp(other) <= 0
|
| 2003 |
+
elif not isinstance(other, date):
|
| 2004 |
+
return NotImplemented
|
| 2005 |
+
else:
|
| 2006 |
+
_cmperror(self, other)
|
| 2007 |
+
|
| 2008 |
+
def __lt__(self, other):
|
| 2009 |
+
if isinstance(other, datetime):
|
| 2010 |
+
return self._cmp(other) < 0
|
| 2011 |
+
elif not isinstance(other, date):
|
| 2012 |
+
return NotImplemented
|
| 2013 |
+
else:
|
| 2014 |
+
_cmperror(self, other)
|
| 2015 |
+
|
| 2016 |
+
def __ge__(self, other):
|
| 2017 |
+
if isinstance(other, datetime):
|
| 2018 |
+
return self._cmp(other) >= 0
|
| 2019 |
+
elif not isinstance(other, date):
|
| 2020 |
+
return NotImplemented
|
| 2021 |
+
else:
|
| 2022 |
+
_cmperror(self, other)
|
| 2023 |
+
|
| 2024 |
+
def __gt__(self, other):
|
| 2025 |
+
if isinstance(other, datetime):
|
| 2026 |
+
return self._cmp(other) > 0
|
| 2027 |
+
elif not isinstance(other, date):
|
| 2028 |
+
return NotImplemented
|
| 2029 |
+
else:
|
| 2030 |
+
_cmperror(self, other)
|
| 2031 |
+
|
| 2032 |
+
def _cmp(self, other, allow_mixed=False):
|
| 2033 |
+
assert isinstance(other, datetime)
|
| 2034 |
+
mytz = self._tzinfo
|
| 2035 |
+
ottz = other._tzinfo
|
| 2036 |
+
myoff = otoff = None
|
| 2037 |
+
|
| 2038 |
+
if mytz is ottz:
|
| 2039 |
+
base_compare = True
|
| 2040 |
+
else:
|
| 2041 |
+
myoff = self.utcoffset()
|
| 2042 |
+
otoff = other.utcoffset()
|
| 2043 |
+
# Assume that allow_mixed means that we are called from __eq__
|
| 2044 |
+
if allow_mixed:
|
| 2045 |
+
if myoff != self.replace(fold=not self.fold).utcoffset():
|
| 2046 |
+
return 2
|
| 2047 |
+
if otoff != other.replace(fold=not other.fold).utcoffset():
|
| 2048 |
+
return 2
|
| 2049 |
+
base_compare = myoff == otoff
|
| 2050 |
+
|
| 2051 |
+
if base_compare:
|
| 2052 |
+
return _cmp((self._year, self._month, self._day,
|
| 2053 |
+
self._hour, self._minute, self._second,
|
| 2054 |
+
self._microsecond),
|
| 2055 |
+
(other._year, other._month, other._day,
|
| 2056 |
+
other._hour, other._minute, other._second,
|
| 2057 |
+
other._microsecond))
|
| 2058 |
+
if myoff is None or otoff is None:
|
| 2059 |
+
if allow_mixed:
|
| 2060 |
+
return 2 # arbitrary non-zero value
|
| 2061 |
+
else:
|
| 2062 |
+
raise TypeError("cannot compare naive and aware datetimes")
|
| 2063 |
+
# XXX What follows could be done more efficiently...
|
| 2064 |
+
diff = self - other # this will take offsets into account
|
| 2065 |
+
if diff.days < 0:
|
| 2066 |
+
return -1
|
| 2067 |
+
return diff and 1 or 0
|
| 2068 |
+
|
| 2069 |
+
def __add__(self, other):
|
| 2070 |
+
"Add a datetime and a timedelta."
|
| 2071 |
+
if not isinstance(other, timedelta):
|
| 2072 |
+
return NotImplemented
|
| 2073 |
+
delta = timedelta(self.toordinal(),
|
| 2074 |
+
hours=self._hour,
|
| 2075 |
+
minutes=self._minute,
|
| 2076 |
+
seconds=self._second,
|
| 2077 |
+
microseconds=self._microsecond)
|
| 2078 |
+
delta += other
|
| 2079 |
+
hour, rem = divmod(delta.seconds, 3600)
|
| 2080 |
+
minute, second = divmod(rem, 60)
|
| 2081 |
+
if 0 < delta.days <= _MAXORDINAL:
|
| 2082 |
+
return type(self).combine(date.fromordinal(delta.days),
|
| 2083 |
+
time(hour, minute, second,
|
| 2084 |
+
delta.microseconds,
|
| 2085 |
+
tzinfo=self._tzinfo))
|
| 2086 |
+
raise OverflowError("result out of range")
|
| 2087 |
+
|
| 2088 |
+
__radd__ = __add__
|
| 2089 |
+
|
| 2090 |
+
def __sub__(self, other):
|
| 2091 |
+
"Subtract two datetimes, or a datetime and a timedelta."
|
| 2092 |
+
if not isinstance(other, datetime):
|
| 2093 |
+
if isinstance(other, timedelta):
|
| 2094 |
+
return self + -other
|
| 2095 |
+
return NotImplemented
|
| 2096 |
+
|
| 2097 |
+
days1 = self.toordinal()
|
| 2098 |
+
days2 = other.toordinal()
|
| 2099 |
+
secs1 = self._second + self._minute * 60 + self._hour * 3600
|
| 2100 |
+
secs2 = other._second + other._minute * 60 + other._hour * 3600
|
| 2101 |
+
base = timedelta(days1 - days2,
|
| 2102 |
+
secs1 - secs2,
|
| 2103 |
+
self._microsecond - other._microsecond)
|
| 2104 |
+
if self._tzinfo is other._tzinfo:
|
| 2105 |
+
return base
|
| 2106 |
+
myoff = self.utcoffset()
|
| 2107 |
+
otoff = other.utcoffset()
|
| 2108 |
+
if myoff == otoff:
|
| 2109 |
+
return base
|
| 2110 |
+
if myoff is None or otoff is None:
|
| 2111 |
+
raise TypeError("cannot mix naive and timezone-aware time")
|
| 2112 |
+
return base + otoff - myoff
|
| 2113 |
+
|
| 2114 |
+
def __hash__(self):
|
| 2115 |
+
if self._hashcode == -1:
|
| 2116 |
+
if self.fold:
|
| 2117 |
+
t = self.replace(fold=0)
|
| 2118 |
+
else:
|
| 2119 |
+
t = self
|
| 2120 |
+
tzoff = t.utcoffset()
|
| 2121 |
+
if tzoff is None:
|
| 2122 |
+
self._hashcode = hash(t._getstate()[0])
|
| 2123 |
+
else:
|
| 2124 |
+
days = _ymd2ord(self.year, self.month, self.day)
|
| 2125 |
+
seconds = self.hour * 3600 + self.minute * 60 + self.second
|
| 2126 |
+
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
|
| 2127 |
+
return self._hashcode
|
| 2128 |
+
|
| 2129 |
+
# Pickle support.
|
| 2130 |
+
|
| 2131 |
+
def _getstate(self, protocol=3):
|
| 2132 |
+
yhi, ylo = divmod(self._year, 256)
|
| 2133 |
+
us2, us3 = divmod(self._microsecond, 256)
|
| 2134 |
+
us1, us2 = divmod(us2, 256)
|
| 2135 |
+
m = self._month
|
| 2136 |
+
if self._fold and protocol > 3:
|
| 2137 |
+
m += 128
|
| 2138 |
+
basestate = bytes([yhi, ylo, m, self._day,
|
| 2139 |
+
self._hour, self._minute, self._second,
|
| 2140 |
+
us1, us2, us3])
|
| 2141 |
+
if self._tzinfo is None:
|
| 2142 |
+
return (basestate,)
|
| 2143 |
+
else:
|
| 2144 |
+
return (basestate, self._tzinfo)
|
| 2145 |
+
|
| 2146 |
+
def __setstate(self, string, tzinfo):
|
| 2147 |
+
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
|
| 2148 |
+
raise TypeError("bad tzinfo state arg")
|
| 2149 |
+
(yhi, ylo, m, self._day, self._hour,
|
| 2150 |
+
self._minute, self._second, us1, us2, us3) = string
|
| 2151 |
+
if m > 127:
|
| 2152 |
+
self._fold = 1
|
| 2153 |
+
self._month = m - 128
|
| 2154 |
+
else:
|
| 2155 |
+
self._fold = 0
|
| 2156 |
+
self._month = m
|
| 2157 |
+
self._year = yhi * 256 + ylo
|
| 2158 |
+
self._microsecond = (((us1 << 8) | us2) << 8) | us3
|
| 2159 |
+
self._tzinfo = tzinfo
|
| 2160 |
+
|
| 2161 |
+
def __reduce_ex__(self, protocol):
|
| 2162 |
+
return (self.__class__, self._getstate(protocol))
|
| 2163 |
+
|
| 2164 |
+
def __reduce__(self):
|
| 2165 |
+
return self.__reduce_ex__(2)
|
| 2166 |
+
|
| 2167 |
+
|
| 2168 |
+
datetime.min = datetime(1, 1, 1)
|
| 2169 |
+
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
|
| 2170 |
+
datetime.resolution = timedelta(microseconds=1)
|
| 2171 |
+
|
| 2172 |
+
|
| 2173 |
+
def _isoweek1monday(year):
|
| 2174 |
+
# Helper to calculate the day number of the Monday starting week 1
|
| 2175 |
+
# XXX This could be done more efficiently
|
| 2176 |
+
THURSDAY = 3
|
| 2177 |
+
firstday = _ymd2ord(year, 1, 1)
|
| 2178 |
+
firstweekday = (firstday + 6) % 7 # See weekday() above
|
| 2179 |
+
week1monday = firstday - firstweekday
|
| 2180 |
+
if firstweekday > THURSDAY:
|
| 2181 |
+
week1monday += 7
|
| 2182 |
+
return week1monday
|
| 2183 |
+
|
| 2184 |
+
|
| 2185 |
+
class timezone(tzinfo):
|
| 2186 |
+
__slots__ = '_offset', '_name'
|
| 2187 |
+
|
| 2188 |
+
# Sentinel value to disallow None
|
| 2189 |
+
_Omitted = object()
|
| 2190 |
+
def __new__(cls, offset, name=_Omitted):
|
| 2191 |
+
if not isinstance(offset, timedelta):
|
| 2192 |
+
raise TypeError("offset must be a timedelta")
|
| 2193 |
+
if name is cls._Omitted:
|
| 2194 |
+
if not offset:
|
| 2195 |
+
return cls.utc
|
| 2196 |
+
name = None
|
| 2197 |
+
elif not isinstance(name, str):
|
| 2198 |
+
raise TypeError("name must be a string")
|
| 2199 |
+
if not cls._minoffset <= offset <= cls._maxoffset:
|
| 2200 |
+
raise ValueError("offset must be a timedelta "
|
| 2201 |
+
"strictly between -timedelta(hours=24) and "
|
| 2202 |
+
"timedelta(hours=24).")
|
| 2203 |
+
return cls._create(offset, name)
|
| 2204 |
+
|
| 2205 |
+
@classmethod
|
| 2206 |
+
def _create(cls, offset, name=None):
|
| 2207 |
+
self = tzinfo.__new__(cls)
|
| 2208 |
+
self._offset = offset
|
| 2209 |
+
self._name = name
|
| 2210 |
+
return self
|
| 2211 |
+
|
| 2212 |
+
def __getinitargs__(self):
|
| 2213 |
+
"""pickle support"""
|
| 2214 |
+
if self._name is None:
|
| 2215 |
+
return (self._offset,)
|
| 2216 |
+
return (self._offset, self._name)
|
| 2217 |
+
|
| 2218 |
+
def __eq__(self, other):
|
| 2219 |
+
if isinstance(other, timezone):
|
| 2220 |
+
return self._offset == other._offset
|
| 2221 |
+
return NotImplemented
|
| 2222 |
+
|
| 2223 |
+
def __hash__(self):
|
| 2224 |
+
return hash(self._offset)
|
| 2225 |
+
|
| 2226 |
+
def __repr__(self):
|
| 2227 |
+
"""Convert to formal string, for repr().
|
| 2228 |
+
|
| 2229 |
+
>>> tz = timezone.utc
|
| 2230 |
+
>>> repr(tz)
|
| 2231 |
+
'datetime.timezone.utc'
|
| 2232 |
+
>>> tz = timezone(timedelta(hours=-5), 'EST')
|
| 2233 |
+
>>> repr(tz)
|
| 2234 |
+
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
|
| 2235 |
+
"""
|
| 2236 |
+
if self is self.utc:
|
| 2237 |
+
return 'datetime.timezone.utc'
|
| 2238 |
+
if self._name is None:
|
| 2239 |
+
return "%s.%s(%r)" % (self.__class__.__module__,
|
| 2240 |
+
self.__class__.__qualname__,
|
| 2241 |
+
self._offset)
|
| 2242 |
+
return "%s.%s(%r, %r)" % (self.__class__.__module__,
|
| 2243 |
+
self.__class__.__qualname__,
|
| 2244 |
+
self._offset, self._name)
|
| 2245 |
+
|
| 2246 |
+
def __str__(self):
|
| 2247 |
+
return self.tzname(None)
|
| 2248 |
+
|
| 2249 |
+
def utcoffset(self, dt):
|
| 2250 |
+
if isinstance(dt, datetime) or dt is None:
|
| 2251 |
+
return self._offset
|
| 2252 |
+
raise TypeError("utcoffset() argument must be a datetime instance"
|
| 2253 |
+
" or None")
|
| 2254 |
+
|
| 2255 |
+
def tzname(self, dt):
|
| 2256 |
+
if isinstance(dt, datetime) or dt is None:
|
| 2257 |
+
if self._name is None:
|
| 2258 |
+
return self._name_from_offset(self._offset)
|
| 2259 |
+
return self._name
|
| 2260 |
+
raise TypeError("tzname() argument must be a datetime instance"
|
| 2261 |
+
" or None")
|
| 2262 |
+
|
| 2263 |
+
def dst(self, dt):
|
| 2264 |
+
if isinstance(dt, datetime) or dt is None:
|
| 2265 |
+
return None
|
| 2266 |
+
raise TypeError("dst() argument must be a datetime instance"
|
| 2267 |
+
" or None")
|
| 2268 |
+
|
| 2269 |
+
def fromutc(self, dt):
|
| 2270 |
+
if isinstance(dt, datetime):
|
| 2271 |
+
if dt.tzinfo is not self:
|
| 2272 |
+
raise ValueError("fromutc: dt.tzinfo "
|
| 2273 |
+
"is not self")
|
| 2274 |
+
return dt + self._offset
|
| 2275 |
+
raise TypeError("fromutc() argument must be a datetime instance"
|
| 2276 |
+
" or None")
|
| 2277 |
+
|
| 2278 |
+
_maxoffset = timedelta(hours=24, microseconds=-1)
|
| 2279 |
+
_minoffset = -_maxoffset
|
| 2280 |
+
|
| 2281 |
+
@staticmethod
|
| 2282 |
+
def _name_from_offset(delta):
|
| 2283 |
+
if not delta:
|
| 2284 |
+
return 'UTC'
|
| 2285 |
+
if delta < timedelta(0):
|
| 2286 |
+
sign = '-'
|
| 2287 |
+
delta = -delta
|
| 2288 |
+
else:
|
| 2289 |
+
sign = '+'
|
| 2290 |
+
hours, rest = divmod(delta, timedelta(hours=1))
|
| 2291 |
+
minutes, rest = divmod(rest, timedelta(minutes=1))
|
| 2292 |
+
seconds = rest.seconds
|
| 2293 |
+
microseconds = rest.microseconds
|
| 2294 |
+
if microseconds:
|
| 2295 |
+
return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
|
| 2296 |
+
f'.{microseconds:06d}')
|
| 2297 |
+
if seconds:
|
| 2298 |
+
return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
|
| 2299 |
+
return f'UTC{sign}{hours:02d}:{minutes:02d}'
|
| 2300 |
+
|
| 2301 |
+
timezone.utc = timezone._create(timedelta(0))
|
| 2302 |
+
# bpo-37642: These attributes are rounded to the nearest minute for backwards
|
| 2303 |
+
# compatibility, even though the constructor will accept a wider range of
|
| 2304 |
+
# values. This may change in the future.
|
| 2305 |
+
timezone.min = timezone._create(-timedelta(hours=23, minutes=59))
|
| 2306 |
+
timezone.max = timezone._create(timedelta(hours=23, minutes=59))
|
| 2307 |
+
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
|
| 2308 |
+
|
| 2309 |
+
# Some time zone algebra. For a datetime x, let
|
| 2310 |
+
# x.n = x stripped of its timezone -- its naive time.
|
| 2311 |
+
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
|
| 2312 |
+
# return None
|
| 2313 |
+
# x.d = x.dst(), and assuming that doesn't raise an exception or
|
| 2314 |
+
# return None
|
| 2315 |
+
# x.s = x's standard offset, x.o - x.d
|
| 2316 |
+
#
|
| 2317 |
+
# Now some derived rules, where k is a duration (timedelta).
|
| 2318 |
+
#
|
| 2319 |
+
# 1. x.o = x.s + x.d
|
| 2320 |
+
# This follows from the definition of x.s.
|
| 2321 |
+
#
|
| 2322 |
+
# 2. If x and y have the same tzinfo member, x.s = y.s.
|
| 2323 |
+
# This is actually a requirement, an assumption we need to make about
|
| 2324 |
+
# sane tzinfo classes.
|
| 2325 |
+
#
|
| 2326 |
+
# 3. The naive UTC time corresponding to x is x.n - x.o.
|
| 2327 |
+
# This is again a requirement for a sane tzinfo class.
|
| 2328 |
+
#
|
| 2329 |
+
# 4. (x+k).s = x.s
|
| 2330 |
+
# This follows from #2, and that datetime.timetz+timedelta preserves tzinfo.
|
| 2331 |
+
#
|
| 2332 |
+
# 5. (x+k).n = x.n + k
|
| 2333 |
+
# Again follows from how arithmetic is defined.
|
| 2334 |
+
#
|
| 2335 |
+
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
|
| 2336 |
+
# (meaning that the various tzinfo methods exist, and don't blow up or return
|
| 2337 |
+
# None when called).
|
| 2338 |
+
#
|
| 2339 |
+
# The function wants to return a datetime y with timezone tz, equivalent to x.
|
| 2340 |
+
# x is already in UTC.
|
| 2341 |
+
#
|
| 2342 |
+
# By #3, we want
|
| 2343 |
+
#
|
| 2344 |
+
# y.n - y.o = x.n [1]
|
| 2345 |
+
#
|
| 2346 |
+
# The algorithm starts by attaching tz to x.n, and calling that y. So
|
| 2347 |
+
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
|
| 2348 |
+
# becomes true; in effect, we want to solve [2] for k:
|
| 2349 |
+
#
|
| 2350 |
+
# (y+k).n - (y+k).o = x.n [2]
|
| 2351 |
+
#
|
| 2352 |
+
# By #1, this is the same as
|
| 2353 |
+
#
|
| 2354 |
+
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
|
| 2355 |
+
#
|
| 2356 |
+
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
|
| 2357 |
+
# Substituting that into [3],
|
| 2358 |
+
#
|
| 2359 |
+
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
|
| 2360 |
+
# k - (y+k).s - (y+k).d = 0; rearranging,
|
| 2361 |
+
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
|
| 2362 |
+
# k = y.s - (y+k).d
|
| 2363 |
+
#
|
| 2364 |
+
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
|
| 2365 |
+
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
|
| 2366 |
+
# very large, since all offset-returning methods return a duration of magnitude
|
| 2367 |
+
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
|
| 2368 |
+
# be 0, so ignoring it has no consequence then.
|
| 2369 |
+
#
|
| 2370 |
+
# In any case, the new value is
|
| 2371 |
+
#
|
| 2372 |
+
# z = y + y.s [4]
|
| 2373 |
+
#
|
| 2374 |
+
# It's helpful to step back at look at [4] from a higher level: it's simply
|
| 2375 |
+
# mapping from UTC to tz's standard time.
|
| 2376 |
+
#
|
| 2377 |
+
# At this point, if
|
| 2378 |
+
#
|
| 2379 |
+
# z.n - z.o = x.n [5]
|
| 2380 |
+
#
|
| 2381 |
+
# we have an equivalent time, and are almost done. The insecurity here is
|
| 2382 |
+
# at the start of daylight time. Picture US Eastern for concreteness. The wall
|
| 2383 |
+
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
|
| 2384 |
+
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
|
| 2385 |
+
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
|
| 2386 |
+
# on the day DST starts. We want to return the 1:MM EST spelling because that's
|
| 2387 |
+
# the only spelling that makes sense on the local wall clock.
|
| 2388 |
+
#
|
| 2389 |
+
# In fact, if [5] holds at this point, we do have the standard-time spelling,
|
| 2390 |
+
# but that takes a bit of proof. We first prove a stronger result. What's the
|
| 2391 |
+
# difference between the LHS and RHS of [5]? Let
|
| 2392 |
+
#
|
| 2393 |
+
# diff = x.n - (z.n - z.o) [6]
|
| 2394 |
+
#
|
| 2395 |
+
# Now
|
| 2396 |
+
# z.n = by [4]
|
| 2397 |
+
# (y + y.s).n = by #5
|
| 2398 |
+
# y.n + y.s = since y.n = x.n
|
| 2399 |
+
# x.n + y.s = since z and y are have the same tzinfo member,
|
| 2400 |
+
# y.s = z.s by #2
|
| 2401 |
+
# x.n + z.s
|
| 2402 |
+
#
|
| 2403 |
+
# Plugging that back into [6] gives
|
| 2404 |
+
#
|
| 2405 |
+
# diff =
|
| 2406 |
+
# x.n - ((x.n + z.s) - z.o) = expanding
|
| 2407 |
+
# x.n - x.n - z.s + z.o = cancelling
|
| 2408 |
+
# - z.s + z.o = by #2
|
| 2409 |
+
# z.d
|
| 2410 |
+
#
|
| 2411 |
+
# So diff = z.d.
|
| 2412 |
+
#
|
| 2413 |
+
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
|
| 2414 |
+
# spelling we wanted in the endcase described above. We're done. Contrarily,
|
| 2415 |
+
# if z.d = 0, then we have a UTC equivalent, and are also done.
|
| 2416 |
+
#
|
| 2417 |
+
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
|
| 2418 |
+
# add to z (in effect, z is in tz's standard time, and we need to shift the
|
| 2419 |
+
# local clock into tz's daylight time).
|
| 2420 |
+
#
|
| 2421 |
+
# Let
|
| 2422 |
+
#
|
| 2423 |
+
# z' = z + z.d = z + diff [7]
|
| 2424 |
+
#
|
| 2425 |
+
# and we can again ask whether
|
| 2426 |
+
#
|
| 2427 |
+
# z'.n - z'.o = x.n [8]
|
| 2428 |
+
#
|
| 2429 |
+
# If so, we're done. If not, the tzinfo class is insane, according to the
|
| 2430 |
+
# assumptions we've made. This also requires a bit of proof. As before, let's
|
| 2431 |
+
# compute the difference between the LHS and RHS of [8] (and skipping some of
|
| 2432 |
+
# the justifications for the kinds of substitutions we've done several times
|
| 2433 |
+
# already):
|
| 2434 |
+
#
|
| 2435 |
+
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
|
| 2436 |
+
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
|
| 2437 |
+
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
|
| 2438 |
+
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
|
| 2439 |
+
# - z.n + z.n - z.o + z'.o = cancel z.n
|
| 2440 |
+
# - z.o + z'.o = #1 twice
|
| 2441 |
+
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
|
| 2442 |
+
# z'.d - z.d
|
| 2443 |
+
#
|
| 2444 |
+
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
|
| 2445 |
+
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
|
| 2446 |
+
# return z', not bothering to compute z'.d.
|
| 2447 |
+
#
|
| 2448 |
+
# How could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by
|
| 2449 |
+
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
|
| 2450 |
+
# would have to change the result dst() returns: we start in DST, and moving
|
| 2451 |
+
# a little further into it takes us out of DST.
|
| 2452 |
+
#
|
| 2453 |
+
# There isn't a sane case where this can happen. The closest it gets is at
|
| 2454 |
+
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
|
| 2455 |
+
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
|
| 2456 |
+
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
|
| 2457 |
+
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
|
| 2458 |
+
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
|
| 2459 |
+
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
|
| 2460 |
+
# standard time. Since that's what the local clock *does*, we want to map both
|
| 2461 |
+
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
|
| 2462 |
+
# in local time, but so it goes -- it's the way the local clock works.
|
| 2463 |
+
#
|
| 2464 |
+
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
|
| 2465 |
+
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
|
| 2466 |
+
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
|
| 2467 |
+
# (correctly) concludes that z' is not UTC-equivalent to x.
|
| 2468 |
+
#
|
| 2469 |
+
# Because we know z.d said z was in daylight time (else [5] would have held and
|
| 2470 |
+
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
|
| 2471 |
+
# and we have stopped then), and there are only 2 possible values dst() can
|
| 2472 |
+
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
|
| 2473 |
+
# but the reasoning doesn't depend on the example -- it depends on there being
|
| 2474 |
+
# two possible dst() outcomes, one zero and the other non-zero). Therefore
|
| 2475 |
+
# z' must be in standard time, and is the spelling we want in this case.
|
| 2476 |
+
#
|
| 2477 |
+
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
|
| 2478 |
+
# concerned (because it takes z' as being in standard time rather than the
|
| 2479 |
+
# daylight time we intend here), but returning it gives the real-life "local
|
| 2480 |
+
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
|
| 2481 |
+
# tz.
|
| 2482 |
+
#
|
| 2483 |
+
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
|
| 2484 |
+
# the 1:MM standard time spelling we want.
|
| 2485 |
+
#
|
| 2486 |
+
# So how can this break? One of the assumptions must be violated. Two
|
| 2487 |
+
# possibilities:
|
| 2488 |
+
#
|
| 2489 |
+
# 1) [2] effectively says that y.s is invariant across all y belong to a given
|
| 2490 |
+
# time zone. This isn't true if, for political reasons or continental drift,
|
| 2491 |
+
# a region decides to change its base offset from UTC.
|
| 2492 |
+
#
|
| 2493 |
+
# 2) There may be versions of "double daylight" time where the tail end of
|
| 2494 |
+
# the analysis gives up a step too early. I haven't thought about that
|
| 2495 |
+
# enough to say.
|
| 2496 |
+
#
|
| 2497 |
+
# In any case, it's clear that the default fromutc() is strong enough to handle
|
| 2498 |
+
# "almost all" time zones: so long as the standard offset is invariant, it
|
| 2499 |
+
# doesn't matter if daylight time transition points change from year to year, or
|
| 2500 |
+
# if daylight time is skipped in some years; it doesn't matter how large or
|
| 2501 |
+
# small dst() may get within its bounds; and it doesn't even matter if some
|
| 2502 |
+
# perverse time zone returns a negative dst()). So a breaking case must be
|
| 2503 |
+
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
|
| 2504 |
+
|
| 2505 |
+
try:
|
| 2506 |
+
from _datetime import *
|
| 2507 |
+
except ImportError:
|
| 2508 |
+
pass
|
| 2509 |
+
else:
|
| 2510 |
+
# Clean up unused names
|
| 2511 |
+
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
|
| 2512 |
+
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
|
| 2513 |
+
_check_date_fields, _check_time_fields,
|
| 2514 |
+
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
|
| 2515 |
+
_date_class, _days_before_month, _days_before_year, _days_in_month,
|
| 2516 |
+
_format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math,
|
| 2517 |
+
_ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
|
| 2518 |
+
_divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
|
| 2519 |
+
_parse_hh_mm_ss_ff, _IsoCalendarDate)
|
| 2520 |
+
# XXX Since import * above excludes names that start with _,
|
| 2521 |
+
# docstring does not get overwritten. In the future, it may be
|
| 2522 |
+
# appropriate to maintain a single module level docstring and
|
| 2523 |
+
# remove the following line.
|
| 2524 |
+
from _datetime import __doc__
|
llava/lib/python3.10/filecmp.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities for comparing files and directories.
|
| 2 |
+
|
| 3 |
+
Classes:
|
| 4 |
+
dircmp
|
| 5 |
+
|
| 6 |
+
Functions:
|
| 7 |
+
cmp(f1, f2, shallow=True) -> int
|
| 8 |
+
cmpfiles(a, b, common) -> ([], [], [])
|
| 9 |
+
clear_cache()
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import os
|
| 14 |
+
import stat
|
| 15 |
+
from itertools import filterfalse
|
| 16 |
+
from types import GenericAlias
|
| 17 |
+
|
| 18 |
+
__all__ = ['clear_cache', 'cmp', 'dircmp', 'cmpfiles', 'DEFAULT_IGNORES']
|
| 19 |
+
|
| 20 |
+
# Cache of earlier content comparisons, keyed by (f1, f2, sig1, sig2);
# see cmp() below.
_cache = {}
# Chunk size in bytes used when reading file contents for comparison.
BUFSIZE = 8*1024

# Directory entries skipped by default (version-control metadata, caches).
DEFAULT_IGNORES = [
    'RCS', 'CVS', 'tags', '.git', '.hg', '.bzr', '_darcs', '__pycache__']

def clear_cache():
    """Clear the filecmp cache."""
    _cache.clear()
|
| 29 |
+
|
| 30 |
+
def cmp(f1, f2, shallow=True):
    """Compare two files.

    f1, f2 -- names of the files to compare.

    shallow -- when true (the default), files whose stat signatures
               (type, size, mtime) match are treated as identical without
               reading their contents; otherwise contents are compared
               whenever the sizes agree.

    Returns True if the files are considered the same, False otherwise.

    Content-comparison results are cached, keyed by the file names plus
    their stat signatures, so a changed file invalidates its own entry.
    Call clear_cache() to drop all cached results.
    """
    sig1 = _sig(os.stat(f1))
    sig2 = _sig(os.stat(f2))
    # Anything that is not a regular file never compares equal.
    if not (sig1[0] == stat.S_IFREG == sig2[0]):
        return False
    if shallow and sig1 == sig2:
        return True
    if sig1[1] != sig2[1]:
        return False

    key = (f1, f2, sig1, sig2)
    result = _cache.get(key)
    if result is None:
        result = _do_cmp(f1, f2)
        if len(_cache) > 100:      # keep the cache bounded
            clear_cache()
        _cache[key] = result
    return result
|
| 69 |
+
|
| 70 |
+
def _sig(st):
|
| 71 |
+
return (stat.S_IFMT(st.st_mode),
|
| 72 |
+
st.st_size,
|
| 73 |
+
st.st_mtime)
|
| 74 |
+
|
| 75 |
+
def _do_cmp(f1, f2):
    """Read both files chunk by chunk; True iff their bytes are identical."""
    chunk = BUFSIZE
    with open(f1, 'rb') as left, open(f2, 'rb') as right:
        while True:
            block1 = left.read(chunk)
            block2 = right.read(chunk)
            if block1 != block2:
                return False
            if not block1:
                # Both streams exhausted simultaneously -> equal.
                return True
|
| 85 |
+
|
| 86 |
+
# Directory comparison class.
#
class dircmp:
    """A class that manages the comparison of 2 directories.

    dircmp(a, b, ignore=None, hide=None)
      A and B are directories.
      IGNORE is a list of names to ignore,
        defaults to DEFAULT_IGNORES.
      HIDE is a list of names to hide,
        defaults to [os.curdir, os.pardir].

    High level usage:
      x = dircmp(dir1, dir2)
      x.report() -> prints a report on the differences between dir1 and dir2
       or
      x.report_partial_closure() -> prints report on differences between dir1
            and dir2, and reports on common immediate subdirectories.
      x.report_full_closure() -> like report_partial_closure,
            but fully recursive.

    Attributes:
     left_list, right_list: The files in dir1 and dir2,
        filtered by hide and ignore.
     common: a list of names in both dir1 and dir2.
     left_only, right_only: names only in dir1, dir2.
     common_dirs: subdirectories in both dir1 and dir2.
     common_files: files in both dir1 and dir2.
     common_funny: names in both dir1 and dir2 where the type differs between
        dir1 and dir2, or the name is not stat-able.
     same_files: list of identical files.
     diff_files: list of filenames which differ.
     funny_files: list of files which could not be compared.
     subdirs: a dictionary of dircmp instances (or MyDirCmp instances if this
       object is of type MyDirCmp, a subclass of dircmp), keyed by names
       in common_dirs.

    All result attributes are computed lazily: the first access triggers the
    appropriate phaseN() method via __getattr__ below.
    """

    def __init__(self, a, b, ignore=None, hide=None): # Initialize
        self.left = a
        self.right = b
        if hide is None:
            self.hide = [os.curdir, os.pardir] # Names never to be shown
        else:
            self.hide = hide
        if ignore is None:
            self.ignore = DEFAULT_IGNORES
        else:
            self.ignore = ignore

    def phase0(self): # Compare everything except common subdirectories
        # List both directories, minus hidden and ignored names.
        self.left_list = _filter(os.listdir(self.left),
                                 self.hide+self.ignore)
        self.right_list = _filter(os.listdir(self.right),
                                  self.hide+self.ignore)
        self.left_list.sort()
        self.right_list.sort()

    def phase1(self): # Compute common names
        # Compare case-insensitively where the OS does (normcase), but keep
        # and report the original spellings from each side.
        a = dict(zip(map(os.path.normcase, self.left_list), self.left_list))
        b = dict(zip(map(os.path.normcase, self.right_list), self.right_list))
        self.common = list(map(a.__getitem__, filter(b.__contains__, a)))
        self.left_only = list(map(a.__getitem__, filterfalse(b.__contains__, a)))
        self.right_only = list(map(b.__getitem__, filterfalse(a.__contains__, b)))

    def phase2(self): # Distinguish files, directories, funnies
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []

        for x in self.common:
            a_path = os.path.join(self.left, x)
            b_path = os.path.join(self.right, x)

            ok = 1
            try:
                a_stat = os.stat(a_path)
            except OSError:
                # print('Can\'t stat', a_path, ':', why.args[1])
                ok = 0
            try:
                b_stat = os.stat(b_path)
            except OSError:
                # print('Can\'t stat', b_path, ':', why.args[1])
                ok = 0

            if ok:
                a_type = stat.S_IFMT(a_stat.st_mode)
                b_type = stat.S_IFMT(b_stat.st_mode)
                if a_type != b_type:
                    self.common_funny.append(x)
                elif stat.S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif stat.S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    # Same type on both sides, but not file/dir (fifo, etc.).
                    self.common_funny.append(x)
            else:
                # At least one side could not be stat'ed.
                self.common_funny.append(x)

    def phase3(self): # Find out differences between common files
        xx = cmpfiles(self.left, self.right, self.common_files)
        self.same_files, self.diff_files, self.funny_files = xx

    def phase4(self): # Find out differences between common subdirectories
        # A new dircmp (or MyDirCmp if dircmp was subclassed) object is created
        # for each common subdirectory,
        # these are stored in a dictionary indexed by filename.
        # The hide and ignore properties are inherited from the parent
        self.subdirs = {}
        for x in self.common_dirs:
            a_x = os.path.join(self.left, x)
            b_x = os.path.join(self.right, x)
            self.subdirs[x] = self.__class__(a_x, b_x, self.ignore, self.hide)

    def phase4_closure(self): # Recursively call phase4() on subdirectories
        self.phase4()
        for sd in self.subdirs.values():
            sd.phase4_closure()

    def report(self): # Print a report on the differences between a and b
        # Output format is purposely lousy
        print('diff', self.left, self.right)
        if self.left_only:
            self.left_only.sort()
            print('Only in', self.left, ':', self.left_only)
        if self.right_only:
            self.right_only.sort()
            print('Only in', self.right, ':', self.right_only)
        if self.same_files:
            self.same_files.sort()
            print('Identical files :', self.same_files)
        if self.diff_files:
            self.diff_files.sort()
            print('Differing files :', self.diff_files)
        if self.funny_files:
            self.funny_files.sort()
            print('Trouble with common files :', self.funny_files)
        if self.common_dirs:
            self.common_dirs.sort()
            print('Common subdirectories :', self.common_dirs)
        if self.common_funny:
            self.common_funny.sort()
            print('Common funny cases :', self.common_funny)

    def report_partial_closure(self): # Print reports on self and on subdirs
        self.report()
        for sd in self.subdirs.values():
            print()
            sd.report()

    def report_full_closure(self): # Report on self and subdirs recursively
        self.report()
        for sd in self.subdirs.values():
            print()
            sd.report_full_closure()

    # Maps each lazily-computed attribute name to the phase method that
    # produces it; consumed by __getattr__ below.
    methodmap = dict(subdirs=phase4,
                     same_files=phase3, diff_files=phase3, funny_files=phase3,
                     common_dirs = phase2, common_files=phase2, common_funny=phase2,
                     common=phase1, left_only=phase1, right_only=phase1,
                     left_list=phase0, right_list=phase0)

    def __getattr__(self, attr):
        # Lazy-evaluation protocol: run the phase that computes the missing
        # attribute (the phase sets it on self), then fetch the fresh value.
        if attr not in self.methodmap:
            raise AttributeError(attr)
        self.methodmap[attr](self)
        return getattr(self, attr)

    # Support dircmp[...] in type annotations (PEP 585-style generics).
    __class_getitem__ = classmethod(GenericAlias)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def cmpfiles(a, b, common, shallow=True):
    """Compare common files in two directories.

    a, b -- directory names
    common -- list of file names found in both directories
    shallow -- if true, do comparison based solely on stat() information

    Returns a tuple of three lists:
      files that compare equal
      files that are different
      filenames that aren't regular files.

    """
    equal, different, funny = [], [], []
    buckets = (equal, different, funny)
    for name in common:
        left_path = os.path.join(a, name)
        right_path = os.path.join(b, name)
        # _cmp() returns 0 (equal), 1 (different) or 2 (funny), which
        # indexes straight into the result tuple.
        buckets[_cmp(left_path, right_path, shallow)].append(name)
    return buckets
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
# Compare two files.
# Return:
#       0 for equal
#       1 for different
#       2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
    # abs=abs / cmp=cmp bind those names as defaults at definition time --
    # a classic local-lookup micro-optimization; `cmp` here is this
    # module's cmp() function, not a builtin.
    try:
        # cmp() returns a bool; `not abs(...)` maps equal -> False (0) and
        # different -> True (1), matching the tuple index cmpfiles() uses.
        return not abs(cmp(a, b, sh))
    except OSError:
        return 2
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# Return a copy with items that occur in skip removed.
|
| 293 |
+
#
|
| 294 |
+
def _filter(flist, skip):
|
| 295 |
+
return list(filterfalse(skip.__contains__, flist))
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
# Demonstration and testing.
#
def demo():
    """Command-line demo: compare the two directories given as arguments.

    Usage: filecmp.py [-r] dir1 dir2   (-r recurses into subdirectories).
    """
    import sys
    import getopt
    options, args = getopt.getopt(sys.argv[1:], 'r')
    if len(args) != 2:
        raise getopt.GetoptError('need exactly two args', None)
    comparison = dircmp(args[0], args[1])
    recursive = ('-r', '') in options
    if recursive:
        comparison.report_full_closure()
    else:
        comparison.report()

if __name__ == '__main__':
    demo()
|
llava/lib/python3.10/gettext.py
ADDED
|
@@ -0,0 +1,788 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Internationalization and localization support.
|
| 2 |
+
|
| 3 |
+
This module provides internationalization (I18N) and localization (L10N)
|
| 4 |
+
support for your Python programs by providing an interface to the GNU gettext
|
| 5 |
+
message catalog library.
|
| 6 |
+
|
| 7 |
+
I18N refers to the operation by which a program is made aware of multiple
|
| 8 |
+
languages. L10N refers to the adaptation of your program, once
|
| 9 |
+
internationalized, to the local language and cultural habits.
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
# This module represents the integration of work, contributions, feedback, and
|
| 14 |
+
# suggestions from the following people:
|
| 15 |
+
#
|
| 16 |
+
# Martin von Loewis, who wrote the initial implementation of the underlying
|
| 17 |
+
# C-based libintlmodule (later renamed _gettext), along with a skeletal
|
| 18 |
+
# gettext.py implementation.
|
| 19 |
+
#
|
| 20 |
+
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
|
| 21 |
+
# which also included a pure-Python implementation to read .mo files if
|
| 22 |
+
# intlmodule wasn't available.
|
| 23 |
+
#
|
| 24 |
+
# James Henstridge, who also wrote a gettext.py module, which has some
|
| 25 |
+
# interesting, but currently unsupported experimental features: the notion of
|
| 26 |
+
# a Catalog class and instances, and the ability to add to a catalog file via
|
| 27 |
+
# a Python API.
|
| 28 |
+
#
|
| 29 |
+
# Barry Warsaw integrated these modules, wrote the .install() API and code,
|
| 30 |
+
# and conformed all C and Python code to Python's coding standards.
|
| 31 |
+
#
|
| 32 |
+
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
|
| 33 |
+
# module.
|
| 34 |
+
#
|
| 35 |
+
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
|
| 36 |
+
#
|
| 37 |
+
# TODO:
|
| 38 |
+
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
|
| 39 |
+
# memory, but that's probably bad for large translated programs. Instead,
|
| 40 |
+
# the lexical sort of original strings in GNU .mo files should be exploited
|
| 41 |
+
# to do binary searches and lazy initializations. Or you might want to use
|
| 42 |
+
# the undocumented double-hash algorithm for .mo files with hash tables, but
|
| 43 |
+
# you'll need to study the GNU gettext code to do this.
|
| 44 |
+
#
|
| 45 |
+
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
|
| 46 |
+
# find this format documented anywhere.
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
import os
|
| 50 |
+
import re
|
| 51 |
+
import sys
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# Public API of the module; also controls `from gettext import *`.
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
           'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
           'bind_textdomain_codeset',
           'dgettext', 'dngettext', 'gettext', 'lgettext', 'ldgettext',
           'ldngettext', 'lngettext', 'ngettext',
           'pgettext', 'dpgettext', 'npgettext', 'dnpgettext',
           ]

# Default directory searched for .mo catalogs, under the installation prefix.
_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
|
| 63 |
+
|
| 64 |
+
# Expression parsing for plural form selection.
|
| 65 |
+
#
|
| 66 |
+
# The gettext library supports a small subset of C syntax. The only
|
| 67 |
+
# incompatible difference is that integer literals starting with zero are
|
| 68 |
+
# decimal.
|
| 69 |
+
#
|
| 70 |
+
# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms
|
| 71 |
+
# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y
|
| 72 |
+
|
| 73 |
+
_token_pattern = re.compile(r"""
|
| 74 |
+
(?P<WHITESPACES>[ \t]+) | # spaces and horizontal tabs
|
| 75 |
+
(?P<NUMBER>[0-9]+\b) | # decimal integer
|
| 76 |
+
(?P<NAME>n\b) | # only n is allowed
|
| 77 |
+
(?P<PARENTHESIS>[()]) |
|
| 78 |
+
(?P<OPERATOR>[-*/%+?:]|[><!]=?|==|&&|\|\|) | # !, *, /, %, +, -, <, >,
|
| 79 |
+
# <=, >=, ==, !=, &&, ||,
|
| 80 |
+
# ? :
|
| 81 |
+
# unary and bitwise ops
|
| 82 |
+
# not allowed
|
| 83 |
+
(?P<INVALID>\w+|.) # invalid token
|
| 84 |
+
""", re.VERBOSE|re.DOTALL)
|
| 85 |
+
|
| 86 |
+
def _tokenize(plural):
|
| 87 |
+
for mo in re.finditer(_token_pattern, plural):
|
| 88 |
+
kind = mo.lastgroup
|
| 89 |
+
if kind == 'WHITESPACES':
|
| 90 |
+
continue
|
| 91 |
+
value = mo.group(kind)
|
| 92 |
+
if kind == 'INVALID':
|
| 93 |
+
raise ValueError('invalid token in plural form: %s' % value)
|
| 94 |
+
yield value
|
| 95 |
+
yield ''
|
| 96 |
+
|
| 97 |
+
def _error(value):
|
| 98 |
+
if value:
|
| 99 |
+
return ValueError('unexpected token in plural form: %s' % value)
|
| 100 |
+
else:
|
| 101 |
+
return ValueError('unexpected end of plural form')
|
| 102 |
+
|
| 103 |
+
# Operator precedence tiers, weakest-binding first; the dict comprehension
# below flattens this into {operator: precedence} with levels 1..6.
_binary_ops = (
    ('||',),
    ('&&',),
    ('==', '!='),
    ('<', '>', '<=', '>='),
    ('+', '-'),
    ('*', '/', '%'),
)
_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops}
# C operators whose Python spelling differs; '/' must be floor division
# to match C integer semantics.
_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'}

def _parse(tokens, priority=-1):
    """Precedence-climbing parse of a C plural expression.

    Consumes tokens from the _tokenize() generator and returns a pair
    (python_expression_string, lookahead_token), where the lookahead is the
    first token this call did not consume.  *priority* is the minimum
    operator precedence this call may bind; weaker operators are left for
    the caller.
    """
    result = ''
    nexttok = next(tokens)
    # Prefix: each C '!' becomes a Python 'not '.
    while nexttok == '!':
        result += 'not '
        nexttok = next(tokens)

    # Primary: parenthesized subexpression, the variable n, or an integer.
    if nexttok == '(':
        sub, nexttok = _parse(tokens)
        result = '%s(%s)' % (result, sub)
        if nexttok != ')':
            raise ValueError('unbalanced parenthesis in plural form')
    elif nexttok == 'n':
        result = '%s%s' % (result, nexttok)
    else:
        try:
            value = int(nexttok, 10)
        except ValueError:
            raise _error(nexttok) from None
        result = '%s%d' % (result, value)
    nexttok = next(tokens)

    # Binary operators: j tracks the precedence of the previous operator so
    # chained comparisons can be broken up (C evaluates them pairwise).
    j = 100
    while nexttok in _binary_ops:
        i = _binary_ops[nexttok]
        if i < priority:
            break
        # Break chained comparisons
        if i in (3, 4) and j in (3, 4):  # '==', '!=', '<', '>', '<=', '>='
            result = '(%s)' % result
        # Replace some C operators by their Python equivalents
        op = _c2py_ops.get(nexttok, nexttok)
        right, nexttok = _parse(tokens, i + 1)
        result = '%s %s %s' % (result, op, right)
        j = i
    if j == priority == 4:  # '<', '>', '<=', '>='
        result = '(%s)' % result

    # C ternary cond ? a : b becomes Python's a if cond else b.
    if nexttok == '?' and priority <= 0:
        if_true, nexttok = _parse(tokens, 0)
        if nexttok != ':':
            raise _error(nexttok)
        if_false, nexttok = _parse(tokens)
        result = '%s if %s else %s' % (if_true, result, if_false)
        if priority == 0:
            result = '(%s)' % result

    return result, nexttok
|
| 162 |
+
|
| 163 |
+
def _as_int(n):
|
| 164 |
+
try:
|
| 165 |
+
i = round(n)
|
| 166 |
+
except TypeError:
|
| 167 |
+
raise TypeError('Plural value must be an integer, got %s' %
|
| 168 |
+
(n.__class__.__name__,)) from None
|
| 169 |
+
import warnings
|
| 170 |
+
warnings.warn('Plural value must be an integer, got %s' %
|
| 171 |
+
(n.__class__.__name__,),
|
| 172 |
+
DeprecationWarning, 4)
|
| 173 |
+
return n
|
| 174 |
+
|
| 175 |
+
def c2py(plural):
    """Gets a C expression as used in PO files for plural forms and returns a
    Python function that implements an equivalent expression.
    """

    if len(plural) > 1000:
        # Guard against pathological catalog headers before parsing.
        raise ValueError('plural form expression is too long')
    try:
        result, nexttok = _parse(_tokenize(plural))
        if nexttok:
            # A leftover token means the expression did not parse completely.
            raise _error(nexttok)

        # Reject deeply nested parentheses: the translated expression is
        # compiled below, and nesting depth is bounded by the compiler.
        depth = 0
        for c in result:
            if c == '(':
                depth += 1
                if depth > 20:
                    # Python compiler limit is about 90.
                    # The most complex example has 2.
                    raise ValueError('plural form expression is too complex')
            elif c == ')':
                depth -= 1

        # Compile the translated expression into a real function; _as_int
        # handles (deprecated) non-integer arguments.
        ns = {'_as_int': _as_int}
        exec('''if True:
            def func(n):
                if not isinstance(n, int):
                    n = _as_int(n)
                return int(%s)
            ''' % result, ns)
        return ns['func']
    except RecursionError:
        # Recursion error can be raised in _parse() or exec().
        raise ValueError('plural form expression is too complex')
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def _expand_lang(loc):
|
| 212 |
+
import locale
|
| 213 |
+
loc = locale.normalize(loc)
|
| 214 |
+
COMPONENT_CODESET = 1 << 0
|
| 215 |
+
COMPONENT_TERRITORY = 1 << 1
|
| 216 |
+
COMPONENT_MODIFIER = 1 << 2
|
| 217 |
+
# split up the locale into its base components
|
| 218 |
+
mask = 0
|
| 219 |
+
pos = loc.find('@')
|
| 220 |
+
if pos >= 0:
|
| 221 |
+
modifier = loc[pos:]
|
| 222 |
+
loc = loc[:pos]
|
| 223 |
+
mask |= COMPONENT_MODIFIER
|
| 224 |
+
else:
|
| 225 |
+
modifier = ''
|
| 226 |
+
pos = loc.find('.')
|
| 227 |
+
if pos >= 0:
|
| 228 |
+
codeset = loc[pos:]
|
| 229 |
+
loc = loc[:pos]
|
| 230 |
+
mask |= COMPONENT_CODESET
|
| 231 |
+
else:
|
| 232 |
+
codeset = ''
|
| 233 |
+
pos = loc.find('_')
|
| 234 |
+
if pos >= 0:
|
| 235 |
+
territory = loc[pos:]
|
| 236 |
+
loc = loc[:pos]
|
| 237 |
+
mask |= COMPONENT_TERRITORY
|
| 238 |
+
else:
|
| 239 |
+
territory = ''
|
| 240 |
+
language = loc
|
| 241 |
+
ret = []
|
| 242 |
+
for i in range(mask+1):
|
| 243 |
+
if not (i & ~mask): # if all components for this combo exist ...
|
| 244 |
+
val = language
|
| 245 |
+
if i & COMPONENT_TERRITORY: val += territory
|
| 246 |
+
if i & COMPONENT_CODESET: val += codeset
|
| 247 |
+
if i & COMPONENT_MODIFIER: val += modifier
|
| 248 |
+
ret.append(val)
|
| 249 |
+
ret.reverse()
|
| 250 |
+
return ret
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class NullTranslations:
    """Base translation class: the identity translation.

    Serves two purposes: it defines the translations API that catalog
    readers such as GNUTranslations implement, and it acts as the no-op
    fallback when no catalog is found -- every lookup returns the message
    unchanged, or defers to a chained fallback translation.
    """

    def __init__(self, fp=None):
        # Catalog metadata/charsets; subclasses populate these in _parse().
        self._info = {}
        self._charset = None
        self._output_charset = None
        self._fallback = None
        if fp is not None:
            self._parse(fp)

    def _parse(self, fp):
        # Subclass hook: read a message catalog from the open file *fp*.
        pass

    def add_fallback(self, fallback):
        # Append at the end of the fallback chain so earlier catalogs win.
        if self._fallback:
            self._fallback.add_fallback(fallback)
        else:
            self._fallback = fallback

    def gettext(self, message):
        if self._fallback:
            return self._fallback.gettext(message)
        return message

    def lgettext(self, message):
        import warnings
        warnings.warn('lgettext() is deprecated, use gettext() instead',
                      DeprecationWarning, 2)
        import locale
        if self._fallback:
            with warnings.catch_warnings():
                # Suppress the duplicate deprecation warning the fallback
                # would emit for this same call.
                warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
                                        DeprecationWarning)
                return self._fallback.lgettext(message)
        if self._output_charset:
            return message.encode(self._output_charset)
        return message.encode(locale.getpreferredencoding())

    def ngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ngettext(msgid1, msgid2, n)
        # Identity translation falls back to English plural rules.
        if n == 1:
            return msgid1
        else:
            return msgid2

    def lngettext(self, msgid1, msgid2, n):
        import warnings
        warnings.warn('lngettext() is deprecated, use ngettext() instead',
                      DeprecationWarning, 2)
        import locale
        if self._fallback:
            with warnings.catch_warnings():
                # As in lgettext(): silence the fallback's own warning.
                warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
                                        DeprecationWarning)
                return self._fallback.lngettext(msgid1, msgid2, n)
        if n == 1:
            tmsg = msgid1
        else:
            tmsg = msgid2
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())

    def pgettext(self, context, message):
        if self._fallback:
            return self._fallback.pgettext(context, message)
        return message

    def npgettext(self, context, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.npgettext(context, msgid1, msgid2, n)
        if n == 1:
            return msgid1
        else:
            return msgid2

    def info(self):
        # Catalog metadata dict (headers), empty for the null translation.
        return self._info

    def charset(self):
        return self._charset

    def output_charset(self):
        import warnings
        warnings.warn('output_charset() is deprecated',
                      DeprecationWarning, 2)
        return self._output_charset

    def set_output_charset(self, charset):
        import warnings
        warnings.warn('set_output_charset() is deprecated',
                      DeprecationWarning, 2)
        self._output_charset = charset

    def install(self, names=None):
        # Install gettext() as the builtin _(), plus any requested extras.
        import builtins
        builtins.__dict__['_'] = self.gettext
        if names is not None:
            allowed = {'gettext', 'lgettext', 'lngettext',
                       'ngettext', 'npgettext', 'pgettext'}
            for name in allowed & set(names):
                builtins.__dict__[name] = getattr(self, name)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
class GNUTranslations(NullTranslations):
|
| 359 |
+
# Magic number of .mo files
|
| 360 |
+
LE_MAGIC = 0x950412de
|
| 361 |
+
BE_MAGIC = 0xde120495
|
| 362 |
+
|
| 363 |
+
# The encoding of a msgctxt and a msgid in a .mo file is
|
| 364 |
+
# msgctxt + "\x04" + msgid (gettext version >= 0.15)
|
| 365 |
+
CONTEXT = "%s\x04%s"
|
| 366 |
+
|
| 367 |
+
# Acceptable .mo versions
|
| 368 |
+
VERSIONS = (0, 1)
|
| 369 |
+
|
| 370 |
+
def _get_versions(self, version):
|
| 371 |
+
"""Returns a tuple of major version, minor version"""
|
| 372 |
+
return (version >> 16, version & 0xffff)
|
| 373 |
+
|
| 374 |
+
def _parse(self, fp):
|
| 375 |
+
"""Override this method to support alternative .mo formats."""
|
| 376 |
+
# Delay struct import for speeding up gettext import when .mo files
|
| 377 |
+
# are not used.
|
| 378 |
+
from struct import unpack
|
| 379 |
+
filename = getattr(fp, 'name', '')
|
| 380 |
+
# Parse the .mo file header, which consists of 5 little endian 32
|
| 381 |
+
# bit words.
|
| 382 |
+
self._catalog = catalog = {}
|
| 383 |
+
self.plural = lambda n: int(n != 1) # germanic plural by default
|
| 384 |
+
buf = fp.read()
|
| 385 |
+
buflen = len(buf)
|
| 386 |
+
# Are we big endian or little endian?
|
| 387 |
+
magic = unpack('<I', buf[:4])[0]
|
| 388 |
+
if magic == self.LE_MAGIC:
|
| 389 |
+
version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
|
| 390 |
+
ii = '<II'
|
| 391 |
+
elif magic == self.BE_MAGIC:
|
| 392 |
+
version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
|
| 393 |
+
ii = '>II'
|
| 394 |
+
else:
|
| 395 |
+
raise OSError(0, 'Bad magic number', filename)
|
| 396 |
+
|
| 397 |
+
major_version, minor_version = self._get_versions(version)
|
| 398 |
+
|
| 399 |
+
if major_version not in self.VERSIONS:
|
| 400 |
+
raise OSError(0, 'Bad version number ' + str(major_version), filename)
|
| 401 |
+
|
| 402 |
+
# Now put all messages from the .mo file buffer into the catalog
|
| 403 |
+
# dictionary.
|
| 404 |
+
for i in range(0, msgcount):
|
| 405 |
+
mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
|
| 406 |
+
mend = moff + mlen
|
| 407 |
+
tlen, toff = unpack(ii, buf[transidx:transidx+8])
|
| 408 |
+
tend = toff + tlen
|
| 409 |
+
if mend < buflen and tend < buflen:
|
| 410 |
+
msg = buf[moff:mend]
|
| 411 |
+
tmsg = buf[toff:tend]
|
| 412 |
+
else:
|
| 413 |
+
raise OSError(0, 'File is corrupt', filename)
|
| 414 |
+
# See if we're looking at GNU .mo conventions for metadata
|
| 415 |
+
if mlen == 0:
|
| 416 |
+
# Catalog description
|
| 417 |
+
lastk = None
|
| 418 |
+
for b_item in tmsg.split(b'\n'):
|
| 419 |
+
item = b_item.decode().strip()
|
| 420 |
+
if not item:
|
| 421 |
+
continue
|
| 422 |
+
# Skip over comment lines:
|
| 423 |
+
if item.startswith('#-#-#-#-#') and item.endswith('#-#-#-#-#'):
|
| 424 |
+
continue
|
| 425 |
+
k = v = None
|
| 426 |
+
if ':' in item:
|
| 427 |
+
k, v = item.split(':', 1)
|
| 428 |
+
k = k.strip().lower()
|
| 429 |
+
v = v.strip()
|
| 430 |
+
self._info[k] = v
|
| 431 |
+
lastk = k
|
| 432 |
+
elif lastk:
|
| 433 |
+
self._info[lastk] += '\n' + item
|
| 434 |
+
if k == 'content-type':
|
| 435 |
+
self._charset = v.split('charset=')[1]
|
| 436 |
+
elif k == 'plural-forms':
|
| 437 |
+
v = v.split(';')
|
| 438 |
+
plural = v[1].split('plural=')[1]
|
| 439 |
+
self.plural = c2py(plural)
|
| 440 |
+
# Note: we unconditionally convert both msgids and msgstrs to
|
| 441 |
+
# Unicode using the character encoding specified in the charset
|
| 442 |
+
# parameter of the Content-Type header. The gettext documentation
|
| 443 |
+
# strongly encourages msgids to be us-ascii, but some applications
|
| 444 |
+
# require alternative encodings (e.g. Zope's ZCML and ZPT). For
|
| 445 |
+
# traditional gettext applications, the msgid conversion will
|
| 446 |
+
# cause no problems since us-ascii should always be a subset of
|
| 447 |
+
# the charset encoding. We may want to fall back to 8-bit msgids
|
| 448 |
+
# if the Unicode conversion fails.
|
| 449 |
+
charset = self._charset or 'ascii'
|
| 450 |
+
if b'\x00' in msg:
|
| 451 |
+
# Plural forms
|
| 452 |
+
msgid1, msgid2 = msg.split(b'\x00')
|
| 453 |
+
tmsg = tmsg.split(b'\x00')
|
| 454 |
+
msgid1 = str(msgid1, charset)
|
| 455 |
+
for i, x in enumerate(tmsg):
|
| 456 |
+
catalog[(msgid1, i)] = str(x, charset)
|
| 457 |
+
else:
|
| 458 |
+
catalog[str(msg, charset)] = str(tmsg, charset)
|
| 459 |
+
# advance to next entry in the seek tables
|
| 460 |
+
masteridx += 8
|
| 461 |
+
transidx += 8
|
| 462 |
+
|
| 463 |
+
def lgettext(self, message):
|
| 464 |
+
import warnings
|
| 465 |
+
warnings.warn('lgettext() is deprecated, use gettext() instead',
|
| 466 |
+
DeprecationWarning, 2)
|
| 467 |
+
import locale
|
| 468 |
+
missing = object()
|
| 469 |
+
tmsg = self._catalog.get(message, missing)
|
| 470 |
+
if tmsg is missing:
|
| 471 |
+
if self._fallback:
|
| 472 |
+
return self._fallback.lgettext(message)
|
| 473 |
+
tmsg = message
|
| 474 |
+
if self._output_charset:
|
| 475 |
+
return tmsg.encode(self._output_charset)
|
| 476 |
+
return tmsg.encode(locale.getpreferredencoding())
|
| 477 |
+
|
| 478 |
+
def lngettext(self, msgid1, msgid2, n):
|
| 479 |
+
import warnings
|
| 480 |
+
warnings.warn('lngettext() is deprecated, use ngettext() instead',
|
| 481 |
+
DeprecationWarning, 2)
|
| 482 |
+
import locale
|
| 483 |
+
try:
|
| 484 |
+
tmsg = self._catalog[(msgid1, self.plural(n))]
|
| 485 |
+
except KeyError:
|
| 486 |
+
if self._fallback:
|
| 487 |
+
return self._fallback.lngettext(msgid1, msgid2, n)
|
| 488 |
+
if n == 1:
|
| 489 |
+
tmsg = msgid1
|
| 490 |
+
else:
|
| 491 |
+
tmsg = msgid2
|
| 492 |
+
if self._output_charset:
|
| 493 |
+
return tmsg.encode(self._output_charset)
|
| 494 |
+
return tmsg.encode(locale.getpreferredencoding())
|
| 495 |
+
|
| 496 |
+
def gettext(self, message):
|
| 497 |
+
missing = object()
|
| 498 |
+
tmsg = self._catalog.get(message, missing)
|
| 499 |
+
if tmsg is missing:
|
| 500 |
+
if self._fallback:
|
| 501 |
+
return self._fallback.gettext(message)
|
| 502 |
+
return message
|
| 503 |
+
return tmsg
|
| 504 |
+
|
| 505 |
+
def ngettext(self, msgid1, msgid2, n):
|
| 506 |
+
try:
|
| 507 |
+
tmsg = self._catalog[(msgid1, self.plural(n))]
|
| 508 |
+
except KeyError:
|
| 509 |
+
if self._fallback:
|
| 510 |
+
return self._fallback.ngettext(msgid1, msgid2, n)
|
| 511 |
+
if n == 1:
|
| 512 |
+
tmsg = msgid1
|
| 513 |
+
else:
|
| 514 |
+
tmsg = msgid2
|
| 515 |
+
return tmsg
|
| 516 |
+
|
| 517 |
+
def pgettext(self, context, message):
|
| 518 |
+
ctxt_msg_id = self.CONTEXT % (context, message)
|
| 519 |
+
missing = object()
|
| 520 |
+
tmsg = self._catalog.get(ctxt_msg_id, missing)
|
| 521 |
+
if tmsg is missing:
|
| 522 |
+
if self._fallback:
|
| 523 |
+
return self._fallback.pgettext(context, message)
|
| 524 |
+
return message
|
| 525 |
+
return tmsg
|
| 526 |
+
|
| 527 |
+
def npgettext(self, context, msgid1, msgid2, n):
|
| 528 |
+
ctxt_msg_id = self.CONTEXT % (context, msgid1)
|
| 529 |
+
try:
|
| 530 |
+
tmsg = self._catalog[ctxt_msg_id, self.plural(n)]
|
| 531 |
+
except KeyError:
|
| 532 |
+
if self._fallback:
|
| 533 |
+
return self._fallback.npgettext(context, msgid1, msgid2, n)
|
| 534 |
+
if n == 1:
|
| 535 |
+
tmsg = msgid1
|
| 536 |
+
else:
|
| 537 |
+
tmsg = msgid2
|
| 538 |
+
return tmsg
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
# Locate a .mo file using the gettext strategy
|
| 542 |
+
def find(domain, localedir=None, languages=None, all=False):
|
| 543 |
+
# Get some reasonable defaults for arguments that were not supplied
|
| 544 |
+
if localedir is None:
|
| 545 |
+
localedir = _default_localedir
|
| 546 |
+
if languages is None:
|
| 547 |
+
languages = []
|
| 548 |
+
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
|
| 549 |
+
val = os.environ.get(envar)
|
| 550 |
+
if val:
|
| 551 |
+
languages = val.split(':')
|
| 552 |
+
break
|
| 553 |
+
if 'C' not in languages:
|
| 554 |
+
languages.append('C')
|
| 555 |
+
# now normalize and expand the languages
|
| 556 |
+
nelangs = []
|
| 557 |
+
for lang in languages:
|
| 558 |
+
for nelang in _expand_lang(lang):
|
| 559 |
+
if nelang not in nelangs:
|
| 560 |
+
nelangs.append(nelang)
|
| 561 |
+
# select a language
|
| 562 |
+
if all:
|
| 563 |
+
result = []
|
| 564 |
+
else:
|
| 565 |
+
result = None
|
| 566 |
+
for lang in nelangs:
|
| 567 |
+
if lang == 'C':
|
| 568 |
+
break
|
| 569 |
+
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
|
| 570 |
+
if os.path.exists(mofile):
|
| 571 |
+
if all:
|
| 572 |
+
result.append(mofile)
|
| 573 |
+
else:
|
| 574 |
+
return mofile
|
| 575 |
+
return result
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
# a mapping between absolute .mo file path and Translation object
|
| 580 |
+
_translations = {}
|
| 581 |
+
_unspecified = ['unspecified']
|
| 582 |
+
|
| 583 |
+
def translation(domain, localedir=None, languages=None,
|
| 584 |
+
class_=None, fallback=False, codeset=_unspecified):
|
| 585 |
+
if class_ is None:
|
| 586 |
+
class_ = GNUTranslations
|
| 587 |
+
mofiles = find(domain, localedir, languages, all=True)
|
| 588 |
+
if not mofiles:
|
| 589 |
+
if fallback:
|
| 590 |
+
return NullTranslations()
|
| 591 |
+
from errno import ENOENT
|
| 592 |
+
raise FileNotFoundError(ENOENT,
|
| 593 |
+
'No translation file found for domain', domain)
|
| 594 |
+
# Avoid opening, reading, and parsing the .mo file after it's been done
|
| 595 |
+
# once.
|
| 596 |
+
result = None
|
| 597 |
+
for mofile in mofiles:
|
| 598 |
+
key = (class_, os.path.abspath(mofile))
|
| 599 |
+
t = _translations.get(key)
|
| 600 |
+
if t is None:
|
| 601 |
+
with open(mofile, 'rb') as fp:
|
| 602 |
+
t = _translations.setdefault(key, class_(fp))
|
| 603 |
+
# Copy the translation object to allow setting fallbacks and
|
| 604 |
+
# output charset. All other instance data is shared with the
|
| 605 |
+
# cached object.
|
| 606 |
+
# Delay copy import for speeding up gettext import when .mo files
|
| 607 |
+
# are not used.
|
| 608 |
+
import copy
|
| 609 |
+
t = copy.copy(t)
|
| 610 |
+
if codeset is not _unspecified:
|
| 611 |
+
import warnings
|
| 612 |
+
warnings.warn('parameter codeset is deprecated',
|
| 613 |
+
DeprecationWarning, 2)
|
| 614 |
+
if codeset:
|
| 615 |
+
with warnings.catch_warnings():
|
| 616 |
+
warnings.filterwarnings('ignore', r'.*\bset_output_charset\b.*',
|
| 617 |
+
DeprecationWarning)
|
| 618 |
+
t.set_output_charset(codeset)
|
| 619 |
+
if result is None:
|
| 620 |
+
result = t
|
| 621 |
+
else:
|
| 622 |
+
result.add_fallback(t)
|
| 623 |
+
return result
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
def install(domain, localedir=None, codeset=_unspecified, names=None):
|
| 627 |
+
t = translation(domain, localedir, fallback=True, codeset=codeset)
|
| 628 |
+
t.install(names)
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
# a mapping b/w domains and locale directories
|
| 633 |
+
_localedirs = {}
|
| 634 |
+
# a mapping b/w domains and codesets
|
| 635 |
+
_localecodesets = {}
|
| 636 |
+
# current global domain, `messages' used for compatibility w/ GNU gettext
|
| 637 |
+
_current_domain = 'messages'
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def textdomain(domain=None):
|
| 641 |
+
global _current_domain
|
| 642 |
+
if domain is not None:
|
| 643 |
+
_current_domain = domain
|
| 644 |
+
return _current_domain
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def bindtextdomain(domain, localedir=None):
|
| 648 |
+
global _localedirs
|
| 649 |
+
if localedir is not None:
|
| 650 |
+
_localedirs[domain] = localedir
|
| 651 |
+
return _localedirs.get(domain, _default_localedir)
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def bind_textdomain_codeset(domain, codeset=None):
|
| 655 |
+
import warnings
|
| 656 |
+
warnings.warn('bind_textdomain_codeset() is deprecated',
|
| 657 |
+
DeprecationWarning, 2)
|
| 658 |
+
global _localecodesets
|
| 659 |
+
if codeset is not None:
|
| 660 |
+
_localecodesets[domain] = codeset
|
| 661 |
+
return _localecodesets.get(domain)
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def dgettext(domain, message):
|
| 665 |
+
try:
|
| 666 |
+
t = translation(domain, _localedirs.get(domain, None))
|
| 667 |
+
except OSError:
|
| 668 |
+
return message
|
| 669 |
+
return t.gettext(message)
|
| 670 |
+
|
| 671 |
+
def ldgettext(domain, message):
|
| 672 |
+
import warnings
|
| 673 |
+
warnings.warn('ldgettext() is deprecated, use dgettext() instead',
|
| 674 |
+
DeprecationWarning, 2)
|
| 675 |
+
import locale
|
| 676 |
+
codeset = _localecodesets.get(domain)
|
| 677 |
+
try:
|
| 678 |
+
with warnings.catch_warnings():
|
| 679 |
+
warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
|
| 680 |
+
DeprecationWarning)
|
| 681 |
+
t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
|
| 682 |
+
except OSError:
|
| 683 |
+
return message.encode(codeset or locale.getpreferredencoding())
|
| 684 |
+
with warnings.catch_warnings():
|
| 685 |
+
warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
|
| 686 |
+
DeprecationWarning)
|
| 687 |
+
return t.lgettext(message)
|
| 688 |
+
|
| 689 |
+
def dngettext(domain, msgid1, msgid2, n):
|
| 690 |
+
try:
|
| 691 |
+
t = translation(domain, _localedirs.get(domain, None))
|
| 692 |
+
except OSError:
|
| 693 |
+
if n == 1:
|
| 694 |
+
return msgid1
|
| 695 |
+
else:
|
| 696 |
+
return msgid2
|
| 697 |
+
return t.ngettext(msgid1, msgid2, n)
|
| 698 |
+
|
| 699 |
+
def ldngettext(domain, msgid1, msgid2, n):
|
| 700 |
+
import warnings
|
| 701 |
+
warnings.warn('ldngettext() is deprecated, use dngettext() instead',
|
| 702 |
+
DeprecationWarning, 2)
|
| 703 |
+
import locale
|
| 704 |
+
codeset = _localecodesets.get(domain)
|
| 705 |
+
try:
|
| 706 |
+
with warnings.catch_warnings():
|
| 707 |
+
warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
|
| 708 |
+
DeprecationWarning)
|
| 709 |
+
t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
|
| 710 |
+
except OSError:
|
| 711 |
+
if n == 1:
|
| 712 |
+
tmsg = msgid1
|
| 713 |
+
else:
|
| 714 |
+
tmsg = msgid2
|
| 715 |
+
return tmsg.encode(codeset or locale.getpreferredencoding())
|
| 716 |
+
with warnings.catch_warnings():
|
| 717 |
+
warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
|
| 718 |
+
DeprecationWarning)
|
| 719 |
+
return t.lngettext(msgid1, msgid2, n)
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def dpgettext(domain, context, message):
|
| 723 |
+
try:
|
| 724 |
+
t = translation(domain, _localedirs.get(domain, None))
|
| 725 |
+
except OSError:
|
| 726 |
+
return message
|
| 727 |
+
return t.pgettext(context, message)
|
| 728 |
+
|
| 729 |
+
|
| 730 |
+
def dnpgettext(domain, context, msgid1, msgid2, n):
|
| 731 |
+
try:
|
| 732 |
+
t = translation(domain, _localedirs.get(domain, None))
|
| 733 |
+
except OSError:
|
| 734 |
+
if n == 1:
|
| 735 |
+
return msgid1
|
| 736 |
+
else:
|
| 737 |
+
return msgid2
|
| 738 |
+
return t.npgettext(context, msgid1, msgid2, n)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def gettext(message):
|
| 742 |
+
return dgettext(_current_domain, message)
|
| 743 |
+
|
| 744 |
+
def lgettext(message):
|
| 745 |
+
import warnings
|
| 746 |
+
warnings.warn('lgettext() is deprecated, use gettext() instead',
|
| 747 |
+
DeprecationWarning, 2)
|
| 748 |
+
with warnings.catch_warnings():
|
| 749 |
+
warnings.filterwarnings('ignore', r'.*\bldgettext\b.*',
|
| 750 |
+
DeprecationWarning)
|
| 751 |
+
return ldgettext(_current_domain, message)
|
| 752 |
+
|
| 753 |
+
def ngettext(msgid1, msgid2, n):
|
| 754 |
+
return dngettext(_current_domain, msgid1, msgid2, n)
|
| 755 |
+
|
| 756 |
+
def lngettext(msgid1, msgid2, n):
|
| 757 |
+
import warnings
|
| 758 |
+
warnings.warn('lngettext() is deprecated, use ngettext() instead',
|
| 759 |
+
DeprecationWarning, 2)
|
| 760 |
+
with warnings.catch_warnings():
|
| 761 |
+
warnings.filterwarnings('ignore', r'.*\bldngettext\b.*',
|
| 762 |
+
DeprecationWarning)
|
| 763 |
+
return ldngettext(_current_domain, msgid1, msgid2, n)
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
def pgettext(context, message):
|
| 767 |
+
return dpgettext(_current_domain, context, message)
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
def npgettext(context, msgid1, msgid2, n):
|
| 771 |
+
return dnpgettext(_current_domain, context, msgid1, msgid2, n)
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
# dcgettext() has been deemed unnecessary and is not implemented.
|
| 775 |
+
|
| 776 |
+
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
|
| 777 |
+
# was:
|
| 778 |
+
#
|
| 779 |
+
# import gettext
|
| 780 |
+
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
|
| 781 |
+
# _ = cat.gettext
|
| 782 |
+
# print _('Hello World')
|
| 783 |
+
|
| 784 |
+
# The resulting catalog object currently don't support access through a
|
| 785 |
+
# dictionary API, which was supported (but apparently unused) in GNOME
|
| 786 |
+
# gettext.
|
| 787 |
+
|
| 788 |
+
Catalog = translation
|
llava/lib/python3.10/hmac.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HMAC (Keyed-Hashing for Message Authentication) module.
|
| 2 |
+
|
| 3 |
+
Implements the HMAC algorithm as described by RFC 2104.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import warnings as _warnings
|
| 7 |
+
try:
|
| 8 |
+
import _hashlib as _hashopenssl
|
| 9 |
+
except ImportError:
|
| 10 |
+
_hashopenssl = None
|
| 11 |
+
_functype = None
|
| 12 |
+
from _operator import _compare_digest as compare_digest
|
| 13 |
+
else:
|
| 14 |
+
compare_digest = _hashopenssl.compare_digest
|
| 15 |
+
_functype = type(_hashopenssl.openssl_sha256) # builtin type
|
| 16 |
+
|
| 17 |
+
import hashlib as _hashlib
|
| 18 |
+
|
| 19 |
+
trans_5C = bytes((x ^ 0x5C) for x in range(256))
|
| 20 |
+
trans_36 = bytes((x ^ 0x36) for x in range(256))
|
| 21 |
+
|
| 22 |
+
# The size of the digests returned by HMAC depends on the underlying
|
| 23 |
+
# hashing module used. Use digest_size from the instance of HMAC instead.
|
| 24 |
+
digest_size = None
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class HMAC:
|
| 28 |
+
"""RFC 2104 HMAC class. Also complies with RFC 4231.
|
| 29 |
+
|
| 30 |
+
This supports the API for Cryptographic Hash Functions (PEP 247).
|
| 31 |
+
"""
|
| 32 |
+
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
|
| 33 |
+
|
| 34 |
+
__slots__ = (
|
| 35 |
+
"_hmac", "_inner", "_outer", "block_size", "digest_size"
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
def __init__(self, key, msg=None, digestmod=''):
|
| 39 |
+
"""Create a new HMAC object.
|
| 40 |
+
|
| 41 |
+
key: bytes or buffer, key for the keyed hash object.
|
| 42 |
+
msg: bytes or buffer, Initial input for the hash or None.
|
| 43 |
+
digestmod: A hash name suitable for hashlib.new(). *OR*
|
| 44 |
+
A hashlib constructor returning a new hash object. *OR*
|
| 45 |
+
A module supporting PEP 247.
|
| 46 |
+
|
| 47 |
+
Required as of 3.8, despite its position after the optional
|
| 48 |
+
msg argument. Passing it as a keyword argument is
|
| 49 |
+
recommended, though not required for legacy API reasons.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
if not isinstance(key, (bytes, bytearray)):
|
| 53 |
+
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
|
| 54 |
+
|
| 55 |
+
if not digestmod:
|
| 56 |
+
raise TypeError("Missing required parameter 'digestmod'.")
|
| 57 |
+
|
| 58 |
+
if _hashopenssl and isinstance(digestmod, (str, _functype)):
|
| 59 |
+
try:
|
| 60 |
+
self._init_hmac(key, msg, digestmod)
|
| 61 |
+
except _hashopenssl.UnsupportedDigestmodError:
|
| 62 |
+
self._init_old(key, msg, digestmod)
|
| 63 |
+
else:
|
| 64 |
+
self._init_old(key, msg, digestmod)
|
| 65 |
+
|
| 66 |
+
def _init_hmac(self, key, msg, digestmod):
|
| 67 |
+
self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod)
|
| 68 |
+
self.digest_size = self._hmac.digest_size
|
| 69 |
+
self.block_size = self._hmac.block_size
|
| 70 |
+
|
| 71 |
+
def _init_old(self, key, msg, digestmod):
|
| 72 |
+
if callable(digestmod):
|
| 73 |
+
digest_cons = digestmod
|
| 74 |
+
elif isinstance(digestmod, str):
|
| 75 |
+
digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
|
| 76 |
+
else:
|
| 77 |
+
digest_cons = lambda d=b'': digestmod.new(d)
|
| 78 |
+
|
| 79 |
+
self._hmac = None
|
| 80 |
+
self._outer = digest_cons()
|
| 81 |
+
self._inner = digest_cons()
|
| 82 |
+
self.digest_size = self._inner.digest_size
|
| 83 |
+
|
| 84 |
+
if hasattr(self._inner, 'block_size'):
|
| 85 |
+
blocksize = self._inner.block_size
|
| 86 |
+
if blocksize < 16:
|
| 87 |
+
_warnings.warn('block_size of %d seems too small; using our '
|
| 88 |
+
'default of %d.' % (blocksize, self.blocksize),
|
| 89 |
+
RuntimeWarning, 2)
|
| 90 |
+
blocksize = self.blocksize
|
| 91 |
+
else:
|
| 92 |
+
_warnings.warn('No block_size attribute on given digest object; '
|
| 93 |
+
'Assuming %d.' % (self.blocksize),
|
| 94 |
+
RuntimeWarning, 2)
|
| 95 |
+
blocksize = self.blocksize
|
| 96 |
+
|
| 97 |
+
if len(key) > blocksize:
|
| 98 |
+
key = digest_cons(key).digest()
|
| 99 |
+
|
| 100 |
+
# self.blocksize is the default blocksize. self.block_size is
|
| 101 |
+
# effective block size as well as the public API attribute.
|
| 102 |
+
self.block_size = blocksize
|
| 103 |
+
|
| 104 |
+
key = key.ljust(blocksize, b'\0')
|
| 105 |
+
self._outer.update(key.translate(trans_5C))
|
| 106 |
+
self._inner.update(key.translate(trans_36))
|
| 107 |
+
if msg is not None:
|
| 108 |
+
self.update(msg)
|
| 109 |
+
|
| 110 |
+
@property
|
| 111 |
+
def name(self):
|
| 112 |
+
if self._hmac:
|
| 113 |
+
return self._hmac.name
|
| 114 |
+
else:
|
| 115 |
+
return f"hmac-{self._inner.name}"
|
| 116 |
+
|
| 117 |
+
def update(self, msg):
|
| 118 |
+
"""Feed data from msg into this hashing object."""
|
| 119 |
+
inst = self._hmac or self._inner
|
| 120 |
+
inst.update(msg)
|
| 121 |
+
|
| 122 |
+
def copy(self):
|
| 123 |
+
"""Return a separate copy of this hashing object.
|
| 124 |
+
|
| 125 |
+
An update to this copy won't affect the original object.
|
| 126 |
+
"""
|
| 127 |
+
# Call __new__ directly to avoid the expensive __init__.
|
| 128 |
+
other = self.__class__.__new__(self.__class__)
|
| 129 |
+
other.digest_size = self.digest_size
|
| 130 |
+
if self._hmac:
|
| 131 |
+
other._hmac = self._hmac.copy()
|
| 132 |
+
other._inner = other._outer = None
|
| 133 |
+
else:
|
| 134 |
+
other._hmac = None
|
| 135 |
+
other._inner = self._inner.copy()
|
| 136 |
+
other._outer = self._outer.copy()
|
| 137 |
+
return other
|
| 138 |
+
|
| 139 |
+
def _current(self):
|
| 140 |
+
"""Return a hash object for the current state.
|
| 141 |
+
|
| 142 |
+
To be used only internally with digest() and hexdigest().
|
| 143 |
+
"""
|
| 144 |
+
if self._hmac:
|
| 145 |
+
return self._hmac
|
| 146 |
+
else:
|
| 147 |
+
h = self._outer.copy()
|
| 148 |
+
h.update(self._inner.digest())
|
| 149 |
+
return h
|
| 150 |
+
|
| 151 |
+
def digest(self):
|
| 152 |
+
"""Return the hash value of this hashing object.
|
| 153 |
+
|
| 154 |
+
This returns the hmac value as bytes. The object is
|
| 155 |
+
not altered in any way by this function; you can continue
|
| 156 |
+
updating the object after calling this function.
|
| 157 |
+
"""
|
| 158 |
+
h = self._current()
|
| 159 |
+
return h.digest()
|
| 160 |
+
|
| 161 |
+
def hexdigest(self):
|
| 162 |
+
"""Like digest(), but returns a string of hexadecimal digits instead.
|
| 163 |
+
"""
|
| 164 |
+
h = self._current()
|
| 165 |
+
return h.hexdigest()
|
| 166 |
+
|
| 167 |
+
def new(key, msg=None, digestmod=''):
|
| 168 |
+
"""Create a new hashing object and return it.
|
| 169 |
+
|
| 170 |
+
key: bytes or buffer, The starting key for the hash.
|
| 171 |
+
msg: bytes or buffer, Initial input for the hash, or None.
|
| 172 |
+
digestmod: A hash name suitable for hashlib.new(). *OR*
|
| 173 |
+
A hashlib constructor returning a new hash object. *OR*
|
| 174 |
+
A module supporting PEP 247.
|
| 175 |
+
|
| 176 |
+
Required as of 3.8, despite its position after the optional
|
| 177 |
+
msg argument. Passing it as a keyword argument is
|
| 178 |
+
recommended, though not required for legacy API reasons.
|
| 179 |
+
|
| 180 |
+
You can now feed arbitrary bytes into the object using its update()
|
| 181 |
+
method, and can ask for the hash value at any time by calling its digest()
|
| 182 |
+
or hexdigest() methods.
|
| 183 |
+
"""
|
| 184 |
+
return HMAC(key, msg, digestmod)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def digest(key, msg, digest):
|
| 188 |
+
"""Fast inline implementation of HMAC.
|
| 189 |
+
|
| 190 |
+
key: bytes or buffer, The key for the keyed hash object.
|
| 191 |
+
msg: bytes or buffer, Input message.
|
| 192 |
+
digest: A hash name suitable for hashlib.new() for best performance. *OR*
|
| 193 |
+
A hashlib constructor returning a new hash object. *OR*
|
| 194 |
+
A module supporting PEP 247.
|
| 195 |
+
"""
|
| 196 |
+
if _hashopenssl is not None and isinstance(digest, (str, _functype)):
|
| 197 |
+
try:
|
| 198 |
+
return _hashopenssl.hmac_digest(key, msg, digest)
|
| 199 |
+
except _hashopenssl.UnsupportedDigestmodError:
|
| 200 |
+
pass
|
| 201 |
+
|
| 202 |
+
if callable(digest):
|
| 203 |
+
digest_cons = digest
|
| 204 |
+
elif isinstance(digest, str):
|
| 205 |
+
digest_cons = lambda d=b'': _hashlib.new(digest, d)
|
| 206 |
+
else:
|
| 207 |
+
digest_cons = lambda d=b'': digest.new(d)
|
| 208 |
+
|
| 209 |
+
inner = digest_cons()
|
| 210 |
+
outer = digest_cons()
|
| 211 |
+
blocksize = getattr(inner, 'block_size', 64)
|
| 212 |
+
if len(key) > blocksize:
|
| 213 |
+
key = digest_cons(key).digest()
|
| 214 |
+
key = key + b'\x00' * (blocksize - len(key))
|
| 215 |
+
inner.update(key.translate(trans_36))
|
| 216 |
+
outer.update(key.translate(trans_5C))
|
| 217 |
+
inner.update(msg)
|
| 218 |
+
outer.update(inner.digest())
|
| 219 |
+
return outer.digest()
|
llava/lib/python3.10/netrc.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""An object-oriented interface to .netrc files."""
|
| 2 |
+
|
| 3 |
+
# Module and documentation by Eric S. Raymond, 21 Dec 1998
|
| 4 |
+
|
| 5 |
+
import os, shlex, stat
|
| 6 |
+
|
| 7 |
+
__all__ = ["netrc", "NetrcParseError"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class NetrcParseError(Exception):
|
| 11 |
+
"""Exception raised on syntax errors in the .netrc file."""
|
| 12 |
+
def __init__(self, msg, filename=None, lineno=None):
|
| 13 |
+
self.filename = filename
|
| 14 |
+
self.lineno = lineno
|
| 15 |
+
self.msg = msg
|
| 16 |
+
Exception.__init__(self, msg)
|
| 17 |
+
|
| 18 |
+
def __str__(self):
|
| 19 |
+
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class netrc:
|
| 23 |
+
def __init__(self, file=None):
|
| 24 |
+
default_netrc = file is None
|
| 25 |
+
if file is None:
|
| 26 |
+
file = os.path.join(os.path.expanduser("~"), ".netrc")
|
| 27 |
+
self.hosts = {}
|
| 28 |
+
self.macros = {}
|
| 29 |
+
try:
|
| 30 |
+
with open(file, encoding="utf-8") as fp:
|
| 31 |
+
self._parse(file, fp, default_netrc)
|
| 32 |
+
except UnicodeDecodeError:
|
| 33 |
+
with open(file, encoding="locale") as fp:
|
| 34 |
+
self._parse(file, fp, default_netrc)
|
| 35 |
+
|
| 36 |
+
def _parse(self, file, fp, default_netrc):
|
| 37 |
+
lexer = shlex.shlex(fp)
|
| 38 |
+
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
|
| 39 |
+
lexer.commenters = lexer.commenters.replace('#', '')
|
| 40 |
+
while 1:
|
| 41 |
+
# Look for a machine, default, or macdef top-level keyword
|
| 42 |
+
saved_lineno = lexer.lineno
|
| 43 |
+
toplevel = tt = lexer.get_token()
|
| 44 |
+
if not tt:
|
| 45 |
+
break
|
| 46 |
+
elif tt[0] == '#':
|
| 47 |
+
if lexer.lineno == saved_lineno and len(tt) == 1:
|
| 48 |
+
lexer.instream.readline()
|
| 49 |
+
continue
|
| 50 |
+
elif tt == 'machine':
|
| 51 |
+
entryname = lexer.get_token()
|
| 52 |
+
elif tt == 'default':
|
| 53 |
+
entryname = 'default'
|
| 54 |
+
elif tt == 'macdef': # Just skip to end of macdefs
|
| 55 |
+
entryname = lexer.get_token()
|
| 56 |
+
self.macros[entryname] = []
|
| 57 |
+
lexer.whitespace = ' \t'
|
| 58 |
+
while 1:
|
| 59 |
+
line = lexer.instream.readline()
|
| 60 |
+
if not line or line == '\012':
|
| 61 |
+
lexer.whitespace = ' \t\r\n'
|
| 62 |
+
break
|
| 63 |
+
self.macros[entryname].append(line)
|
| 64 |
+
continue
|
| 65 |
+
else:
|
| 66 |
+
raise NetrcParseError(
|
| 67 |
+
"bad toplevel token %r" % tt, file, lexer.lineno)
|
| 68 |
+
|
| 69 |
+
# We're looking at start of an entry for a named machine or default.
|
| 70 |
+
login = ''
|
| 71 |
+
account = password = None
|
| 72 |
+
self.hosts[entryname] = {}
|
| 73 |
+
while 1:
|
| 74 |
+
tt = lexer.get_token()
|
| 75 |
+
if (tt.startswith('#') or
|
| 76 |
+
tt in {'', 'machine', 'default', 'macdef'}):
|
| 77 |
+
if password:
|
| 78 |
+
self.hosts[entryname] = (login, account, password)
|
| 79 |
+
lexer.push_token(tt)
|
| 80 |
+
break
|
| 81 |
+
else:
|
| 82 |
+
raise NetrcParseError(
|
| 83 |
+
"malformed %s entry %s terminated by %s"
|
| 84 |
+
% (toplevel, entryname, repr(tt)),
|
| 85 |
+
file, lexer.lineno)
|
| 86 |
+
elif tt == 'login' or tt == 'user':
|
| 87 |
+
login = lexer.get_token()
|
| 88 |
+
elif tt == 'account':
|
| 89 |
+
account = lexer.get_token()
|
| 90 |
+
elif tt == 'password':
|
| 91 |
+
if os.name == 'posix' and default_netrc:
|
| 92 |
+
prop = os.fstat(fp.fileno())
|
| 93 |
+
if prop.st_uid != os.getuid():
|
| 94 |
+
import pwd
|
| 95 |
+
try:
|
| 96 |
+
fowner = pwd.getpwuid(prop.st_uid)[0]
|
| 97 |
+
except KeyError:
|
| 98 |
+
fowner = 'uid %s' % prop.st_uid
|
| 99 |
+
try:
|
| 100 |
+
user = pwd.getpwuid(os.getuid())[0]
|
| 101 |
+
except KeyError:
|
| 102 |
+
user = 'uid %s' % os.getuid()
|
| 103 |
+
raise NetrcParseError(
|
| 104 |
+
("~/.netrc file owner (%s) does not match"
|
| 105 |
+
" current user (%s)") % (fowner, user),
|
| 106 |
+
file, lexer.lineno)
|
| 107 |
+
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
|
| 108 |
+
raise NetrcParseError(
|
| 109 |
+
"~/.netrc access too permissive: access"
|
| 110 |
+
" permissions must restrict access to only"
|
| 111 |
+
" the owner", file, lexer.lineno)
|
| 112 |
+
password = lexer.get_token()
|
| 113 |
+
else:
|
| 114 |
+
raise NetrcParseError("bad follower token %r" % tt,
|
| 115 |
+
file, lexer.lineno)
|
| 116 |
+
|
| 117 |
+
def authenticators(self, host):
|
| 118 |
+
"""Return a (user, account, password) tuple for given host."""
|
| 119 |
+
if host in self.hosts:
|
| 120 |
+
return self.hosts[host]
|
| 121 |
+
elif 'default' in self.hosts:
|
| 122 |
+
return self.hosts['default']
|
| 123 |
+
else:
|
| 124 |
+
return None
|
| 125 |
+
|
| 126 |
+
def __repr__(self):
|
| 127 |
+
"""Dump the class data in the format of a .netrc file."""
|
| 128 |
+
rep = ""
|
| 129 |
+
for host in self.hosts.keys():
|
| 130 |
+
attrs = self.hosts[host]
|
| 131 |
+
rep += f"machine {host}\n\tlogin {attrs[0]}\n"
|
| 132 |
+
if attrs[1]:
|
| 133 |
+
rep += f"\taccount {attrs[1]}\n"
|
| 134 |
+
rep += f"\tpassword {attrs[2]}\n"
|
| 135 |
+
for macro in self.macros.keys():
|
| 136 |
+
rep += f"macdef {macro}\n"
|
| 137 |
+
for line in self.macros[macro]:
|
| 138 |
+
rep += line
|
| 139 |
+
rep += "\n"
|
| 140 |
+
return rep
|
| 141 |
+
|
| 142 |
+
if __name__ == '__main__':
|
| 143 |
+
print(netrc())
|
llava/lib/python3.10/ntpath.py
ADDED
|
@@ -0,0 +1,838 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
|
| 2 |
+
"""Common pathname manipulations, WindowsNT/95 version.
|
| 3 |
+
|
| 4 |
+
Instead of importing this module directly, import os and refer to this
|
| 5 |
+
module as os.path.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
# strings representing various path-related bits and pieces
|
| 9 |
+
# These are primarily for export; internally, they are hardcoded.
|
| 10 |
+
# Should be set before imports for resolving cyclic dependency.
|
| 11 |
+
curdir = '.'
|
| 12 |
+
pardir = '..'
|
| 13 |
+
extsep = '.'
|
| 14 |
+
sep = '\\'
|
| 15 |
+
pathsep = ';'
|
| 16 |
+
altsep = '/'
|
| 17 |
+
defpath = '.;C:\\bin'
|
| 18 |
+
devnull = 'nul'
|
| 19 |
+
|
| 20 |
+
import os
|
| 21 |
+
import sys
|
| 22 |
+
import stat
|
| 23 |
+
import genericpath
|
| 24 |
+
from genericpath import *
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
|
| 28 |
+
"basename","dirname","commonprefix","getsize","getmtime",
|
| 29 |
+
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
|
| 30 |
+
"ismount", "expanduser","expandvars","normpath","abspath",
|
| 31 |
+
"curdir","pardir","sep","pathsep","defpath","altsep",
|
| 32 |
+
"extsep","devnull","realpath","supports_unicode_filenames","relpath",
|
| 33 |
+
"samefile", "sameopenfile", "samestat", "commonpath"]
|
| 34 |
+
|
| 35 |
+
def _get_bothseps(path):
|
| 36 |
+
if isinstance(path, bytes):
|
| 37 |
+
return b'\\/'
|
| 38 |
+
else:
|
| 39 |
+
return '\\/'
|
| 40 |
+
|
| 41 |
+
# Normalize the case of a pathname and map slashes to backslashes.
|
| 42 |
+
# Other normalizations (such as optimizing '../' away) are not done
|
| 43 |
+
# (this is done by normpath).
|
| 44 |
+
|
| 45 |
+
try:
|
| 46 |
+
from _winapi import (
|
| 47 |
+
LCMapStringEx as _LCMapStringEx,
|
| 48 |
+
LOCALE_NAME_INVARIANT as _LOCALE_NAME_INVARIANT,
|
| 49 |
+
LCMAP_LOWERCASE as _LCMAP_LOWERCASE)
|
| 50 |
+
|
| 51 |
+
def normcase(s):
|
| 52 |
+
"""Normalize case of pathname.
|
| 53 |
+
|
| 54 |
+
Makes all characters lowercase and all slashes into backslashes.
|
| 55 |
+
"""
|
| 56 |
+
s = os.fspath(s)
|
| 57 |
+
if not s:
|
| 58 |
+
return s
|
| 59 |
+
if isinstance(s, bytes):
|
| 60 |
+
encoding = sys.getfilesystemencoding()
|
| 61 |
+
s = s.decode(encoding, 'surrogateescape').replace('/', '\\')
|
| 62 |
+
s = _LCMapStringEx(_LOCALE_NAME_INVARIANT,
|
| 63 |
+
_LCMAP_LOWERCASE, s)
|
| 64 |
+
return s.encode(encoding, 'surrogateescape')
|
| 65 |
+
else:
|
| 66 |
+
return _LCMapStringEx(_LOCALE_NAME_INVARIANT,
|
| 67 |
+
_LCMAP_LOWERCASE,
|
| 68 |
+
s.replace('/', '\\'))
|
| 69 |
+
except ImportError:
|
| 70 |
+
def normcase(s):
|
| 71 |
+
"""Normalize case of pathname.
|
| 72 |
+
|
| 73 |
+
Makes all characters lowercase and all slashes into backslashes.
|
| 74 |
+
"""
|
| 75 |
+
s = os.fspath(s)
|
| 76 |
+
if isinstance(s, bytes):
|
| 77 |
+
return os.fsencode(os.fsdecode(s).replace('/', '\\').lower())
|
| 78 |
+
return s.replace('/', '\\').lower()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Return whether a path is absolute.
|
| 82 |
+
# Trivial in Posix, harder on Windows.
|
| 83 |
+
# For Windows it is absolute if it starts with a slash or backslash (current
|
| 84 |
+
# volume), or if a pathname after the volume-letter-and-colon or UNC-resource
|
| 85 |
+
# starts with a slash or backslash.
|
| 86 |
+
|
| 87 |
+
def isabs(s):
|
| 88 |
+
"""Test whether a path is absolute"""
|
| 89 |
+
s = os.fspath(s)
|
| 90 |
+
# Paths beginning with \\?\ are always absolute, but do not
|
| 91 |
+
# necessarily contain a drive.
|
| 92 |
+
if isinstance(s, bytes):
|
| 93 |
+
if s.replace(b'/', b'\\').startswith(b'\\\\?\\'):
|
| 94 |
+
return True
|
| 95 |
+
else:
|
| 96 |
+
if s.replace('/', '\\').startswith('\\\\?\\'):
|
| 97 |
+
return True
|
| 98 |
+
s = splitdrive(s)[1]
|
| 99 |
+
return len(s) > 0 and s[0] in _get_bothseps(s)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# Join two (or more) paths.
|
| 103 |
+
def join(path, *paths):
|
| 104 |
+
path = os.fspath(path)
|
| 105 |
+
if isinstance(path, bytes):
|
| 106 |
+
sep = b'\\'
|
| 107 |
+
seps = b'\\/'
|
| 108 |
+
colon = b':'
|
| 109 |
+
else:
|
| 110 |
+
sep = '\\'
|
| 111 |
+
seps = '\\/'
|
| 112 |
+
colon = ':'
|
| 113 |
+
try:
|
| 114 |
+
if not paths:
|
| 115 |
+
path[:0] + sep #23780: Ensure compatible data type even if p is null.
|
| 116 |
+
result_drive, result_path = splitdrive(path)
|
| 117 |
+
for p in map(os.fspath, paths):
|
| 118 |
+
p_drive, p_path = splitdrive(p)
|
| 119 |
+
if p_path and p_path[0] in seps:
|
| 120 |
+
# Second path is absolute
|
| 121 |
+
if p_drive or not result_drive:
|
| 122 |
+
result_drive = p_drive
|
| 123 |
+
result_path = p_path
|
| 124 |
+
continue
|
| 125 |
+
elif p_drive and p_drive != result_drive:
|
| 126 |
+
if p_drive.lower() != result_drive.lower():
|
| 127 |
+
# Different drives => ignore the first path entirely
|
| 128 |
+
result_drive = p_drive
|
| 129 |
+
result_path = p_path
|
| 130 |
+
continue
|
| 131 |
+
# Same drive in different case
|
| 132 |
+
result_drive = p_drive
|
| 133 |
+
# Second path is relative to the first
|
| 134 |
+
if result_path and result_path[-1] not in seps:
|
| 135 |
+
result_path = result_path + sep
|
| 136 |
+
result_path = result_path + p_path
|
| 137 |
+
## add separator between UNC and non-absolute path
|
| 138 |
+
if (result_path and result_path[0] not in seps and
|
| 139 |
+
result_drive and result_drive[-1:] != colon):
|
| 140 |
+
return result_drive + sep + result_path
|
| 141 |
+
return result_drive + result_path
|
| 142 |
+
except (TypeError, AttributeError, BytesWarning):
|
| 143 |
+
genericpath._check_arg_types('join', path, *paths)
|
| 144 |
+
raise
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# Split a path in a drive specification (a drive letter followed by a
|
| 148 |
+
# colon) and the path specification.
|
| 149 |
+
# It is always true that drivespec + pathspec == p
|
| 150 |
+
def splitdrive(p):
|
| 151 |
+
"""Split a pathname into drive/UNC sharepoint and relative path specifiers.
|
| 152 |
+
Returns a 2-tuple (drive_or_unc, path); either part may be empty.
|
| 153 |
+
|
| 154 |
+
If you assign
|
| 155 |
+
result = splitdrive(p)
|
| 156 |
+
It is always true that:
|
| 157 |
+
result[0] + result[1] == p
|
| 158 |
+
|
| 159 |
+
If the path contained a drive letter, drive_or_unc will contain everything
|
| 160 |
+
up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir")
|
| 161 |
+
|
| 162 |
+
If the path contained a UNC path, the drive_or_unc will contain the host name
|
| 163 |
+
and share up to but not including the fourth directory separator character.
|
| 164 |
+
e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir")
|
| 165 |
+
|
| 166 |
+
Paths cannot contain both a drive letter and a UNC path.
|
| 167 |
+
|
| 168 |
+
"""
|
| 169 |
+
p = os.fspath(p)
|
| 170 |
+
if len(p) >= 2:
|
| 171 |
+
if isinstance(p, bytes):
|
| 172 |
+
sep = b'\\'
|
| 173 |
+
altsep = b'/'
|
| 174 |
+
colon = b':'
|
| 175 |
+
else:
|
| 176 |
+
sep = '\\'
|
| 177 |
+
altsep = '/'
|
| 178 |
+
colon = ':'
|
| 179 |
+
normp = p.replace(altsep, sep)
|
| 180 |
+
if (normp[0:2] == sep*2) and (normp[2:3] != sep):
|
| 181 |
+
# is a UNC path:
|
| 182 |
+
# vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
|
| 183 |
+
# \\machine\mountpoint\directory\etc\...
|
| 184 |
+
# directory ^^^^^^^^^^^^^^^
|
| 185 |
+
index = normp.find(sep, 2)
|
| 186 |
+
if index == -1:
|
| 187 |
+
return p[:0], p
|
| 188 |
+
index2 = normp.find(sep, index + 1)
|
| 189 |
+
# a UNC path can't have two slashes in a row
|
| 190 |
+
# (after the initial two)
|
| 191 |
+
if index2 == index + 1:
|
| 192 |
+
return p[:0], p
|
| 193 |
+
if index2 == -1:
|
| 194 |
+
index2 = len(p)
|
| 195 |
+
return p[:index2], p[index2:]
|
| 196 |
+
if normp[1:2] == colon:
|
| 197 |
+
return p[:2], p[2:]
|
| 198 |
+
return p[:0], p
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# Split a path in head (everything up to the last '/') and tail (the
|
| 202 |
+
# rest). After the trailing '/' is stripped, the invariant
|
| 203 |
+
# join(head, tail) == p holds.
|
| 204 |
+
# The resulting head won't end in '/' unless it is the root.
|
| 205 |
+
|
| 206 |
+
def split(p):
|
| 207 |
+
"""Split a pathname.
|
| 208 |
+
|
| 209 |
+
Return tuple (head, tail) where tail is everything after the final slash.
|
| 210 |
+
Either part may be empty."""
|
| 211 |
+
p = os.fspath(p)
|
| 212 |
+
seps = _get_bothseps(p)
|
| 213 |
+
d, p = splitdrive(p)
|
| 214 |
+
# set i to index beyond p's last slash
|
| 215 |
+
i = len(p)
|
| 216 |
+
while i and p[i-1] not in seps:
|
| 217 |
+
i -= 1
|
| 218 |
+
head, tail = p[:i], p[i:] # now tail has no slashes
|
| 219 |
+
# remove trailing slashes from head, unless it's all slashes
|
| 220 |
+
head = head.rstrip(seps) or head
|
| 221 |
+
return d + head, tail
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# Split a path in root and extension.
|
| 225 |
+
# The extension is everything starting at the last dot in the last
|
| 226 |
+
# pathname component; the root is everything before that.
|
| 227 |
+
# It is always true that root + ext == p.
|
| 228 |
+
|
| 229 |
+
def splitext(p):
|
| 230 |
+
p = os.fspath(p)
|
| 231 |
+
if isinstance(p, bytes):
|
| 232 |
+
return genericpath._splitext(p, b'\\', b'/', b'.')
|
| 233 |
+
else:
|
| 234 |
+
return genericpath._splitext(p, '\\', '/', '.')
|
| 235 |
+
splitext.__doc__ = genericpath._splitext.__doc__
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
# Return the tail (basename) part of a path.
|
| 239 |
+
|
| 240 |
+
def basename(p):
|
| 241 |
+
"""Returns the final component of a pathname"""
|
| 242 |
+
return split(p)[1]
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# Return the head (dirname) part of a path.
|
| 246 |
+
|
| 247 |
+
def dirname(p):
|
| 248 |
+
"""Returns the directory component of a pathname"""
|
| 249 |
+
return split(p)[0]
|
| 250 |
+
|
| 251 |
+
# Is a path a symbolic link?
|
| 252 |
+
# This will always return false on systems where os.lstat doesn't exist.
|
| 253 |
+
|
| 254 |
+
def islink(path):
|
| 255 |
+
"""Test whether a path is a symbolic link.
|
| 256 |
+
This will always return false for Windows prior to 6.0.
|
| 257 |
+
"""
|
| 258 |
+
try:
|
| 259 |
+
st = os.lstat(path)
|
| 260 |
+
except (OSError, ValueError, AttributeError):
|
| 261 |
+
return False
|
| 262 |
+
return stat.S_ISLNK(st.st_mode)
|
| 263 |
+
|
| 264 |
+
# Being true for dangling symbolic links is also useful.
|
| 265 |
+
|
| 266 |
+
def lexists(path):
|
| 267 |
+
"""Test whether a path exists. Returns True for broken symbolic links"""
|
| 268 |
+
try:
|
| 269 |
+
st = os.lstat(path)
|
| 270 |
+
except (OSError, ValueError):
|
| 271 |
+
return False
|
| 272 |
+
return True
|
| 273 |
+
|
| 274 |
+
# Is a path a mount point?
|
| 275 |
+
# Any drive letter root (eg c:\)
|
| 276 |
+
# Any share UNC (eg \\server\share)
|
| 277 |
+
# Any volume mounted on a filesystem folder
|
| 278 |
+
#
|
| 279 |
+
# No one method detects all three situations. Historically we've lexically
|
| 280 |
+
# detected drive letter roots and share UNCs. The canonical approach to
|
| 281 |
+
# detecting mounted volumes (querying the reparse tag) fails for the most
|
| 282 |
+
# common case: drive letter roots. The alternative which uses GetVolumePathName
|
| 283 |
+
# fails if the drive letter is the result of a SUBST.
|
| 284 |
+
try:
|
| 285 |
+
from nt import _getvolumepathname
|
| 286 |
+
except ImportError:
|
| 287 |
+
_getvolumepathname = None
|
| 288 |
+
def ismount(path):
|
| 289 |
+
"""Test whether a path is a mount point (a drive root, the root of a
|
| 290 |
+
share, or a mounted volume)"""
|
| 291 |
+
path = os.fspath(path)
|
| 292 |
+
seps = _get_bothseps(path)
|
| 293 |
+
path = abspath(path)
|
| 294 |
+
root, rest = splitdrive(path)
|
| 295 |
+
if root and root[0] in seps:
|
| 296 |
+
return (not rest) or (rest in seps)
|
| 297 |
+
if rest in seps:
|
| 298 |
+
return True
|
| 299 |
+
|
| 300 |
+
if _getvolumepathname:
|
| 301 |
+
return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps)
|
| 302 |
+
else:
|
| 303 |
+
return False
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
# Expand paths beginning with '~' or '~user'.
|
| 307 |
+
# '~' means $HOME; '~user' means that user's home directory.
|
| 308 |
+
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
|
| 309 |
+
# the path is returned unchanged (leaving error reporting to whatever
|
| 310 |
+
# function is called with the expanded path as argument).
|
| 311 |
+
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
|
| 312 |
+
# (A function should also be defined to do full *sh-style environment
|
| 313 |
+
# variable expansion.)
|
| 314 |
+
|
| 315 |
+
def expanduser(path):
|
| 316 |
+
"""Expand ~ and ~user constructs.
|
| 317 |
+
|
| 318 |
+
If user or $HOME is unknown, do nothing."""
|
| 319 |
+
path = os.fspath(path)
|
| 320 |
+
if isinstance(path, bytes):
|
| 321 |
+
tilde = b'~'
|
| 322 |
+
else:
|
| 323 |
+
tilde = '~'
|
| 324 |
+
if not path.startswith(tilde):
|
| 325 |
+
return path
|
| 326 |
+
i, n = 1, len(path)
|
| 327 |
+
while i < n and path[i] not in _get_bothseps(path):
|
| 328 |
+
i += 1
|
| 329 |
+
|
| 330 |
+
if 'USERPROFILE' in os.environ:
|
| 331 |
+
userhome = os.environ['USERPROFILE']
|
| 332 |
+
elif not 'HOMEPATH' in os.environ:
|
| 333 |
+
return path
|
| 334 |
+
else:
|
| 335 |
+
try:
|
| 336 |
+
drive = os.environ['HOMEDRIVE']
|
| 337 |
+
except KeyError:
|
| 338 |
+
drive = ''
|
| 339 |
+
userhome = join(drive, os.environ['HOMEPATH'])
|
| 340 |
+
|
| 341 |
+
if i != 1: #~user
|
| 342 |
+
target_user = path[1:i]
|
| 343 |
+
if isinstance(target_user, bytes):
|
| 344 |
+
target_user = os.fsdecode(target_user)
|
| 345 |
+
current_user = os.environ.get('USERNAME')
|
| 346 |
+
|
| 347 |
+
if target_user != current_user:
|
| 348 |
+
# Try to guess user home directory. By default all user
|
| 349 |
+
# profile directories are located in the same place and are
|
| 350 |
+
# named by corresponding usernames. If userhome isn't a
|
| 351 |
+
# normal profile directory, this guess is likely wrong,
|
| 352 |
+
# so we bail out.
|
| 353 |
+
if current_user != basename(userhome):
|
| 354 |
+
return path
|
| 355 |
+
userhome = join(dirname(userhome), target_user)
|
| 356 |
+
|
| 357 |
+
if isinstance(path, bytes):
|
| 358 |
+
userhome = os.fsencode(userhome)
|
| 359 |
+
|
| 360 |
+
return userhome + path[i:]
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
# Expand paths containing shell variable substitutions.
|
| 364 |
+
# The following rules apply:
|
| 365 |
+
# - no expansion within single quotes
|
| 366 |
+
# - '$$' is translated into '$'
|
| 367 |
+
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
|
| 368 |
+
# - ${varname} is accepted.
|
| 369 |
+
# - $varname is accepted.
|
| 370 |
+
# - %varname% is accepted.
|
| 371 |
+
# - varnames can be made out of letters, digits and the characters '_-'
|
| 372 |
+
# (though is not verified in the ${varname} and %varname% cases)
|
| 373 |
+
# XXX With COMMAND.COM you can use any characters in a variable name,
|
| 374 |
+
# XXX except '^|<>='.
|
| 375 |
+
|
| 376 |
+
def expandvars(path):
|
| 377 |
+
"""Expand shell variables of the forms $var, ${var} and %var%.
|
| 378 |
+
|
| 379 |
+
Unknown variables are left unchanged."""
|
| 380 |
+
path = os.fspath(path)
|
| 381 |
+
if isinstance(path, bytes):
|
| 382 |
+
if b'$' not in path and b'%' not in path:
|
| 383 |
+
return path
|
| 384 |
+
import string
|
| 385 |
+
varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii')
|
| 386 |
+
quote = b'\''
|
| 387 |
+
percent = b'%'
|
| 388 |
+
brace = b'{'
|
| 389 |
+
rbrace = b'}'
|
| 390 |
+
dollar = b'$'
|
| 391 |
+
environ = getattr(os, 'environb', None)
|
| 392 |
+
else:
|
| 393 |
+
if '$' not in path and '%' not in path:
|
| 394 |
+
return path
|
| 395 |
+
import string
|
| 396 |
+
varchars = string.ascii_letters + string.digits + '_-'
|
| 397 |
+
quote = '\''
|
| 398 |
+
percent = '%'
|
| 399 |
+
brace = '{'
|
| 400 |
+
rbrace = '}'
|
| 401 |
+
dollar = '$'
|
| 402 |
+
environ = os.environ
|
| 403 |
+
res = path[:0]
|
| 404 |
+
index = 0
|
| 405 |
+
pathlen = len(path)
|
| 406 |
+
while index < pathlen:
|
| 407 |
+
c = path[index:index+1]
|
| 408 |
+
if c == quote: # no expansion within single quotes
|
| 409 |
+
path = path[index + 1:]
|
| 410 |
+
pathlen = len(path)
|
| 411 |
+
try:
|
| 412 |
+
index = path.index(c)
|
| 413 |
+
res += c + path[:index + 1]
|
| 414 |
+
except ValueError:
|
| 415 |
+
res += c + path
|
| 416 |
+
index = pathlen - 1
|
| 417 |
+
elif c == percent: # variable or '%'
|
| 418 |
+
if path[index + 1:index + 2] == percent:
|
| 419 |
+
res += c
|
| 420 |
+
index += 1
|
| 421 |
+
else:
|
| 422 |
+
path = path[index+1:]
|
| 423 |
+
pathlen = len(path)
|
| 424 |
+
try:
|
| 425 |
+
index = path.index(percent)
|
| 426 |
+
except ValueError:
|
| 427 |
+
res += percent + path
|
| 428 |
+
index = pathlen - 1
|
| 429 |
+
else:
|
| 430 |
+
var = path[:index]
|
| 431 |
+
try:
|
| 432 |
+
if environ is None:
|
| 433 |
+
value = os.fsencode(os.environ[os.fsdecode(var)])
|
| 434 |
+
else:
|
| 435 |
+
value = environ[var]
|
| 436 |
+
except KeyError:
|
| 437 |
+
value = percent + var + percent
|
| 438 |
+
res += value
|
| 439 |
+
elif c == dollar: # variable or '$$'
|
| 440 |
+
if path[index + 1:index + 2] == dollar:
|
| 441 |
+
res += c
|
| 442 |
+
index += 1
|
| 443 |
+
elif path[index + 1:index + 2] == brace:
|
| 444 |
+
path = path[index+2:]
|
| 445 |
+
pathlen = len(path)
|
| 446 |
+
try:
|
| 447 |
+
index = path.index(rbrace)
|
| 448 |
+
except ValueError:
|
| 449 |
+
res += dollar + brace + path
|
| 450 |
+
index = pathlen - 1
|
| 451 |
+
else:
|
| 452 |
+
var = path[:index]
|
| 453 |
+
try:
|
| 454 |
+
if environ is None:
|
| 455 |
+
value = os.fsencode(os.environ[os.fsdecode(var)])
|
| 456 |
+
else:
|
| 457 |
+
value = environ[var]
|
| 458 |
+
except KeyError:
|
| 459 |
+
value = dollar + brace + var + rbrace
|
| 460 |
+
res += value
|
| 461 |
+
else:
|
| 462 |
+
var = path[:0]
|
| 463 |
+
index += 1
|
| 464 |
+
c = path[index:index + 1]
|
| 465 |
+
while c and c in varchars:
|
| 466 |
+
var += c
|
| 467 |
+
index += 1
|
| 468 |
+
c = path[index:index + 1]
|
| 469 |
+
try:
|
| 470 |
+
if environ is None:
|
| 471 |
+
value = os.fsencode(os.environ[os.fsdecode(var)])
|
| 472 |
+
else:
|
| 473 |
+
value = environ[var]
|
| 474 |
+
except KeyError:
|
| 475 |
+
value = dollar + var
|
| 476 |
+
res += value
|
| 477 |
+
if c:
|
| 478 |
+
index -= 1
|
| 479 |
+
else:
|
| 480 |
+
res += c
|
| 481 |
+
index += 1
|
| 482 |
+
return res
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
|
| 486 |
+
# Previously, this function also truncated pathnames to 8+3 format,
|
| 487 |
+
# but as this module is called "ntpath", that's obviously wrong!
|
| 488 |
+
|
| 489 |
+
def normpath(path):
|
| 490 |
+
"""Normalize path, eliminating double slashes, etc."""
|
| 491 |
+
path = os.fspath(path)
|
| 492 |
+
if isinstance(path, bytes):
|
| 493 |
+
sep = b'\\'
|
| 494 |
+
altsep = b'/'
|
| 495 |
+
curdir = b'.'
|
| 496 |
+
pardir = b'..'
|
| 497 |
+
special_prefixes = (b'\\\\.\\', b'\\\\?\\')
|
| 498 |
+
else:
|
| 499 |
+
sep = '\\'
|
| 500 |
+
altsep = '/'
|
| 501 |
+
curdir = '.'
|
| 502 |
+
pardir = '..'
|
| 503 |
+
special_prefixes = ('\\\\.\\', '\\\\?\\')
|
| 504 |
+
if path.startswith(special_prefixes):
|
| 505 |
+
# in the case of paths with these prefixes:
|
| 506 |
+
# \\.\ -> device names
|
| 507 |
+
# \\?\ -> literal paths
|
| 508 |
+
# do not do any normalization, but return the path
|
| 509 |
+
# unchanged apart from the call to os.fspath()
|
| 510 |
+
return path
|
| 511 |
+
path = path.replace(altsep, sep)
|
| 512 |
+
prefix, path = splitdrive(path)
|
| 513 |
+
|
| 514 |
+
# collapse initial backslashes
|
| 515 |
+
if path.startswith(sep):
|
| 516 |
+
prefix += sep
|
| 517 |
+
path = path.lstrip(sep)
|
| 518 |
+
|
| 519 |
+
comps = path.split(sep)
|
| 520 |
+
i = 0
|
| 521 |
+
while i < len(comps):
|
| 522 |
+
if not comps[i] or comps[i] == curdir:
|
| 523 |
+
del comps[i]
|
| 524 |
+
elif comps[i] == pardir:
|
| 525 |
+
if i > 0 and comps[i-1] != pardir:
|
| 526 |
+
del comps[i-1:i+1]
|
| 527 |
+
i -= 1
|
| 528 |
+
elif i == 0 and prefix.endswith(sep):
|
| 529 |
+
del comps[i]
|
| 530 |
+
else:
|
| 531 |
+
i += 1
|
| 532 |
+
else:
|
| 533 |
+
i += 1
|
| 534 |
+
# If the path is now empty, substitute '.'
|
| 535 |
+
if not prefix and not comps:
|
| 536 |
+
comps.append(curdir)
|
| 537 |
+
return prefix + sep.join(comps)
|
| 538 |
+
|
| 539 |
+
def _abspath_fallback(path):
|
| 540 |
+
"""Return the absolute version of a path as a fallback function in case
|
| 541 |
+
`nt._getfullpathname` is not available or raises OSError. See bpo-31047 for
|
| 542 |
+
more.
|
| 543 |
+
|
| 544 |
+
"""
|
| 545 |
+
|
| 546 |
+
path = os.fspath(path)
|
| 547 |
+
if not isabs(path):
|
| 548 |
+
if isinstance(path, bytes):
|
| 549 |
+
cwd = os.getcwdb()
|
| 550 |
+
else:
|
| 551 |
+
cwd = os.getcwd()
|
| 552 |
+
path = join(cwd, path)
|
| 553 |
+
return normpath(path)
|
| 554 |
+
|
| 555 |
+
# Return an absolute path.
|
| 556 |
+
try:
|
| 557 |
+
from nt import _getfullpathname
|
| 558 |
+
|
| 559 |
+
except ImportError: # not running on Windows - mock up something sensible
|
| 560 |
+
abspath = _abspath_fallback
|
| 561 |
+
|
| 562 |
+
else: # use native Windows method on Windows
|
| 563 |
+
def abspath(path):
|
| 564 |
+
"""Return the absolute version of a path."""
|
| 565 |
+
try:
|
| 566 |
+
return normpath(_getfullpathname(path))
|
| 567 |
+
except (OSError, ValueError):
|
| 568 |
+
return _abspath_fallback(path)
|
| 569 |
+
|
| 570 |
+
try:
|
| 571 |
+
from nt import _getfinalpathname, readlink as _nt_readlink
|
| 572 |
+
except ImportError:
|
| 573 |
+
# realpath is a no-op on systems without _getfinalpathname support.
|
| 574 |
+
realpath = abspath
|
| 575 |
+
else:
|
| 576 |
+
def _readlink_deep(path):
|
| 577 |
+
# These error codes indicate that we should stop reading links and
|
| 578 |
+
# return the path we currently have.
|
| 579 |
+
# 1: ERROR_INVALID_FUNCTION
|
| 580 |
+
# 2: ERROR_FILE_NOT_FOUND
|
| 581 |
+
# 3: ERROR_DIRECTORY_NOT_FOUND
|
| 582 |
+
# 5: ERROR_ACCESS_DENIED
|
| 583 |
+
# 21: ERROR_NOT_READY (implies drive with no media)
|
| 584 |
+
# 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
|
| 585 |
+
# 50: ERROR_NOT_SUPPORTED (implies no support for reparse points)
|
| 586 |
+
# 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
|
| 587 |
+
# 87: ERROR_INVALID_PARAMETER
|
| 588 |
+
# 4390: ERROR_NOT_A_REPARSE_POINT
|
| 589 |
+
# 4392: ERROR_INVALID_REPARSE_DATA
|
| 590 |
+
# 4393: ERROR_REPARSE_TAG_INVALID
|
| 591 |
+
allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 67, 87, 4390, 4392, 4393
|
| 592 |
+
|
| 593 |
+
seen = set()
|
| 594 |
+
while normcase(path) not in seen:
|
| 595 |
+
seen.add(normcase(path))
|
| 596 |
+
try:
|
| 597 |
+
old_path = path
|
| 598 |
+
path = _nt_readlink(path)
|
| 599 |
+
# Links may be relative, so resolve them against their
|
| 600 |
+
# own location
|
| 601 |
+
if not isabs(path):
|
| 602 |
+
# If it's something other than a symlink, we don't know
|
| 603 |
+
# what it's actually going to be resolved against, so
|
| 604 |
+
# just return the old path.
|
| 605 |
+
if not islink(old_path):
|
| 606 |
+
path = old_path
|
| 607 |
+
break
|
| 608 |
+
path = normpath(join(dirname(old_path), path))
|
| 609 |
+
except OSError as ex:
|
| 610 |
+
if ex.winerror in allowed_winerror:
|
| 611 |
+
break
|
| 612 |
+
raise
|
| 613 |
+
except ValueError:
|
| 614 |
+
# Stop on reparse points that are not symlinks
|
| 615 |
+
break
|
| 616 |
+
return path
|
| 617 |
+
|
| 618 |
+
def _getfinalpathname_nonstrict(path):
|
| 619 |
+
# These error codes indicate that we should stop resolving the path
|
| 620 |
+
# and return the value we currently have.
|
| 621 |
+
# 1: ERROR_INVALID_FUNCTION
|
| 622 |
+
# 2: ERROR_FILE_NOT_FOUND
|
| 623 |
+
# 3: ERROR_DIRECTORY_NOT_FOUND
|
| 624 |
+
# 5: ERROR_ACCESS_DENIED
|
| 625 |
+
# 21: ERROR_NOT_READY (implies drive with no media)
|
| 626 |
+
# 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
|
| 627 |
+
# 50: ERROR_NOT_SUPPORTED
|
| 628 |
+
# 53: ERROR_BAD_NETPATH
|
| 629 |
+
# 65: ERROR_NETWORK_ACCESS_DENIED
|
| 630 |
+
# 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
|
| 631 |
+
# 87: ERROR_INVALID_PARAMETER
|
| 632 |
+
# 123: ERROR_INVALID_NAME
|
| 633 |
+
# 161: ERROR_BAD_PATHNAME
|
| 634 |
+
# 1920: ERROR_CANT_ACCESS_FILE
|
| 635 |
+
# 1921: ERROR_CANT_RESOLVE_FILENAME (implies unfollowable symlink)
|
| 636 |
+
allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 53, 65, 67, 87, 123, 161, 1920, 1921
|
| 637 |
+
|
| 638 |
+
# Non-strict algorithm is to find as much of the target directory
|
| 639 |
+
# as we can and join the rest.
|
| 640 |
+
tail = ''
|
| 641 |
+
while path:
|
| 642 |
+
try:
|
| 643 |
+
path = _getfinalpathname(path)
|
| 644 |
+
return join(path, tail) if tail else path
|
| 645 |
+
except OSError as ex:
|
| 646 |
+
if ex.winerror not in allowed_winerror:
|
| 647 |
+
raise
|
| 648 |
+
try:
|
| 649 |
+
# The OS could not resolve this path fully, so we attempt
|
| 650 |
+
# to follow the link ourselves. If we succeed, join the tail
|
| 651 |
+
# and return.
|
| 652 |
+
new_path = _readlink_deep(path)
|
| 653 |
+
if new_path != path:
|
| 654 |
+
return join(new_path, tail) if tail else new_path
|
| 655 |
+
except OSError:
|
| 656 |
+
# If we fail to readlink(), let's keep traversing
|
| 657 |
+
pass
|
| 658 |
+
path, name = split(path)
|
| 659 |
+
# TODO (bpo-38186): Request the real file name from the directory
|
| 660 |
+
# entry using FindFirstFileW. For now, we will return the path
|
| 661 |
+
# as best we have it
|
| 662 |
+
if path and not name:
|
| 663 |
+
return path + tail
|
| 664 |
+
tail = join(name, tail) if tail else name
|
| 665 |
+
return tail
|
| 666 |
+
|
| 667 |
+
def realpath(path, *, strict=False):
|
| 668 |
+
path = normpath(path)
|
| 669 |
+
if isinstance(path, bytes):
|
| 670 |
+
prefix = b'\\\\?\\'
|
| 671 |
+
unc_prefix = b'\\\\?\\UNC\\'
|
| 672 |
+
new_unc_prefix = b'\\\\'
|
| 673 |
+
cwd = os.getcwdb()
|
| 674 |
+
# bpo-38081: Special case for realpath(b'nul')
|
| 675 |
+
if normcase(path) == normcase(os.fsencode(devnull)):
|
| 676 |
+
return b'\\\\.\\NUL'
|
| 677 |
+
else:
|
| 678 |
+
prefix = '\\\\?\\'
|
| 679 |
+
unc_prefix = '\\\\?\\UNC\\'
|
| 680 |
+
new_unc_prefix = '\\\\'
|
| 681 |
+
cwd = os.getcwd()
|
| 682 |
+
# bpo-38081: Special case for realpath('nul')
|
| 683 |
+
if normcase(path) == normcase(devnull):
|
| 684 |
+
return '\\\\.\\NUL'
|
| 685 |
+
had_prefix = path.startswith(prefix)
|
| 686 |
+
if not had_prefix and not isabs(path):
|
| 687 |
+
path = join(cwd, path)
|
| 688 |
+
try:
|
| 689 |
+
path = _getfinalpathname(path)
|
| 690 |
+
initial_winerror = 0
|
| 691 |
+
except OSError as ex:
|
| 692 |
+
if strict:
|
| 693 |
+
raise
|
| 694 |
+
initial_winerror = ex.winerror
|
| 695 |
+
path = _getfinalpathname_nonstrict(path)
|
| 696 |
+
# The path returned by _getfinalpathname will always start with \\?\ -
|
| 697 |
+
# strip off that prefix unless it was already provided on the original
|
| 698 |
+
# path.
|
| 699 |
+
if not had_prefix and path.startswith(prefix):
|
| 700 |
+
# For UNC paths, the prefix will actually be \\?\UNC\
|
| 701 |
+
# Handle that case as well.
|
| 702 |
+
if path.startswith(unc_prefix):
|
| 703 |
+
spath = new_unc_prefix + path[len(unc_prefix):]
|
| 704 |
+
else:
|
| 705 |
+
spath = path[len(prefix):]
|
| 706 |
+
# Ensure that the non-prefixed path resolves to the same path
|
| 707 |
+
try:
|
| 708 |
+
if _getfinalpathname(spath) == path:
|
| 709 |
+
path = spath
|
| 710 |
+
except OSError as ex:
|
| 711 |
+
# If the path does not exist and originally did not exist, then
|
| 712 |
+
# strip the prefix anyway.
|
| 713 |
+
if ex.winerror == initial_winerror:
|
| 714 |
+
path = spath
|
| 715 |
+
return path
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
# Win9x family and earlier have no Unicode filename support.
|
| 719 |
+
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
|
| 720 |
+
sys.getwindowsversion()[3] >= 2)
|
| 721 |
+
|
| 722 |
+
def relpath(path, start=None):
|
| 723 |
+
"""Return a relative version of a path"""
|
| 724 |
+
path = os.fspath(path)
|
| 725 |
+
if isinstance(path, bytes):
|
| 726 |
+
sep = b'\\'
|
| 727 |
+
curdir = b'.'
|
| 728 |
+
pardir = b'..'
|
| 729 |
+
else:
|
| 730 |
+
sep = '\\'
|
| 731 |
+
curdir = '.'
|
| 732 |
+
pardir = '..'
|
| 733 |
+
|
| 734 |
+
if start is None:
|
| 735 |
+
start = curdir
|
| 736 |
+
|
| 737 |
+
if not path:
|
| 738 |
+
raise ValueError("no path specified")
|
| 739 |
+
|
| 740 |
+
start = os.fspath(start)
|
| 741 |
+
try:
|
| 742 |
+
start_abs = abspath(normpath(start))
|
| 743 |
+
path_abs = abspath(normpath(path))
|
| 744 |
+
start_drive, start_rest = splitdrive(start_abs)
|
| 745 |
+
path_drive, path_rest = splitdrive(path_abs)
|
| 746 |
+
if normcase(start_drive) != normcase(path_drive):
|
| 747 |
+
raise ValueError("path is on mount %r, start on mount %r" % (
|
| 748 |
+
path_drive, start_drive))
|
| 749 |
+
|
| 750 |
+
start_list = [x for x in start_rest.split(sep) if x]
|
| 751 |
+
path_list = [x for x in path_rest.split(sep) if x]
|
| 752 |
+
# Work out how much of the filepath is shared by start and path.
|
| 753 |
+
i = 0
|
| 754 |
+
for e1, e2 in zip(start_list, path_list):
|
| 755 |
+
if normcase(e1) != normcase(e2):
|
| 756 |
+
break
|
| 757 |
+
i += 1
|
| 758 |
+
|
| 759 |
+
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
|
| 760 |
+
if not rel_list:
|
| 761 |
+
return curdir
|
| 762 |
+
return join(*rel_list)
|
| 763 |
+
except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning):
|
| 764 |
+
genericpath._check_arg_types('relpath', path, start)
|
| 765 |
+
raise
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
# Return the longest common sub-path of the sequence of paths given as input.
|
| 769 |
+
# The function is case-insensitive and 'separator-insensitive', i.e. if the
|
| 770 |
+
# only difference between two paths is the use of '\' versus '/' as separator,
|
| 771 |
+
# they are deemed to be equal.
|
| 772 |
+
#
|
| 773 |
+
# However, the returned path will have the standard '\' separator (even if the
|
| 774 |
+
# given paths had the alternative '/' separator) and will have the case of the
|
| 775 |
+
# first path given in the sequence. Additionally, any trailing separator is
|
| 776 |
+
# stripped from the returned path.
|
| 777 |
+
|
| 778 |
+
def commonpath(paths):
|
| 779 |
+
"""Given a sequence of path names, returns the longest common sub-path."""
|
| 780 |
+
|
| 781 |
+
if not paths:
|
| 782 |
+
raise ValueError('commonpath() arg is an empty sequence')
|
| 783 |
+
|
| 784 |
+
paths = tuple(map(os.fspath, paths))
|
| 785 |
+
if isinstance(paths[0], bytes):
|
| 786 |
+
sep = b'\\'
|
| 787 |
+
altsep = b'/'
|
| 788 |
+
curdir = b'.'
|
| 789 |
+
else:
|
| 790 |
+
sep = '\\'
|
| 791 |
+
altsep = '/'
|
| 792 |
+
curdir = '.'
|
| 793 |
+
|
| 794 |
+
try:
|
| 795 |
+
drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths]
|
| 796 |
+
split_paths = [p.split(sep) for d, p in drivesplits]
|
| 797 |
+
|
| 798 |
+
try:
|
| 799 |
+
isabs, = set(p[:1] == sep for d, p in drivesplits)
|
| 800 |
+
except ValueError:
|
| 801 |
+
raise ValueError("Can't mix absolute and relative paths") from None
|
| 802 |
+
|
| 803 |
+
# Check that all drive letters or UNC paths match. The check is made only
|
| 804 |
+
# now otherwise type errors for mixing strings and bytes would not be
|
| 805 |
+
# caught.
|
| 806 |
+
if len(set(d for d, p in drivesplits)) != 1:
|
| 807 |
+
raise ValueError("Paths don't have the same drive")
|
| 808 |
+
|
| 809 |
+
drive, path = splitdrive(paths[0].replace(altsep, sep))
|
| 810 |
+
common = path.split(sep)
|
| 811 |
+
common = [c for c in common if c and c != curdir]
|
| 812 |
+
|
| 813 |
+
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
|
| 814 |
+
s1 = min(split_paths)
|
| 815 |
+
s2 = max(split_paths)
|
| 816 |
+
for i, c in enumerate(s1):
|
| 817 |
+
if c != s2[i]:
|
| 818 |
+
common = common[:i]
|
| 819 |
+
break
|
| 820 |
+
else:
|
| 821 |
+
common = common[:len(s1)]
|
| 822 |
+
|
| 823 |
+
prefix = drive + sep if isabs else drive
|
| 824 |
+
return prefix + sep.join(common)
|
| 825 |
+
except (TypeError, AttributeError):
|
| 826 |
+
genericpath._check_arg_types('commonpath', *paths)
|
| 827 |
+
raise
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
try:
|
| 831 |
+
# The genericpath.isdir implementation uses os.stat and checks the mode
|
| 832 |
+
# attribute to tell whether or not the path is a directory.
|
| 833 |
+
# This is overkill on Windows - just pass the path to GetFileAttributes
|
| 834 |
+
# and check the attribute from there.
|
| 835 |
+
from nt import _isdir as isdir
|
| 836 |
+
except ImportError:
|
| 837 |
+
# Use genericpath.isdir as imported above.
|
| 838 |
+
pass
|
llava/lib/python3.10/operator.py
ADDED
|
@@ -0,0 +1,460 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Operator Interface
|
| 3 |
+
|
| 4 |
+
This module exports a set of functions corresponding to the intrinsic
|
| 5 |
+
operators of Python. For example, operator.add(x, y) is equivalent
|
| 6 |
+
to the expression x+y. The function names are those used for special
|
| 7 |
+
methods; variants without leading and trailing '__' are also provided
|
| 8 |
+
for convenience.
|
| 9 |
+
|
| 10 |
+
This is the pure Python implementation of the module.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
__all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf',
|
| 14 |
+
'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand',
|
| 15 |
+
'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul',
|
| 16 |
+
'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift',
|
| 17 |
+
'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le',
|
| 18 |
+
'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod',
|
| 19 |
+
'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift',
|
| 20 |
+
'setitem', 'sub', 'truediv', 'truth', 'xor']
|
| 21 |
+
|
| 22 |
+
from builtins import abs as _abs
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Comparison Operations *******************************************************#
|
| 26 |
+
|
| 27 |
+
def lt(a, b):
|
| 28 |
+
"Same as a < b."
|
| 29 |
+
return a < b
|
| 30 |
+
|
| 31 |
+
def le(a, b):
|
| 32 |
+
"Same as a <= b."
|
| 33 |
+
return a <= b
|
| 34 |
+
|
| 35 |
+
def eq(a, b):
|
| 36 |
+
"Same as a == b."
|
| 37 |
+
return a == b
|
| 38 |
+
|
| 39 |
+
def ne(a, b):
|
| 40 |
+
"Same as a != b."
|
| 41 |
+
return a != b
|
| 42 |
+
|
| 43 |
+
def ge(a, b):
|
| 44 |
+
"Same as a >= b."
|
| 45 |
+
return a >= b
|
| 46 |
+
|
| 47 |
+
def gt(a, b):
|
| 48 |
+
"Same as a > b."
|
| 49 |
+
return a > b
|
| 50 |
+
|
| 51 |
+
# Logical Operations **********************************************************#
|
| 52 |
+
|
| 53 |
+
def not_(a):
|
| 54 |
+
"Same as not a."
|
| 55 |
+
return not a
|
| 56 |
+
|
| 57 |
+
def truth(a):
|
| 58 |
+
"Return True if a is true, False otherwise."
|
| 59 |
+
return True if a else False
|
| 60 |
+
|
| 61 |
+
def is_(a, b):
|
| 62 |
+
"Same as a is b."
|
| 63 |
+
return a is b
|
| 64 |
+
|
| 65 |
+
def is_not(a, b):
|
| 66 |
+
"Same as a is not b."
|
| 67 |
+
return a is not b
|
| 68 |
+
|
| 69 |
+
# Mathematical/Bitwise Operations *********************************************#
|
| 70 |
+
|
| 71 |
+
def abs(a):
|
| 72 |
+
"Same as abs(a)."
|
| 73 |
+
return _abs(a)
|
| 74 |
+
|
| 75 |
+
def add(a, b):
|
| 76 |
+
"Same as a + b."
|
| 77 |
+
return a + b
|
| 78 |
+
|
| 79 |
+
def and_(a, b):
|
| 80 |
+
"Same as a & b."
|
| 81 |
+
return a & b
|
| 82 |
+
|
| 83 |
+
def floordiv(a, b):
|
| 84 |
+
"Same as a // b."
|
| 85 |
+
return a // b
|
| 86 |
+
|
| 87 |
+
def index(a):
|
| 88 |
+
"Same as a.__index__()."
|
| 89 |
+
return a.__index__()
|
| 90 |
+
|
| 91 |
+
def inv(a):
|
| 92 |
+
"Same as ~a."
|
| 93 |
+
return ~a
|
| 94 |
+
invert = inv
|
| 95 |
+
|
| 96 |
+
def lshift(a, b):
|
| 97 |
+
"Same as a << b."
|
| 98 |
+
return a << b
|
| 99 |
+
|
| 100 |
+
def mod(a, b):
|
| 101 |
+
"Same as a % b."
|
| 102 |
+
return a % b
|
| 103 |
+
|
| 104 |
+
def mul(a, b):
|
| 105 |
+
"Same as a * b."
|
| 106 |
+
return a * b
|
| 107 |
+
|
| 108 |
+
def matmul(a, b):
|
| 109 |
+
"Same as a @ b."
|
| 110 |
+
return a @ b
|
| 111 |
+
|
| 112 |
+
def neg(a):
|
| 113 |
+
"Same as -a."
|
| 114 |
+
return -a
|
| 115 |
+
|
| 116 |
+
def or_(a, b):
|
| 117 |
+
"Same as a | b."
|
| 118 |
+
return a | b
|
| 119 |
+
|
| 120 |
+
def pos(a):
|
| 121 |
+
"Same as +a."
|
| 122 |
+
return +a
|
| 123 |
+
|
| 124 |
+
def pow(a, b):
|
| 125 |
+
"Same as a ** b."
|
| 126 |
+
return a ** b
|
| 127 |
+
|
| 128 |
+
def rshift(a, b):
|
| 129 |
+
"Same as a >> b."
|
| 130 |
+
return a >> b
|
| 131 |
+
|
| 132 |
+
def sub(a, b):
|
| 133 |
+
"Same as a - b."
|
| 134 |
+
return a - b
|
| 135 |
+
|
| 136 |
+
def truediv(a, b):
|
| 137 |
+
"Same as a / b."
|
| 138 |
+
return a / b
|
| 139 |
+
|
| 140 |
+
def xor(a, b):
|
| 141 |
+
"Same as a ^ b."
|
| 142 |
+
return a ^ b
|
| 143 |
+
|
| 144 |
+
# Sequence Operations *********************************************************#
|
| 145 |
+
|
| 146 |
+
def concat(a, b):
|
| 147 |
+
"Same as a + b, for a and b sequences."
|
| 148 |
+
if not hasattr(a, '__getitem__'):
|
| 149 |
+
msg = "'%s' object can't be concatenated" % type(a).__name__
|
| 150 |
+
raise TypeError(msg)
|
| 151 |
+
return a + b
|
| 152 |
+
|
| 153 |
+
def contains(a, b):
|
| 154 |
+
"Same as b in a (note reversed operands)."
|
| 155 |
+
return b in a
|
| 156 |
+
|
| 157 |
+
def countOf(a, b):
|
| 158 |
+
"Return the number of items in a which are, or which equal, b."
|
| 159 |
+
count = 0
|
| 160 |
+
for i in a:
|
| 161 |
+
if i is b or i == b:
|
| 162 |
+
count += 1
|
| 163 |
+
return count
|
| 164 |
+
|
| 165 |
+
def delitem(a, b):
|
| 166 |
+
"Same as del a[b]."
|
| 167 |
+
del a[b]
|
| 168 |
+
|
| 169 |
+
def getitem(a, b):
|
| 170 |
+
"Same as a[b]."
|
| 171 |
+
return a[b]
|
| 172 |
+
|
| 173 |
+
def indexOf(a, b):
|
| 174 |
+
"Return the first index of b in a."
|
| 175 |
+
for i, j in enumerate(a):
|
| 176 |
+
if j is b or j == b:
|
| 177 |
+
return i
|
| 178 |
+
else:
|
| 179 |
+
raise ValueError('sequence.index(x): x not in sequence')
|
| 180 |
+
|
| 181 |
+
def setitem(a, b, c):
|
| 182 |
+
"Same as a[b] = c."
|
| 183 |
+
a[b] = c
|
| 184 |
+
|
| 185 |
+
def length_hint(obj, default=0):
|
| 186 |
+
"""
|
| 187 |
+
Return an estimate of the number of items in obj.
|
| 188 |
+
This is useful for presizing containers when building from an iterable.
|
| 189 |
+
|
| 190 |
+
If the object supports len(), the result will be exact. Otherwise, it may
|
| 191 |
+
over- or under-estimate by an arbitrary amount. The result will be an
|
| 192 |
+
integer >= 0.
|
| 193 |
+
"""
|
| 194 |
+
if not isinstance(default, int):
|
| 195 |
+
msg = ("'%s' object cannot be interpreted as an integer" %
|
| 196 |
+
type(default).__name__)
|
| 197 |
+
raise TypeError(msg)
|
| 198 |
+
|
| 199 |
+
try:
|
| 200 |
+
return len(obj)
|
| 201 |
+
except TypeError:
|
| 202 |
+
pass
|
| 203 |
+
|
| 204 |
+
try:
|
| 205 |
+
hint = type(obj).__length_hint__
|
| 206 |
+
except AttributeError:
|
| 207 |
+
return default
|
| 208 |
+
|
| 209 |
+
try:
|
| 210 |
+
val = hint(obj)
|
| 211 |
+
except TypeError:
|
| 212 |
+
return default
|
| 213 |
+
if val is NotImplemented:
|
| 214 |
+
return default
|
| 215 |
+
if not isinstance(val, int):
|
| 216 |
+
msg = ('__length_hint__ must be integer, not %s' %
|
| 217 |
+
type(val).__name__)
|
| 218 |
+
raise TypeError(msg)
|
| 219 |
+
if val < 0:
|
| 220 |
+
msg = '__length_hint__() should return >= 0'
|
| 221 |
+
raise ValueError(msg)
|
| 222 |
+
return val
|
| 223 |
+
|
| 224 |
+
# Generalized Lookup Objects **************************************************#
|
| 225 |
+
|
| 226 |
+
class attrgetter:
|
| 227 |
+
"""
|
| 228 |
+
Return a callable object that fetches the given attribute(s) from its operand.
|
| 229 |
+
After f = attrgetter('name'), the call f(r) returns r.name.
|
| 230 |
+
After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
|
| 231 |
+
After h = attrgetter('name.first', 'name.last'), the call h(r) returns
|
| 232 |
+
(r.name.first, r.name.last).
|
| 233 |
+
"""
|
| 234 |
+
__slots__ = ('_attrs', '_call')
|
| 235 |
+
|
| 236 |
+
def __init__(self, attr, *attrs):
|
| 237 |
+
if not attrs:
|
| 238 |
+
if not isinstance(attr, str):
|
| 239 |
+
raise TypeError('attribute name must be a string')
|
| 240 |
+
self._attrs = (attr,)
|
| 241 |
+
names = attr.split('.')
|
| 242 |
+
def func(obj):
|
| 243 |
+
for name in names:
|
| 244 |
+
obj = getattr(obj, name)
|
| 245 |
+
return obj
|
| 246 |
+
self._call = func
|
| 247 |
+
else:
|
| 248 |
+
self._attrs = (attr,) + attrs
|
| 249 |
+
getters = tuple(map(attrgetter, self._attrs))
|
| 250 |
+
def func(obj):
|
| 251 |
+
return tuple(getter(obj) for getter in getters)
|
| 252 |
+
self._call = func
|
| 253 |
+
|
| 254 |
+
def __call__(self, obj):
|
| 255 |
+
return self._call(obj)
|
| 256 |
+
|
| 257 |
+
def __repr__(self):
|
| 258 |
+
return '%s.%s(%s)' % (self.__class__.__module__,
|
| 259 |
+
self.__class__.__qualname__,
|
| 260 |
+
', '.join(map(repr, self._attrs)))
|
| 261 |
+
|
| 262 |
+
def __reduce__(self):
|
| 263 |
+
return self.__class__, self._attrs
|
| 264 |
+
|
| 265 |
+
class itemgetter:
|
| 266 |
+
"""
|
| 267 |
+
Return a callable object that fetches the given item(s) from its operand.
|
| 268 |
+
After f = itemgetter(2), the call f(r) returns r[2].
|
| 269 |
+
After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
|
| 270 |
+
"""
|
| 271 |
+
__slots__ = ('_items', '_call')
|
| 272 |
+
|
| 273 |
+
def __init__(self, item, *items):
|
| 274 |
+
if not items:
|
| 275 |
+
self._items = (item,)
|
| 276 |
+
def func(obj):
|
| 277 |
+
return obj[item]
|
| 278 |
+
self._call = func
|
| 279 |
+
else:
|
| 280 |
+
self._items = items = (item,) + items
|
| 281 |
+
def func(obj):
|
| 282 |
+
return tuple(obj[i] for i in items)
|
| 283 |
+
self._call = func
|
| 284 |
+
|
| 285 |
+
def __call__(self, obj):
|
| 286 |
+
return self._call(obj)
|
| 287 |
+
|
| 288 |
+
def __repr__(self):
|
| 289 |
+
return '%s.%s(%s)' % (self.__class__.__module__,
|
| 290 |
+
self.__class__.__name__,
|
| 291 |
+
', '.join(map(repr, self._items)))
|
| 292 |
+
|
| 293 |
+
def __reduce__(self):
|
| 294 |
+
return self.__class__, self._items
|
| 295 |
+
|
| 296 |
+
class methodcaller:
|
| 297 |
+
"""
|
| 298 |
+
Return a callable object that calls the given method on its operand.
|
| 299 |
+
After f = methodcaller('name'), the call f(r) returns r.name().
|
| 300 |
+
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
|
| 301 |
+
r.name('date', foo=1).
|
| 302 |
+
"""
|
| 303 |
+
__slots__ = ('_name', '_args', '_kwargs')
|
| 304 |
+
|
| 305 |
+
def __init__(self, name, /, *args, **kwargs):
|
| 306 |
+
self._name = name
|
| 307 |
+
if not isinstance(self._name, str):
|
| 308 |
+
raise TypeError('method name must be a string')
|
| 309 |
+
self._args = args
|
| 310 |
+
self._kwargs = kwargs
|
| 311 |
+
|
| 312 |
+
def __call__(self, obj):
|
| 313 |
+
return getattr(obj, self._name)(*self._args, **self._kwargs)
|
| 314 |
+
|
| 315 |
+
def __repr__(self):
|
| 316 |
+
args = [repr(self._name)]
|
| 317 |
+
args.extend(map(repr, self._args))
|
| 318 |
+
args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items())
|
| 319 |
+
return '%s.%s(%s)' % (self.__class__.__module__,
|
| 320 |
+
self.__class__.__name__,
|
| 321 |
+
', '.join(args))
|
| 322 |
+
|
| 323 |
+
def __reduce__(self):
|
| 324 |
+
if not self._kwargs:
|
| 325 |
+
return self.__class__, (self._name,) + self._args
|
| 326 |
+
else:
|
| 327 |
+
from functools import partial
|
| 328 |
+
return partial(self.__class__, self._name, **self._kwargs), self._args
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
# In-place Operations *********************************************************#
|
| 332 |
+
|
| 333 |
+
def iadd(a, b):
|
| 334 |
+
"Same as a += b."
|
| 335 |
+
a += b
|
| 336 |
+
return a
|
| 337 |
+
|
| 338 |
+
def iand(a, b):
|
| 339 |
+
"Same as a &= b."
|
| 340 |
+
a &= b
|
| 341 |
+
return a
|
| 342 |
+
|
| 343 |
+
def iconcat(a, b):
|
| 344 |
+
"Same as a += b, for a and b sequences."
|
| 345 |
+
if not hasattr(a, '__getitem__'):
|
| 346 |
+
msg = "'%s' object can't be concatenated" % type(a).__name__
|
| 347 |
+
raise TypeError(msg)
|
| 348 |
+
a += b
|
| 349 |
+
return a
|
| 350 |
+
|
| 351 |
+
def ifloordiv(a, b):
|
| 352 |
+
"Same as a //= b."
|
| 353 |
+
a //= b
|
| 354 |
+
return a
|
| 355 |
+
|
| 356 |
+
def ilshift(a, b):
|
| 357 |
+
"Same as a <<= b."
|
| 358 |
+
a <<= b
|
| 359 |
+
return a
|
| 360 |
+
|
| 361 |
+
def imod(a, b):
|
| 362 |
+
"Same as a %= b."
|
| 363 |
+
a %= b
|
| 364 |
+
return a
|
| 365 |
+
|
| 366 |
+
def imul(a, b):
|
| 367 |
+
"Same as a *= b."
|
| 368 |
+
a *= b
|
| 369 |
+
return a
|
| 370 |
+
|
| 371 |
+
def imatmul(a, b):
|
| 372 |
+
"Same as a @= b."
|
| 373 |
+
a @= b
|
| 374 |
+
return a
|
| 375 |
+
|
| 376 |
+
def ior(a, b):
|
| 377 |
+
"Same as a |= b."
|
| 378 |
+
a |= b
|
| 379 |
+
return a
|
| 380 |
+
|
| 381 |
+
def ipow(a, b):
|
| 382 |
+
"Same as a **= b."
|
| 383 |
+
a **=b
|
| 384 |
+
return a
|
| 385 |
+
|
| 386 |
+
def irshift(a, b):
|
| 387 |
+
"Same as a >>= b."
|
| 388 |
+
a >>= b
|
| 389 |
+
return a
|
| 390 |
+
|
| 391 |
+
def isub(a, b):
|
| 392 |
+
"Same as a -= b."
|
| 393 |
+
a -= b
|
| 394 |
+
return a
|
| 395 |
+
|
| 396 |
+
def itruediv(a, b):
|
| 397 |
+
"Same as a /= b."
|
| 398 |
+
a /= b
|
| 399 |
+
return a
|
| 400 |
+
|
| 401 |
+
def ixor(a, b):
|
| 402 |
+
"Same as a ^= b."
|
| 403 |
+
a ^= b
|
| 404 |
+
return a
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
try:
|
| 408 |
+
from _operator import *
|
| 409 |
+
except ImportError:
|
| 410 |
+
pass
|
| 411 |
+
else:
|
| 412 |
+
from _operator import __doc__
|
| 413 |
+
|
| 414 |
+
# All of these "__func__ = func" assignments have to happen after importing
|
| 415 |
+
# from _operator to make sure they're set to the right function
|
| 416 |
+
__lt__ = lt
|
| 417 |
+
__le__ = le
|
| 418 |
+
__eq__ = eq
|
| 419 |
+
__ne__ = ne
|
| 420 |
+
__ge__ = ge
|
| 421 |
+
__gt__ = gt
|
| 422 |
+
__not__ = not_
|
| 423 |
+
__abs__ = abs
|
| 424 |
+
__add__ = add
|
| 425 |
+
__and__ = and_
|
| 426 |
+
__floordiv__ = floordiv
|
| 427 |
+
__index__ = index
|
| 428 |
+
__inv__ = inv
|
| 429 |
+
__invert__ = invert
|
| 430 |
+
__lshift__ = lshift
|
| 431 |
+
__mod__ = mod
|
| 432 |
+
__mul__ = mul
|
| 433 |
+
__matmul__ = matmul
|
| 434 |
+
__neg__ = neg
|
| 435 |
+
__or__ = or_
|
| 436 |
+
__pos__ = pos
|
| 437 |
+
__pow__ = pow
|
| 438 |
+
__rshift__ = rshift
|
| 439 |
+
__sub__ = sub
|
| 440 |
+
__truediv__ = truediv
|
| 441 |
+
__xor__ = xor
|
| 442 |
+
__concat__ = concat
|
| 443 |
+
__contains__ = contains
|
| 444 |
+
__delitem__ = delitem
|
| 445 |
+
__getitem__ = getitem
|
| 446 |
+
__setitem__ = setitem
|
| 447 |
+
__iadd__ = iadd
|
| 448 |
+
__iand__ = iand
|
| 449 |
+
__iconcat__ = iconcat
|
| 450 |
+
__ifloordiv__ = ifloordiv
|
| 451 |
+
__ilshift__ = ilshift
|
| 452 |
+
__imod__ = imod
|
| 453 |
+
__imul__ = imul
|
| 454 |
+
__imatmul__ = imatmul
|
| 455 |
+
__ior__ = ior
|
| 456 |
+
__ipow__ = ipow
|
| 457 |
+
__irshift__ = irshift
|
| 458 |
+
__isub__ = isub
|
| 459 |
+
__itruediv__ = itruediv
|
| 460 |
+
__ixor__ = ixor
|
llava/lib/python3.10/pipes.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Conversion pipeline templates.
|
| 2 |
+
|
| 3 |
+
The problem:
|
| 4 |
+
------------
|
| 5 |
+
|
| 6 |
+
Suppose you have some data that you want to convert to another format,
|
| 7 |
+
such as from GIF image format to PPM image format. Maybe the
|
| 8 |
+
conversion involves several steps (e.g. piping it through compress or
|
| 9 |
+
uuencode). Some of the conversion steps may require that their input
|
| 10 |
+
is a disk file, others may be able to read standard input; similar for
|
| 11 |
+
their output. The input to the entire conversion may also be read
|
| 12 |
+
from a disk file or from an open file, and similar for its output.
|
| 13 |
+
|
| 14 |
+
The module lets you construct a pipeline template by sticking one or
|
| 15 |
+
more conversion steps together. It will take care of creating and
|
| 16 |
+
removing temporary files if they are necessary to hold intermediate
|
| 17 |
+
data. You can then use the template to do conversions from many
|
| 18 |
+
different sources to many different destinations. The temporary
|
| 19 |
+
file names used are different each time the template is used.
|
| 20 |
+
|
| 21 |
+
The templates are objects so you can create templates for many
|
| 22 |
+
different conversion steps and store them in a dictionary, for
|
| 23 |
+
instance.
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
Directions:
|
| 27 |
+
-----------
|
| 28 |
+
|
| 29 |
+
To create a template:
|
| 30 |
+
t = Template()
|
| 31 |
+
|
| 32 |
+
To add a conversion step to a template:
|
| 33 |
+
t.append(command, kind)
|
| 34 |
+
where kind is a string of two characters: the first is '-' if the
|
| 35 |
+
command reads its standard input or 'f' if it requires a file; the
|
| 36 |
+
second likewise for the output. The command must be valid /bin/sh
|
| 37 |
+
syntax. If input or output files are required, they are passed as
|
| 38 |
+
$IN and $OUT; otherwise, it must be possible to use the command in
|
| 39 |
+
a pipeline.
|
| 40 |
+
|
| 41 |
+
To add a conversion step at the beginning:
|
| 42 |
+
t.prepend(command, kind)
|
| 43 |
+
|
| 44 |
+
To convert a file to another file using a template:
|
| 45 |
+
sts = t.copy(infile, outfile)
|
| 46 |
+
If infile or outfile are the empty string, standard input is read or
|
| 47 |
+
standard output is written, respectively. The return value is the
|
| 48 |
+
exit status of the conversion pipeline.
|
| 49 |
+
|
| 50 |
+
To open a file for reading or writing through a conversion pipeline:
|
| 51 |
+
fp = t.open(file, mode)
|
| 52 |
+
where mode is 'r' to read the file, or 'w' to write it -- just like
|
| 53 |
+
for the built-in function open() or for os.popen().
|
| 54 |
+
|
| 55 |
+
To create a new template object initialized to a given one:
|
| 56 |
+
t2 = t.clone()
|
| 57 |
+
""" # '
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
import re
|
| 61 |
+
import os
|
| 62 |
+
import tempfile
|
| 63 |
+
# we import the quote function rather than the module for backward compat
|
| 64 |
+
# (quote used to be an undocumented but used function in pipes)
|
| 65 |
+
from shlex import quote
|
| 66 |
+
|
| 67 |
+
__all__ = ["Template"]
|
| 68 |
+
|
| 69 |
+
# Conversion step kinds
|
| 70 |
+
|
| 71 |
+
FILEIN_FILEOUT = 'ff' # Must read & write real files
|
| 72 |
+
STDIN_FILEOUT = '-f' # Must write a real file
|
| 73 |
+
FILEIN_STDOUT = 'f-' # Must read a real file
|
| 74 |
+
STDIN_STDOUT = '--' # Normal pipeline element
|
| 75 |
+
SOURCE = '.-' # Must be first, writes stdout
|
| 76 |
+
SINK = '-.' # Must be last, reads stdin
|
| 77 |
+
|
| 78 |
+
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
|
| 79 |
+
SOURCE, SINK]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Template:
|
| 83 |
+
"""Class representing a pipeline template."""
|
| 84 |
+
|
| 85 |
+
def __init__(self):
|
| 86 |
+
"""Template() returns a fresh pipeline template."""
|
| 87 |
+
self.debugging = 0
|
| 88 |
+
self.reset()
|
| 89 |
+
|
| 90 |
+
def __repr__(self):
|
| 91 |
+
"""t.__repr__() implements repr(t)."""
|
| 92 |
+
return '<Template instance, steps=%r>' % (self.steps,)
|
| 93 |
+
|
| 94 |
+
def reset(self):
|
| 95 |
+
"""t.reset() restores a pipeline template to its initial state."""
|
| 96 |
+
self.steps = []
|
| 97 |
+
|
| 98 |
+
def clone(self):
|
| 99 |
+
"""t.clone() returns a new pipeline template with identical
|
| 100 |
+
initial state as the current one."""
|
| 101 |
+
t = Template()
|
| 102 |
+
t.steps = self.steps[:]
|
| 103 |
+
t.debugging = self.debugging
|
| 104 |
+
return t
|
| 105 |
+
|
| 106 |
+
def debug(self, flag):
|
| 107 |
+
"""t.debug(flag) turns debugging on or off."""
|
| 108 |
+
self.debugging = flag
|
| 109 |
+
|
| 110 |
+
def append(self, cmd, kind):
|
| 111 |
+
"""t.append(cmd, kind) adds a new step at the end."""
|
| 112 |
+
if not isinstance(cmd, str):
|
| 113 |
+
raise TypeError('Template.append: cmd must be a string')
|
| 114 |
+
if kind not in stepkinds:
|
| 115 |
+
raise ValueError('Template.append: bad kind %r' % (kind,))
|
| 116 |
+
if kind == SOURCE:
|
| 117 |
+
raise ValueError('Template.append: SOURCE can only be prepended')
|
| 118 |
+
if self.steps and self.steps[-1][1] == SINK:
|
| 119 |
+
raise ValueError('Template.append: already ends with SINK')
|
| 120 |
+
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
|
| 121 |
+
raise ValueError('Template.append: missing $IN in cmd')
|
| 122 |
+
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
|
| 123 |
+
raise ValueError('Template.append: missing $OUT in cmd')
|
| 124 |
+
self.steps.append((cmd, kind))
|
| 125 |
+
|
| 126 |
+
def prepend(self, cmd, kind):
|
| 127 |
+
"""t.prepend(cmd, kind) adds a new step at the front."""
|
| 128 |
+
if not isinstance(cmd, str):
|
| 129 |
+
raise TypeError('Template.prepend: cmd must be a string')
|
| 130 |
+
if kind not in stepkinds:
|
| 131 |
+
raise ValueError('Template.prepend: bad kind %r' % (kind,))
|
| 132 |
+
if kind == SINK:
|
| 133 |
+
raise ValueError('Template.prepend: SINK can only be appended')
|
| 134 |
+
if self.steps and self.steps[0][1] == SOURCE:
|
| 135 |
+
raise ValueError('Template.prepend: already begins with SOURCE')
|
| 136 |
+
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
|
| 137 |
+
raise ValueError('Template.prepend: missing $IN in cmd')
|
| 138 |
+
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
|
| 139 |
+
raise ValueError('Template.prepend: missing $OUT in cmd')
|
| 140 |
+
self.steps.insert(0, (cmd, kind))
|
| 141 |
+
|
| 142 |
+
def open(self, file, rw):
    """t.open(file, rw) returns a pipe or file object open for
    reading or writing; the file is the other end of the pipeline."""
    # Dispatch on the mode character; anything else is an error.
    if rw == 'r':
        return self.open_r(file)
    elif rw == 'w':
        return self.open_w(file)
    raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
                     % (rw,))
|
| 151 |
+
|
| 152 |
+
def open_r(self, file):
    """t.open_r(file) and t.open_w(file) implement
    t.open(file, 'r') and t.open(file, 'w') respectively.

    Opens *file* as the input of the pipeline and returns a readable
    file-like object for the pipeline's output end.
    """
    # An empty pipeline is the identity: just read the file directly.
    if not self.steps:
        return open(file, 'r')
    # A pipeline ending in a SINK consumes its input and produces no
    # readable output.
    if self.steps[-1][1] == SINK:
        # Bug fix: message previously read "pipeline ends width SINK".
        raise ValueError('Template.open_r: pipeline ends with SINK')
    cmd = self.makepipeline(file, '')
    return os.popen(cmd, 'r')
|
| 161 |
+
|
| 162 |
+
def open_w(self, file):
    """Implement t.open(file, 'w'): return a writable object whose data
    is fed through the pipeline into *file*."""
    # An empty pipeline is the identity: just write the file directly.
    if not self.steps:
        return open(file, 'w')
    # A pipeline beginning with a SOURCE produces its own input and
    # cannot accept written data.
    if self.steps[0][1] == SOURCE:
        raise ValueError('Template.open_w: pipeline begins with SOURCE')
    cmd = self.makepipeline('', file)
    return os.popen(cmd, 'w')
|
| 169 |
+
|
| 170 |
+
def copy(self, infile, outfile):
    """Run the pipeline once, copying infile to outfile; return the
    shell's exit status."""
    pipeline_cmd = self.makepipeline(infile, outfile)
    return os.system(pipeline_cmd)
|
| 172 |
+
|
| 173 |
+
def makepipeline(self, infile, outfile):
    """Build the shell command implementing this template, tracing it
    when debugging is enabled."""
    shell_cmd = makepipeline(infile, self.steps, outfile)
    if self.debugging:
        # Show the command, and make the shell echo each step ('set -x')
        # as it executes.
        print(shell_cmd)
        shell_cmd = 'set -x; ' + shell_cmd
    return shell_cmd
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def makepipeline(infile, steps, outfile):
    """Translate (infile, steps, outfile) into one shell command string.

    Each step is a (cmd, kind) pair as maintained by Template.  Stages
    that communicate through named files are connected with temporary
    files, which the generated command removes on exit or on common
    signals via a 'trap'.
    """
    # Build a table with one row per command:
    # [input filename or '', command string, kind, output filename or '']
    # (Renamed from 'list', which shadowed the builtin.)
    table = [['', cmd, kind, ''] for cmd, kind in steps]
    # Make sure there is at least one step.
    if not table:
        table.append(['', 'cat', '--', ''])
    # Take care of the input end: if the first step wants a named file
    # but none was given, prepend a 'cat' to feed it from stdin.
    [cmd, kind] = table[0][1:3]
    if kind[0] == 'f' and not infile:
        table.insert(0, ['', 'cat', '--', ''])
    table[0][0] = infile
    # Same for the output end.
    [cmd, kind] = table[-1][1:3]
    if kind[1] == 'f' and not outfile:
        table.append(['', 'cat', '--', ''])
    table[-1][-1] = outfile
    # Invent temporary files to connect stages that need files.
    garbage = []
    for i in range(1, len(table)):
        lkind = table[i-1][2]
        rkind = table[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            table[i-1][-1] = table[i][0] = temp
    # Rewrite each command: bind $IN/$OUT for file-kind stages, add
    # shell redirections for stdin/stdout stages.
    for item in table:
        [inf, cmd, kind, outf] = item
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    # Join the stages: '|' for piped stages, newline for file-connected
    # ones; brace-group piped stages that also touch files.
    cmdlist = table[0][1]
    for item in table[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    # Arrange for the temporary files to be removed when the command
    # finishes or is interrupted (HUP INT QUIT PIPE ALRM TERM).
    if garbage:
        rmcmd = 'rm -f'
        for file in garbage:
            rmcmd = rmcmd + ' ' + quote(file)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    return cmdlist
|
llava/lib/python3.10/pkgutil.py
ADDED
|
@@ -0,0 +1,715 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities to support packages."""
|
| 2 |
+
|
| 3 |
+
from collections import namedtuple
|
| 4 |
+
from functools import singledispatch as simplegeneric
|
| 5 |
+
import importlib
|
| 6 |
+
import importlib.util
|
| 7 |
+
import importlib.machinery
|
| 8 |
+
import os
|
| 9 |
+
import os.path
|
| 10 |
+
import sys
|
| 11 |
+
from types import ModuleType
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
|
| 16 |
+
'walk_packages', 'iter_modules', 'get_data',
|
| 17 |
+
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
|
| 18 |
+
'ModuleInfo',
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
ModuleInfo = namedtuple('ModuleInfo', 'module_finder name ispkg')
|
| 23 |
+
ModuleInfo.__doc__ = 'A namedtuple with minimal info about a module.'
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _get_spec(finder, name):
|
| 27 |
+
"""Return the finder-specific module spec."""
|
| 28 |
+
# Works with legacy finders.
|
| 29 |
+
try:
|
| 30 |
+
find_spec = finder.find_spec
|
| 31 |
+
except AttributeError:
|
| 32 |
+
loader = finder.find_module(name)
|
| 33 |
+
if loader is None:
|
| 34 |
+
return None
|
| 35 |
+
return importlib.util.spec_from_loader(name, loader)
|
| 36 |
+
else:
|
| 37 |
+
return find_spec(name)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def read_code(stream):
    """Read a marshalled code object from a .pyc stream; None if the
    magic number does not match this interpreter's."""
    # This helper is needed in order for the PEP 302 emulation to
    # correctly handle compiled files.
    import marshal

    header_magic = stream.read(4)
    if header_magic != importlib.util.MAGIC_NUMBER:
        return None
    stream.read(12)  # Skip the flags/date/size fields of the header.
    return marshal.load(stream)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def walk_packages(path=None, prefix='', onerror=None):
    """Yields ModuleInfo for all modules recursively
    on path, or, if path is None, all accessible modules.

    'path' should be either None or a list of paths to look for
    modules in.

    'prefix' is a string to output on the front of every module name
    on output.

    Note that this function must import all *packages* (NOT all
    modules!) on the given path, in order to access the __path__
    attribute to find submodules.

    'onerror' is a function which gets called with one argument (the
    name of the package which was being imported) if any exception
    occurs while trying to import a package.  If no onerror function is
    supplied, ImportErrors are caught and ignored, while all other
    exceptions are propagated, terminating the search.

    Examples:

    # list all modules python can access
    walk_packages()

    # list all submodules of ctypes
    walk_packages(ctypes.__path__, ctypes.__name__+'.')
    """

    def seen(p, m={}):
        # Intentional mutable default: 'm' lives as long as this 'seen'
        # closure does, remembering every path item already checked.
        # Returns True for repeats, None (falsy) for first sightings.
        if p in m:
            return True
        m[p] = True

    for info in iter_modules(path, prefix):
        yield info

        if info.ispkg:
            try:
                # Packages must actually be imported to expose __path__.
                __import__(info.name)
            except ImportError:
                if onerror is not None:
                    onerror(info.name)
            except Exception:
                if onerror is not None:
                    onerror(info.name)
                else:
                    raise
            else:
                path = getattr(sys.modules[info.name], '__path__', None) or []

                # don't traverse path items we've seen before
                path = [p for p in path if not seen(p)]

                # Recurse into the package's own search path.
                yield from walk_packages(path, info.name+'.', onerror)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def iter_modules(path=None, prefix=''):
    """Yields ModuleInfo for all submodules on path,
    or, if path is None, all top-level modules on sys.path.

    'path' should be either None or a list of paths to look for
    modules in.

    'prefix' is a string to output on the front of every module name
    on output.
    """
    if path is None:
        importers = iter_importers()
    elif isinstance(path, str):
        # A bare string is a common mistake; require a list of paths.
        raise ValueError("path must be None or list of paths to look for "
                         "modules in")
    else:
        importers = map(get_importer, path)

    # Deduplicate by name: the first importer that provides a module wins.
    emitted = {}
    for importer in importers:
        for name, ispkg in iter_importer_modules(importer, prefix):
            if name in emitted:
                continue
            emitted[name] = 1
            yield ModuleInfo(importer, name, ispkg)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@simplegeneric
def iter_importer_modules(importer, prefix=''):
    """Default single-dispatch hook: defer to the finder's own
    iter_modules() when it has one, else yield nothing."""
    iterate = getattr(importer, 'iter_modules', None)
    if iterate is None:
        return []
    return iterate(prefix)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# Implement a file walker for the normal importlib path hook
|
| 144 |
+
def _iter_file_finder_modules(importer, prefix=''):
|
| 145 |
+
if importer.path is None or not os.path.isdir(importer.path):
|
| 146 |
+
return
|
| 147 |
+
|
| 148 |
+
yielded = {}
|
| 149 |
+
import inspect
|
| 150 |
+
try:
|
| 151 |
+
filenames = os.listdir(importer.path)
|
| 152 |
+
except OSError:
|
| 153 |
+
# ignore unreadable directories like import does
|
| 154 |
+
filenames = []
|
| 155 |
+
filenames.sort() # handle packages before same-named modules
|
| 156 |
+
|
| 157 |
+
for fn in filenames:
|
| 158 |
+
modname = inspect.getmodulename(fn)
|
| 159 |
+
if modname=='__init__' or modname in yielded:
|
| 160 |
+
continue
|
| 161 |
+
|
| 162 |
+
path = os.path.join(importer.path, fn)
|
| 163 |
+
ispkg = False
|
| 164 |
+
|
| 165 |
+
if not modname and os.path.isdir(path) and '.' not in fn:
|
| 166 |
+
modname = fn
|
| 167 |
+
try:
|
| 168 |
+
dircontents = os.listdir(path)
|
| 169 |
+
except OSError:
|
| 170 |
+
# ignore unreadable directories like import does
|
| 171 |
+
dircontents = []
|
| 172 |
+
for fn in dircontents:
|
| 173 |
+
subname = inspect.getmodulename(fn)
|
| 174 |
+
if subname=='__init__':
|
| 175 |
+
ispkg = True
|
| 176 |
+
break
|
| 177 |
+
else:
|
| 178 |
+
continue # not a package
|
| 179 |
+
|
| 180 |
+
if modname and '.' not in modname:
|
| 181 |
+
yielded[modname] = 1
|
| 182 |
+
yield prefix + modname, ispkg
|
| 183 |
+
|
| 184 |
+
iter_importer_modules.register(
|
| 185 |
+
importlib.machinery.FileFinder, _iter_file_finder_modules)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _import_imp():
    """Lazily import the deprecated 'imp' module into the module-global
    'imp', suppressing its import-time DeprecationWarning."""
    global imp
    with warnings.catch_warnings():
        # 'imp' warns on import; the callers here opted in knowingly.
        warnings.simplefilter('ignore', DeprecationWarning)
        imp = importlib.import_module('imp')
|
| 193 |
+
|
| 194 |
+
class ImpImporter:
    """PEP 302 Finder that wraps Python's "classic" import algorithm

    ImpImporter(dirname) produces a PEP 302 finder that searches that
    directory.  ImpImporter(None) produces a PEP 302 finder that searches
    the current sys.path, plus any modules that are frozen or built-in.

    Note that ImpImporter does not currently support being used by placement
    on sys.meta_path.
    """

    def __init__(self, path=None):
        global imp
        warnings.warn("This emulation is deprecated and slated for removal "
                      "in Python 3.12; use 'importlib' instead",
                      DeprecationWarning)
        # 'imp' is imported lazily because importing it emits a warning.
        _import_imp()
        self.path = path

    def find_module(self, fullname, path=None):
        # Note: we ignore 'path' argument since it is only used via meta_path
        subname = fullname.split(".")[-1]
        if subname != fullname and self.path is None:
            # Dotted names need a directory-scoped importer to resolve.
            return None
        if self.path is None:
            path = None
        else:
            path = [os.path.realpath(self.path)]
        try:
            file, filename, etc = imp.find_module(subname, path)
        except ImportError:
            return None
        return ImpLoader(fullname, file, filename, etc)

    def iter_modules(self, prefix=''):
        # Yield (prefix + name, ispkg) for each module/package directly
        # under self.path (same scan as _iter_file_finder_modules).
        if self.path is None or not os.path.isdir(self.path):
            return

        yielded = {}
        import inspect
        try:
            filenames = os.listdir(self.path)
        except OSError:
            # ignore unreadable directories like import does
            filenames = []
        filenames.sort()  # handle packages before same-named modules

        for fn in filenames:
            modname = inspect.getmodulename(fn)
            if modname=='__init__' or modname in yielded:
                continue

            path = os.path.join(self.path, fn)
            ispkg = False

            if not modname and os.path.isdir(path) and '.' not in fn:
                # A plain directory is a package only if it holds __init__.
                modname = fn
                try:
                    dircontents = os.listdir(path)
                except OSError:
                    # ignore unreadable directories like import does
                    dircontents = []
                for fn in dircontents:
                    subname = inspect.getmodulename(fn)
                    if subname=='__init__':
                        ispkg = True
                        break
                else:
                    continue    # not a package

            if modname and '.' not in modname:
                yielded[modname] = 1
                yield prefix + modname, ispkg
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class ImpLoader:
    """PEP 302 Loader that wraps Python's "classic" import algorithm
    """
    # Lazily-filled caches for the compiled code object and source text.
    code = source = None

    def __init__(self, fullname, file, filename, etc):
        warnings.warn("This emulation is deprecated and slated for removal in "
                      "Python 3.12; use 'importlib' instead",
                      DeprecationWarning)
        _import_imp()
        self.file = file
        self.filename = filename
        self.fullname = fullname
        # 'etc' is the (suffix, mode, type) triple from imp.find_module().
        self.etc = etc

    def load_module(self, fullname):
        # Reopen the file if an earlier operation closed it, then hand
        # off to the classic loader; always close the file afterwards.
        self._reopen()
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return mod

    def get_data(self, pathname):
        # Optional PEP 302 API: return the raw bytes of a file.
        with open(pathname, "rb") as file:
            return file.read()

    def _reopen(self):
        # Re-open self.file if it was closed, choosing text or binary
        # mode according to the module type in self.etc.
        if self.file and self.file.closed:
            mod_type = self.etc[2]
            if mod_type==imp.PY_SOURCE:
                self.file = open(self.filename, 'r')
            elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
                self.file = open(self.filename, 'rb')

    def _fix_name(self, fullname):
        # This loader is bound to exactly one module; reject others.
        if fullname is None:
            fullname = self.fullname
        elif fullname != self.fullname:
            raise ImportError("Loader for module %s cannot handle "
                              "module %s" % (self.fullname, fullname))
        return fullname

    def is_package(self, fullname):
        fullname = self._fix_name(fullname)
        return self.etc[2]==imp.PKG_DIRECTORY

    def get_code(self, fullname=None):
        fullname = self._fix_name(fullname)
        if self.code is None:
            mod_type = self.etc[2]
            if mod_type==imp.PY_SOURCE:
                # Compile from source text.
                source = self.get_source(fullname)
                self.code = compile(source, self.filename, 'exec')
            elif mod_type==imp.PY_COMPILED:
                # Unmarshal from the .pyc file.
                self._reopen()
                try:
                    self.code = read_code(self.file)
                finally:
                    self.file.close()
            elif mod_type==imp.PKG_DIRECTORY:
                # Delegate to the package's __init__ module.
                self.code = self._get_delegate().get_code()
        return self.code

    def get_source(self, fullname=None):
        fullname = self._fix_name(fullname)
        if self.source is None:
            mod_type = self.etc[2]
            if mod_type==imp.PY_SOURCE:
                self._reopen()
                try:
                    self.source = self.file.read()
                finally:
                    self.file.close()
            elif mod_type==imp.PY_COMPILED:
                # For a .pyc, fall back to the sibling .py if present.
                if os.path.exists(self.filename[:-1]):
                    with open(self.filename[:-1], 'r') as f:
                        self.source = f.read()
            elif mod_type==imp.PKG_DIRECTORY:
                self.source = self._get_delegate().get_source()
        return self.source

    def _get_delegate(self):
        # Build a loader for the package directory's __init__ module.
        finder = ImpImporter(self.filename)
        spec = _get_spec(finder, '__init__')
        return spec.loader

    def get_filename(self, fullname=None):
        fullname = self._fix_name(fullname)
        mod_type = self.etc[2]
        if mod_type==imp.PKG_DIRECTORY:
            return self._get_delegate().get_filename()
        elif mod_type in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
            return self.filename
        return None
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
try:
    import zipimport
    from zipimport import zipimporter

    def iter_zipimport_modules(importer, prefix=''):
        # Walk the archive's cached directory listing to find modules
        # under this importer's in-archive prefix.
        dirlist = sorted(zipimport._zip_directory_cache[importer.archive])
        _prefix = importer.prefix
        plen = len(_prefix)
        yielded = {}
        import inspect
        for fn in dirlist:
            if not fn.startswith(_prefix):
                continue

            fn = fn[plen:].split(os.sep)

            # A 'name/__init__.py*' entry marks 'name' as a package.
            if len(fn)==2 and fn[1].startswith('__init__.py'):
                if fn[0] not in yielded:
                    yielded[fn[0]] = 1
                    yield prefix + fn[0], True

            if len(fn)!=1:
                continue

            modname = inspect.getmodulename(fn[0])
            if modname=='__init__':
                continue

            if modname and '.' not in modname and modname not in yielded:
                yielded[modname] = 1
                yield prefix + modname, False

    iter_importer_modules.register(zipimporter, iter_zipimport_modules)

except ImportError:
    # zipimport is optional; without it zip archives just aren't walked.
    pass
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def get_importer(path_item):
    """Retrieve a finder for the given path item

    The returned finder is cached in sys.path_importer_cache
    if it was newly created by a path hook.

    The cache (or part of it) can be cleared manually if a
    rescan of sys.path_hooks is necessary.
    """
    if path_item in sys.path_importer_cache:
        return sys.path_importer_cache[path_item]
    importer = None
    for path_hook in sys.path_hooks:
        try:
            importer = path_hook(path_item)
        except ImportError:
            # This hook does not handle path_item; try the next one.
            continue
        # setdefault: keep whatever another thread may have cached first.
        sys.path_importer_cache.setdefault(path_item, importer)
        break
    return importer
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def iter_importers(fullname=""):
    """Yield finders for the given module name

    If fullname contains a '.', the finders will be for the package
    containing fullname, otherwise they will be all registered top level
    finders (i.e. those on both sys.meta_path and sys.path_hooks).

    If the named module is in a package, that package is imported as a side
    effect of invoking this function.

    If no module name is specified, all top level finders are produced.
    """
    if fullname.startswith('.'):
        msg = "Relative module name {!r} not supported".format(fullname)
        raise ImportError(msg)
    if '.' in fullname:
        # Search only inside the containing package's __path__.
        pkg_name = fullname.rpartition(".")[0]
        pkg = importlib.import_module(pkg_name)
        path = getattr(pkg, '__path__', None)
        if path is None:
            return
    else:
        # Top-level search: meta-path finders first, then sys.path entries.
        yield from sys.meta_path
        path = sys.path
    for item in path:
        yield get_importer(item)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
def get_loader(module_or_name):
    """Get a "loader" object for module_or_name

    Returns None if the module cannot be found or imported.
    If the named module is not already imported, its containing package
    (if any) is imported, in order to establish the package __path__.
    """
    # A name already present in sys.modules resolves to the module itself.
    if module_or_name in sys.modules:
        module_or_name = sys.modules[module_or_name]
    if module_or_name is None:
        return None
    if not isinstance(module_or_name, ModuleType):
        # A plain (not-yet-imported) name: defer to find_loader().
        return find_loader(module_or_name)
    module = module_or_name
    loader = getattr(module, '__loader__', None)
    if loader is not None:
        return loader
    if getattr(module, '__spec__', None) is None:
        # Namespace placeholder or synthetic module with no spec.
        return None
    return find_loader(module.__name__)
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
def find_loader(fullname):
    """Find a "loader" object for fullname

    This is a backwards compatibility wrapper around
    importlib.util.find_spec that converts most failures to ImportError
    and only returns the loader rather than the full spec
    """
    if fullname.startswith('.'):
        msg = "Relative module name {!r} not supported".format(fullname)
        raise ImportError(msg)
    try:
        spec = importlib.util.find_spec(fullname)
    except (ImportError, AttributeError, TypeError, ValueError) as ex:
        # This hack fixes an impedance mismatch between pkgutil and
        # importlib, where the latter raises other errors for cases where
        # pkgutil previously raised ImportError
        msg = "Error while finding loader for {!r} ({}: {})"
        raise ImportError(msg.format(fullname, type(ex), ex)) from ex
    if spec is None:
        return None
    return spec.loader
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def extend_path(path, name):
    """Extend a package's path.

    Intended use is to place the following code in a package's __init__.py:

        from pkgutil import extend_path
        __path__ = extend_path(__path__, __name__)

    This will add to the package's __path__ all subdirectories of
    directories on sys.path named after the package.  This is useful
    if one wants to distribute different parts of a single logical
    package as multiple directories.

    It also looks for *.pkg files beginning where * matches the name
    argument.  This feature is similar to *.pth files (see site.py),
    except that it doesn't special-case lines starting with 'import'.
    A *.pkg file is trusted at face value: apart from checking for
    duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem.  (This
    is a feature.)

    If the input path is not a list (as is the case for frozen
    packages) it is returned unchanged.  The input path is not
    modified; an extended copy is returned.  Items are only appended
    to the copy at the end.

    It is assumed that sys.path is a sequence.  Items of sys.path that
    are not (unicode or 8-bit) strings referring to existing
    directories are ignored.  Unicode items of sys.path that cause
    errors when used as filenames may cause this function to raise an
    exception (in line with os.path.isdir() behavior).
    """

    if not isinstance(path, list):
        # This could happen e.g. when this is called from inside a
        # frozen package.  Return the path unchanged in that case.
        return path

    sname_pkg = name + ".pkg"

    path = path[:] # Start with a copy of the existing path

    parent_package, _, final_name = name.rpartition('.')
    if parent_package:
        try:
            search_path = sys.modules[parent_package].__path__
        except (KeyError, AttributeError):
            # We can't do anything: find_loader() returns None when
            # passed a dotted name.
            return path
    else:
        search_path = sys.path

    for dir in search_path:
        if not isinstance(dir, str):
            continue

        finder = get_importer(dir)
        if finder is not None:
            portions = []
            if hasattr(finder, 'find_spec'):
                spec = finder.find_spec(final_name)
                if spec is not None:
                    portions = spec.submodule_search_locations or []
            # Is this finder PEP 420 compliant?
            elif hasattr(finder, 'find_loader'):
                _, portions = finder.find_loader(final_name)

            for portion in portions:
                # XXX This may still add duplicate entries to path on
                # case-insensitive filesystems
                if portion not in path:
                    path.append(portion)

        # XXX Is this the right thing for subpackages like zope.app?
        # It looks for a file named "zope.app.pkg"
        pkgfile = os.path.join(dir, sname_pkg)
        if os.path.isfile(pkgfile):
            try:
                f = open(pkgfile)
            except OSError as msg:
                # Unreadable .pkg file: warn on stderr and keep going.
                sys.stderr.write("Can't open %s: %s\n" %
                                 (pkgfile, msg))
            else:
                with f:
                    for line in f:
                        line = line.rstrip('\n')
                        if not line or line.startswith('#'):
                            continue
                        path.append(line) # Don't check for existence!

    return path
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def get_data(package, resource):
    """Get a resource from a package.

    This is a wrapper round the PEP 302 loader get_data API. The package
    argument should be the name of a package, in standard module format
    (foo.bar). The resource argument should be in the form of a relative
    filename, using '/' as the path separator. The parent directory name '..'
    is not allowed, and nor is a rooted name (starting with a '/').

    The function returns a binary string, which is the contents of the
    specified resource.

    For packages located in the filesystem, which have already been imported,
    this is the rough equivalent of

        d = os.path.dirname(sys.modules[package].__file__)
        data = open(os.path.join(d, resource), 'rb').read()

    If the package cannot be located or loaded, or it uses a PEP 302 loader
    which does not support get_data(), then None is returned.
    """

    spec = importlib.util.find_spec(package)
    if spec is None:
        return None
    loader = spec.loader
    # Bail out early when the loader cannot serve raw bytes at all.
    if loader is None or not hasattr(loader, 'get_data'):
        return None
    # XXX needs test
    # Prefer an already-imported module; otherwise load it through the
    # private bootstrap machinery so __file__ gets populated.
    mod = (sys.modules.get(package) or
           importlib._bootstrap._load(spec))
    if mod is None or not hasattr(mod, '__file__'):
        return None

    # Modify the resource name to be compatible with the loader.get_data
    # signature - an os.path format "filename" starting with the dirname of
    # the package's __file__
    parts = resource.split('/')
    parts.insert(0, os.path.dirname(mod.__file__))
    resource_name = os.path.join(*parts)
    return loader.get_data(resource_name)
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
# Compiled lazily by resolve_name() on first use and cached module-wide.
_NAME_PATTERN = None

def resolve_name(name):
    """
    Resolve a name to an object.

    It is expected that `name` will be a string in one of the following
    formats, where W is shorthand for a valid Python identifier and dot stands
    for a literal period in these pseudo-regexes:

    W(.W)*
    W(.W)*:(W(.W)*)?

    The first form is intended for backward compatibility only. It assumes that
    some part of the dotted name is a package, and the rest is an object
    somewhere within that package, possibly nested inside other objects.
    Because the place where the package stops and the object hierarchy starts
    can't be inferred by inspection, repeated attempts to import must be done
    with this form.

    In the second form, the caller makes the division point clear through the
    provision of a single colon: the dotted name to the left of the colon is a
    package to be imported, and the dotted name to the right is the object
    hierarchy within that package. Only one import is needed in this form. If
    it ends with the colon, then a module object is returned.

    The function will return an object (which might be a module), or raise one
    of the following exceptions:

    ValueError - if `name` isn't in a recognised format
    ImportError - if an import failed when it shouldn't have
    AttributeError - if a failure occurred when traversing the object hierarchy
    within the imported package to get to the desired object.
    """
    global _NAME_PATTERN
    if _NAME_PATTERN is None:
        # Lazy import to speedup Python startup time
        import re
        # (?!\d) forbids identifiers that start with a digit.
        dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
                                   re.UNICODE)

    m = _NAME_PATTERN.match(name)
    if not m:
        raise ValueError(f'invalid format: {name!r}')
    gd = m.groupdict()
    if gd.get('cln'):
        # there is a colon - a one-step import is all that's needed
        mod = importlib.import_module(gd['pkg'])
        parts = gd.get('obj')
        parts = parts.split('.') if parts else []
    else:
        # no colon - have to iterate to find the package boundary
        parts = name.split('.')
        modname = parts.pop(0)
        # first part *must* be a module/package.
        mod = importlib.import_module(modname)
        while parts:
            p = parts[0]
            s = f'{modname}.{p}'
            try:
                # Extend the module path one dotted segment at a time; the
                # first ImportError marks the package/object split point.
                mod = importlib.import_module(s)
                parts.pop(0)
                modname = s
            except ImportError:
                break
    # if we reach this point, mod is the module, already imported, and
    # parts is the list of parts in the object hierarchy to be traversed, or
    # an empty list if just the module is wanted.
    result = mod
    for p in parts:
        result = getattr(result, p)
    return result
|
llava/lib/python3.10/re.py
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Secret Labs' Regular Expression Engine
|
| 3 |
+
#
|
| 4 |
+
# re-compatible interface for the sre matching engine
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
|
| 7 |
+
#
|
| 8 |
+
# This version of the SRE library can be redistributed under CNRI's
|
| 9 |
+
# Python 1.6 license. For any other use, please contact Secret Labs
|
| 10 |
+
# AB (info@pythonware.com).
|
| 11 |
+
#
|
| 12 |
+
# Portions of this engine have been developed in cooperation with
|
| 13 |
+
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
|
| 14 |
+
# other compatibility work.
|
| 15 |
+
#
|
| 16 |
+
|
| 17 |
+
r"""Support for regular expressions (RE).
|
| 18 |
+
|
| 19 |
+
This module provides regular expression matching operations similar to
|
| 20 |
+
those found in Perl. It supports both 8-bit and Unicode strings; both
|
| 21 |
+
the pattern and the strings being processed can contain null bytes and
|
| 22 |
+
characters outside the US ASCII range.
|
| 23 |
+
|
| 24 |
+
Regular expressions can contain both special and ordinary characters.
|
| 25 |
+
Most ordinary characters, like "A", "a", or "0", are the simplest
|
| 26 |
+
regular expressions; they simply match themselves. You can
|
| 27 |
+
concatenate ordinary characters, so last matches the string 'last'.
|
| 28 |
+
|
| 29 |
+
The special characters are:
|
| 30 |
+
"." Matches any character except a newline.
|
| 31 |
+
"^" Matches the start of the string.
|
| 32 |
+
"$" Matches the end of the string or just before the newline at
|
| 33 |
+
the end of the string.
|
| 34 |
+
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
|
| 35 |
+
Greedy means that it will match as many repetitions as possible.
|
| 36 |
+
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
|
| 37 |
+
"?" Matches 0 or 1 (greedy) of the preceding RE.
|
| 38 |
+
*?,+?,?? Non-greedy versions of the previous three special characters.
|
| 39 |
+
{m,n} Matches from m to n repetitions of the preceding RE.
|
| 40 |
+
{m,n}? Non-greedy version of the above.
|
| 41 |
+
"\\" Either escapes special characters or signals a special sequence.
|
| 42 |
+
[] Indicates a set of characters.
|
| 43 |
+
A "^" as the first character indicates a complementing set.
|
| 44 |
+
"|" A|B, creates an RE that will match either A or B.
|
| 45 |
+
(...) Matches the RE inside the parentheses.
|
| 46 |
+
The contents can be retrieved or matched later in the string.
|
| 47 |
+
(?aiLmsux) The letters set the corresponding flags defined below.
|
| 48 |
+
(?:...) Non-grouping version of regular parentheses.
|
| 49 |
+
(?P<name>...) The substring matched by the group is accessible by name.
|
| 50 |
+
(?P=name) Matches the text matched earlier by the group named name.
|
| 51 |
+
(?#...) A comment; ignored.
|
| 52 |
+
(?=...) Matches if ... matches next, but doesn't consume the string.
|
| 53 |
+
(?!...) Matches if ... doesn't match next.
|
| 54 |
+
(?<=...) Matches if preceded by ... (must be fixed length).
|
| 55 |
+
(?<!...) Matches if not preceded by ... (must be fixed length).
|
| 56 |
+
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
|
| 57 |
+
the (optional) no pattern otherwise.
|
| 58 |
+
|
| 59 |
+
The special sequences consist of "\\" and a character from the list
|
| 60 |
+
below. If the ordinary character is not on the list, then the
|
| 61 |
+
resulting RE will match the second character.
|
| 62 |
+
\number Matches the contents of the group of the same number.
|
| 63 |
+
\A Matches only at the start of the string.
|
| 64 |
+
\Z Matches only at the end of the string.
|
| 65 |
+
\b Matches the empty string, but only at the start or end of a word.
|
| 66 |
+
\B Matches the empty string, but not at the start or end of a word.
|
| 67 |
+
\d Matches any decimal digit; equivalent to the set [0-9] in
|
| 68 |
+
bytes patterns or string patterns with the ASCII flag.
|
| 69 |
+
In string patterns without the ASCII flag, it will match the whole
|
| 70 |
+
range of Unicode digits.
|
| 71 |
+
\D Matches any non-digit character; equivalent to [^\d].
|
| 72 |
+
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
|
| 73 |
+
bytes patterns or string patterns with the ASCII flag.
|
| 74 |
+
In string patterns without the ASCII flag, it will match the whole
|
| 75 |
+
range of Unicode whitespace characters.
|
| 76 |
+
\S Matches any non-whitespace character; equivalent to [^\s].
|
| 77 |
+
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
|
| 78 |
+
in bytes patterns or string patterns with the ASCII flag.
|
| 79 |
+
In string patterns without the ASCII flag, it will match the
|
| 80 |
+
range of Unicode alphanumeric characters (letters plus digits
|
| 81 |
+
plus underscore).
|
| 82 |
+
With LOCALE, it will match the set [0-9_] plus characters defined
|
| 83 |
+
as letters for the current locale.
|
| 84 |
+
\W Matches the complement of \w.
|
| 85 |
+
\\ Matches a literal backslash.
|
| 86 |
+
|
| 87 |
+
This module exports the following functions:
|
| 88 |
+
match Match a regular expression pattern to the beginning of a string.
|
| 89 |
+
fullmatch Match a regular expression pattern to all of a string.
|
| 90 |
+
search Search a string for the presence of a pattern.
|
| 91 |
+
sub Substitute occurrences of a pattern found in a string.
|
| 92 |
+
subn Same as sub, but also return the number of substitutions made.
|
| 93 |
+
split Split a string by the occurrences of a pattern.
|
| 94 |
+
findall Find all occurrences of a pattern in a string.
|
| 95 |
+
finditer Return an iterator yielding a Match object for each match.
|
| 96 |
+
compile Compile a pattern into a Pattern object.
|
| 97 |
+
purge Clear the regular expression cache.
|
| 98 |
+
escape Backslash all non-alphanumerics in a string.
|
| 99 |
+
|
| 100 |
+
Each function other than purge and escape can take an optional 'flags' argument
|
| 101 |
+
consisting of one or more of the following module constants, joined by "|".
|
| 102 |
+
A, L, and U are mutually exclusive.
|
| 103 |
+
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
|
| 104 |
+
match the corresponding ASCII character categories
|
| 105 |
+
(rather than the whole Unicode categories, which is the
|
| 106 |
+
default).
|
| 107 |
+
For bytes patterns, this flag is the only available
|
| 108 |
+
behaviour and needn't be specified.
|
| 109 |
+
I IGNORECASE Perform case-insensitive matching.
|
| 110 |
+
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
|
| 111 |
+
M MULTILINE "^" matches the beginning of lines (after a newline)
|
| 112 |
+
as well as the string.
|
| 113 |
+
"$" matches the end of lines (before a newline) as well
|
| 114 |
+
as the end of the string.
|
| 115 |
+
S DOTALL "." matches any character at all, including the newline.
|
| 116 |
+
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
|
| 117 |
+
U UNICODE For compatibility only. Ignored for string patterns (it
|
| 118 |
+
is the default), and forbidden for bytes patterns.
|
| 119 |
+
|
| 120 |
+
This module also defines an exception 'error'.
|
| 121 |
+
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
import enum
|
| 125 |
+
import sre_compile
|
| 126 |
+
import sre_parse
|
| 127 |
+
import functools
|
| 128 |
+
try:
|
| 129 |
+
import _locale
|
| 130 |
+
except ImportError:
|
| 131 |
+
_locale = None
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# public symbols
__all__ = [
    "match", "fullmatch", "search", "sub", "subn", "split",
    "findall", "finditer", "compile", "purge", "template", "escape",
    "error", "Pattern", "Match", "A", "I", "L", "M", "S", "X", "U",
    "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE",
]

# Version string of the SRE front end, kept for backward compatibility.
__version__ = "2.2.1"
|
| 144 |
+
|
| 145 |
+
class RegexFlag(enum.IntFlag):
    """Flags accepted by compile() and the module-level matching functions.

    Values mirror the C-level SRE flag constants so they can be passed
    straight through to sre_compile.
    """
    ASCII = A = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
    IGNORECASE = I = sre_compile.SRE_FLAG_IGNORECASE # ignore case
    LOCALE = L = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
    UNICODE = U = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
    MULTILINE = M = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
    DOTALL = S = sre_compile.SRE_FLAG_DOTALL # make dot match newline
    VERBOSE = X = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
    # sre extensions (experimental, don't rely on these)
    TEMPLATE = T = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
    DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

    def __repr__(self):
        # A single named member renders as 're.NAME'.
        if self._name_ is not None:
            return f're.{self._name_}'
        value = self._value_
        members = []
        # A negative value denotes a complement (e.g. ~re.I): decompose
        # the complemented value, then re-apply '~' at the end.
        negative = value < 0
        if negative:
            value = ~value
        for m in self.__class__:
            if value & m._value_:
                value &= ~m._value_
                members.append(f're.{m._name_}')
        if value:
            # Any bits left over do not correspond to a named member.
            members.append(hex(value))
        res = '|'.join(members)
        if negative:
            if len(members) > 1:
                res = f'~({res})'
            else:
                res = f'~{res}'
        return res
    # object.__str__ delegates to __repr__, restoring the default
    # behaviour instead of IntFlag's integer-style str().
    __str__ = object.__str__
|
| 179 |
+
# Export the flag members (A, ASCII, I, IGNORECASE, ...) as module-level
# names, as promised by __all__.
globals().update(RegexFlag.__members__)

# sre exception
error = sre_compile.error
|
| 183 |
+
|
| 184 |
+
# --------------------------------------------------------------------
|
| 185 |
+
# public interface
|
| 186 |
+
|
| 187 |
+
def match(pattern, string, flags=0):
    """Apply the pattern at the start of the string; return a Match
    object, or None when the beginning of the string does not match."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
|
| 191 |
+
|
| 192 |
+
def fullmatch(pattern, string, flags=0):
    """Apply the pattern to the entire string; return a Match object,
    or None when the whole string does not match."""
    compiled = _compile(pattern, flags)
    return compiled.fullmatch(string)
|
| 196 |
+
|
| 197 |
+
def search(pattern, string, flags=0):
    """Scan through string looking for the first location where the
    pattern matches; return a Match object, or None if no match exists."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
|
| 201 |
+
|
| 202 |
+
def sub(pattern, repl, string, count=0, flags=0):
    """Replace the leftmost non-overlapping occurrences of the pattern in
    string by repl, returning the resulting string.

    repl may be a string (backslash escapes in it are processed) or a
    callable that receives the Match object and returns the replacement
    text.  At most `count` substitutions are made when count is nonzero."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
|
| 210 |
+
|
| 211 |
+
def subn(pattern, repl, string, count=0, flags=0):
    """Like sub(), but return the 2-tuple (new_string, number_of_subs).

    new_string is the source string with the leftmost non-overlapping
    occurrences of the pattern replaced by repl; number_of_subs counts
    how many replacements were made.  repl may be a string (backslash
    escapes processed) or a callable taking the Match object and
    returning the replacement text."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
|
| 221 |
+
|
| 222 |
+
def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string on occurrences of the pattern, returning a
    list of the pieces.

    When the pattern contains capturing groups, the text of every group
    is included in the result as well.  When maxsplit is nonzero, at most
    maxsplit splits are performed and the rest of the string becomes the
    final list element."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
|
| 231 |
+
|
| 232 |
+
def findall(pattern, string, flags=0):
    """Return a list of every non-overlapping match in the string.

    With one or more capturing groups in the pattern, the list holds the
    group contents instead - tuples of them when the pattern has more
    than one group.  Empty matches are included."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
|
| 241 |
+
|
| 242 |
+
def finditer(pattern, string, flags=0):
    """Return an iterator yielding a Match object for every
    non-overlapping match in the string.  Empty matches are included."""
    compiled = _compile(pattern, flags)
    return compiled.finditer(string)
|
| 248 |
+
|
| 249 |
+
def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a Pattern object."
    # Thin public alias for the caching internal helper.
    return _compile(pattern, flags)
|
| 252 |
+
|
| 253 |
+
def purge():
    "Clear the regular expression caches"
    # Empties both the compiled-pattern cache and the lru_cache of parsed
    # replacement templates on _compile_repl.
    _cache.clear()
    _compile_repl.cache_clear()
|
| 257 |
+
|
| 258 |
+
def template(pattern, flags=0):
    "Compile a template pattern, returning a Pattern object"
    # T (TEMPLATE, "disable backtracking") is a module-level name injected
    # by globals().update(RegexFlag.__members__).
    return _compile(pattern, flags|T)
|
| 261 |
+
|
| 262 |
+
# Translation table used by escape(): maps each special byte value to a
# backslash-escaped string.  Covers, beyond the usual metacharacters:
# closing ')', '}' and ']'
# '-' (a range in character set)
# '&', '~', (extended character set operations)
# '#' (comment) and WHITESPACE (ignored) in verbose mode
_special_chars_map = {
    byte: '\\' + chr(byte)
    for byte in b'()[]{}?*+-|^$\\.&~# \t\n\r\v\f'
}

def escape(pattern):
    """
    Escape special characters in a string.
    """
    if not isinstance(pattern, str):
        # bytes-like input: latin-1 maps every byte 1:1 onto a code point,
        # so we can reuse the str translation table and round-trip back.
        text = str(pattern, 'latin1')
        return text.translate(_special_chars_map).encode('latin1')
    return pattern.translate(_special_chars_map)
|
| 278 |
+
|
| 279 |
+
# Handles for the C-level pattern/match types; they are not directly
# constructible, so we obtain them from an empty compiled pattern.
Pattern = type(sre_compile.compile('', 0))
Match = type(sre_compile.compile('', 0).match(''))

# --------------------------------------------------------------------
# internals

# Compiled-pattern cache keyed by (type(pattern), pattern, flags).
# Relies on dict insertion order so the oldest entry is evicted first.
_cache = {} # ordered!

_MAXCACHE = 512
def _compile(pattern, flags):
    # internal: compile pattern
    if isinstance(flags, RegexFlag):
        flags = flags.value
    try:
        # Fast path: pattern was compiled before with the same flags.
        return _cache[type(pattern), pattern, flags]
    except KeyError:
        pass
    if isinstance(pattern, Pattern):
        if flags:
            raise ValueError(
                "cannot process flags argument with a compiled pattern")
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError("first argument must be string or compiled pattern")
    p = sre_compile.compile(pattern, flags)
    # DEBUG compilations are never cached (they print on every compile).
    if not (flags & DEBUG):
        if len(_cache) >= _MAXCACHE:
            # Drop the oldest item
            try:
                del _cache[next(iter(_cache))]
            except (StopIteration, RuntimeError, KeyError):
                # Another thread may mutate the cache concurrently;
                # eviction is best-effort.
                pass
        _cache[type(pattern), pattern, flags] = p
    return p
|
| 313 |
+
|
| 314 |
+
# Memoizes parsed replacement templates; purge() calls cache_clear().
@functools.lru_cache(_MAXCACHE)
def _compile_repl(repl, pattern):
    # internal: compile replacement pattern
    return sre_parse.parse_template(repl, pattern)
|
| 318 |
+
|
| 319 |
+
def _expand(pattern, match, template):
    # internal: Match.expand implementation hook
    # Parses the template against the pattern, then expands it with the
    # groups captured by `match`.
    template = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(template, match)
|
| 323 |
+
|
| 324 |
+
def _subx(pattern, template):
    # internal: Pattern.sub/subn implementation helper
    template = _compile_repl(template, pattern)
    if not template[0] and len(template[1]) == 1:
        # literal replacement
        return template[1][0]
    # Bind the parsed template through a default argument so the returned
    # callable needs only the match object.
    def filter(match, template=template):
        return sre_parse.expand_template(template, match)
    return filter
|
| 333 |
+
|
| 334 |
+
# register myself for pickling

import copyreg

def _pickle(p):
    # Reduce a compiled pattern to (_compile, (pattern, flags)) so it can
    # be recompiled on unpickling.
    return _compile, (p.pattern, p.flags)

copyreg.pickle(Pattern, _pickle, _compile)
|
| 342 |
+
|
| 343 |
+
# --------------------------------------------------------------------
|
| 344 |
+
# experimental stuff (see python-dev discussions for details)
|
| 345 |
+
|
| 346 |
+
class Scanner:
    """Experimental lexical scanner: pairs regex phrases with actions and
    tokenizes a string using one combined alternation pattern."""
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        if isinstance(flags, RegexFlag):
            flags = flags.value
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.State()
        s.flags = flags
        for phrase, action in lexicon:
            # Wrap each phrase in its own numbered group so a match can be
            # traced back (via m.lastindex) to the action that produced it.
            gid = s.opengroup()
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (gid, 0, 0, sre_parse.parse(phrase, flags))),
            ]))
            s.closegroup(gid, p[-1])
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Tokenize `string`; return (results, remainder) where remainder
        is the unmatched tail of the input."""
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while True:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # zero-width match: stop rather than loop forever
                break
            action = self.lexicon[m.lastindex-1][1]
            if callable(action):
                # Expose the current match to the callback via self.match.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
|
llava/lib/python3.10/selectors.py
ADDED
|
@@ -0,0 +1,619 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Selectors module.
|
| 2 |
+
|
| 3 |
+
This module allows high-level and efficient I/O multiplexing, built upon the
|
| 4 |
+
`select` module primitives.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
from abc import ABCMeta, abstractmethod
|
| 9 |
+
from collections import namedtuple
|
| 10 |
+
from collections.abc import Mapping
|
| 11 |
+
import math
|
| 12 |
+
import select
|
| 13 |
+
import sys
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)   # bit flag; combinable with EVENT_WRITE via "|"
EVENT_WRITE = (1 << 1)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _fileobj_to_fd(fileobj):
|
| 22 |
+
"""Return a file descriptor from a file object.
|
| 23 |
+
|
| 24 |
+
Parameters:
|
| 25 |
+
fileobj -- file object or file descriptor
|
| 26 |
+
|
| 27 |
+
Returns:
|
| 28 |
+
corresponding file descriptor
|
| 29 |
+
|
| 30 |
+
Raises:
|
| 31 |
+
ValueError if the object is invalid
|
| 32 |
+
"""
|
| 33 |
+
if isinstance(fileobj, int):
|
| 34 |
+
fd = fileobj
|
| 35 |
+
else:
|
| 36 |
+
try:
|
| 37 |
+
fd = int(fileobj.fileno())
|
| 38 |
+
except (AttributeError, TypeError, ValueError):
|
| 39 |
+
raise ValueError("Invalid file object: "
|
| 40 |
+
"{!r}".format(fileobj)) from None
|
| 41 |
+
if fd < 0:
|
| 42 |
+
raise ValueError("Invalid file descriptor: {}".format(fd))
|
| 43 |
+
return fd
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Immutable record tying a registered file object to its descriptor, the
# event mask it was registered for, and opaque caller-supplied data.
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])

SelectorKey.__doc__ = """SelectorKey(fileobj, fd, events, data)

Object used to associate a file object to its backing
file descriptor, selected event mask, and attached data.
"""
# Per-field docstrings on namedtuples require Python >= 3.5.
if sys.version_info >= (3, 5):
    SelectorKey.fileobj.__doc__ = 'File object registered.'
    SelectorKey.fd.__doc__ = 'Underlying file descriptor.'
    SelectorKey.events.__doc__ = 'Events that must be waited for on this file object.'
    SelectorKey.data.__doc__ = ('''Optional opaque data associated to this file object.
    For example, this could be used to store a per-client session ID.''')
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class _SelectorMapping(Mapping):
|
| 62 |
+
"""Mapping of file objects to selector keys."""
|
| 63 |
+
|
| 64 |
+
def __init__(self, selector):
|
| 65 |
+
self._selector = selector
|
| 66 |
+
|
| 67 |
+
def __len__(self):
|
| 68 |
+
return len(self._selector._fd_to_key)
|
| 69 |
+
|
| 70 |
+
def __getitem__(self, fileobj):
|
| 71 |
+
try:
|
| 72 |
+
fd = self._selector._fileobj_lookup(fileobj)
|
| 73 |
+
return self._selector._fd_to_key[fd]
|
| 74 |
+
except KeyError:
|
| 75 |
+
raise KeyError("{!r} is not registered".format(fileobj)) from None
|
| 76 |
+
|
| 77 |
+
def __iter__(self):
|
| 78 |
+
return iter(self._selector._fd_to_key)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class BaseSelector(metaclass=ABCMeta):
    """Selector abstract base class.

    A selector supports registering file objects to be monitored for specific
    I/O events.

    A file object is a file descriptor or any object with a `fileno()` method.
    An arbitrary object can be attached to the file object, which can be used
    for example to store context information, a callback, etc.

    A selector can use various implementations (select(), poll(), epoll()...)
    depending on the platform. The default `Selector` class uses the most
    efficient implementation on the current platform.
    """

    @abstractmethod
    def register(self, fileobj, events, data=None):
        """Register a file object.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        ValueError if events is invalid
        KeyError if fileobj is already registered
        OSError if fileobj is closed or otherwise is unacceptable to
                the underlying system call (if a system call is made)

        Note:
        OSError may or may not be raised
        """
        raise NotImplementedError

    @abstractmethod
    def unregister(self, fileobj):
        """Unregister a file object.

        Parameters:
        fileobj -- file object or file descriptor

        Returns:
        SelectorKey instance

        Raises:
        KeyError if fileobj is not registered

        Note:
        If fileobj is registered but has since been closed this does
        *not* raise OSError (even if the wrapped syscall does)
        """
        raise NotImplementedError

    def modify(self, fileobj, events, data=None):
        """Change a registered file object monitored events or attached data.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        Anything that unregister() or register() raises
        """
        # Default implementation is not atomic: subclasses may override
        # with a cheaper in-place update (see _BaseSelectorImpl.modify).
        self.unregister(fileobj)
        return self.register(fileobj, events, data)

    @abstractmethod
    def select(self, timeout=None):
        """Perform the actual selection, until some monitored file objects are
        ready or a timeout expires.

        Parameters:
        timeout -- if timeout > 0, this specifies the maximum wait time, in
                   seconds
                   if timeout <= 0, the select() call won't block, and will
                   report the currently ready file objects
                   if timeout is None, select() will block until a monitored
                   file object becomes ready

        Returns:
        list of (key, events) for ready file objects
        `events` is a bitwise mask of EVENT_READ|EVENT_WRITE
        """
        raise NotImplementedError

    def close(self):
        """Close the selector.

        This must be called to make sure that any underlying resource is freed.
        """
        pass

    def get_key(self, fileobj):
        """Return the key associated to a registered file object.

        Returns:
        SelectorKey for this file object

        Raises:
        RuntimeError if the selector has been closed
        KeyError if fileobj is not registered
        """
        mapping = self.get_map()
        # get_map() returns None once the selector is closed.
        if mapping is None:
            raise RuntimeError('Selector is closed')
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None

    @abstractmethod
    def get_map(self):
        """Return a mapping of file objects to selector keys."""
        raise NotImplementedError

    def __enter__(self):
        # Context-manager support: `with selector: ...` closes on exit.
        return self

    def __exit__(self, *args):
        self.close()
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
class _BaseSelectorImpl(BaseSelector):
    """Base selector implementation.

    Maintains the fd -> SelectorKey bookkeeping shared by all concrete
    selectors; subclasses add the OS-level polling mechanism.
    """

    def __init__(self):
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """Return a file descriptor from a file object.

        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map.  This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed.  It is also
        used by _SelectorMapping.
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Do an exhaustive search.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        # Reject an empty mask or any bits outside EVENT_READ|EVENT_WRITE.
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        return key

    def modify(self, fileobj, events, data=None):
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if events != key.events:
            # Event mask changed: re-register from scratch.
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key
        return key

    def close(self):
        self._fd_to_key.clear()
        # get_map() returning None is how get_key() detects a closed selector.
        self._map = None

    def get_map(self):
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.

        Parameters:
        fd -- file descriptor

        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class SelectSelector(_BaseSelectorImpl):
    """Select-based selector.

    Portable fallback built on select.select(); keeps separate reader and
    writer fd sets alongside the base class's fd -> key map.
    """

    def __init__(self):
        super().__init__()
        self._readers = set()
        self._writers = set()

    def register(self, fileobj, events, data=None):
        key = super().register(fileobj, events, data)
        if events & EVENT_READ:
            self._readers.add(key.fd)
        if events & EVENT_WRITE:
            self._writers.add(key.fd)
        return key

    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        # discard() (not remove()): the fd may be in only one of the sets.
        self._readers.discard(key.fd)
        self._writers.discard(key.fd)
        return key

    if sys.platform == 'win32':
        # On Windows, select() reports connection failures via the
        # "exceptional" set, so fold x into the writable result.
        def _select(self, r, w, _, timeout=None):
            r, w, x = select.select(r, w, w, timeout)
            return r, w + x, []
    else:
        _select = select.select

    def select(self, timeout=None):
        # Negative timeouts would raise in select(); clamp to non-blocking.
        timeout = None if timeout is None else max(timeout, 0)
        ready = []
        try:
            r, w, _ = self._select(self._readers, self._writers, [], timeout)
        except InterruptedError:
            # Interrupted by a signal: report nothing ready.
            return ready
        r = set(r)
        w = set(w)
        for fd in r | w:
            events = 0
            if fd in r:
                events |= EVENT_READ
            if fd in w:
                events |= EVENT_WRITE

            key = self._key_from_fd(fd)
            if key:
                # Only report events the caller actually registered for.
                ready.append((key, events & key.events))
        return ready
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class _PollLikeSelector(_BaseSelectorImpl):
    """Base class shared between poll, epoll and devpoll selectors."""
    # Subclasses supply the poller factory and its native READ/WRITE flags.
    _selector_cls = None
    _EVENT_READ = None
    _EVENT_WRITE = None

    def __init__(self):
        super().__init__()
        self._selector = self._selector_cls()

    def register(self, fileobj, events, data=None):
        key = super().register(fileobj, events, data)
        # Translate the generic event mask into the poller's native flags.
        poller_events = 0
        if events & EVENT_READ:
            poller_events |= self._EVENT_READ
        if events & EVENT_WRITE:
            poller_events |= self._EVENT_WRITE
        try:
            self._selector.register(key.fd, poller_events)
        except:
            # Keep our bookkeeping consistent with the OS poller.
            super().unregister(fileobj)
            raise
        return key

    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        try:
            self._selector.unregister(key.fd)
        except OSError:
            # This can happen if the FD was closed since it
            # was registered.
            pass
        return key

    def modify(self, fileobj, events, data=None):
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError(f"{fileobj!r} is not registered") from None

        changed = False
        if events != key.events:
            selector_events = 0
            if events & EVENT_READ:
                selector_events |= self._EVENT_READ
            if events & EVENT_WRITE:
                selector_events |= self._EVENT_WRITE
            try:
                self._selector.modify(key.fd, selector_events)
            except:
                super().unregister(fileobj)
                raise
            changed = True
        if data != key.data:
            changed = True

        if changed:
            key = key._replace(events=events, data=data)
            self._fd_to_key[key.fd] = key
        return key

    def select(self, timeout=None):
        # This is shared between poll() and epoll().
        # epoll() has a different signature and handling of timeout parameter.
        if timeout is None:
            timeout = None
        elif timeout <= 0:
            timeout = 0
        else:
            # poll() has a resolution of 1 millisecond, round away from
            # zero to wait *at least* timeout seconds.
            timeout = math.ceil(timeout * 1e3)
        ready = []
        try:
            fd_event_list = self._selector.poll(timeout)
        except InterruptedError:
            return ready
        for fd, event in fd_event_list:
            # NOTE: the masks are deliberately inverted (& ~READ, & ~WRITE)
            # so that error conditions such as POLLERR/POLLHUP are reported
            # as both readable and writable and can wake up waiters.
            events = 0
            if event & ~self._EVENT_READ:
                events |= EVENT_WRITE
            if event & ~self._EVENT_WRITE:
                events |= EVENT_READ

            key = self._key_from_fd(fd)
            if key:
                ready.append((key, events & key.events))
        return ready
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
# Only defined where the platform's select module provides poll().
if hasattr(select, 'poll'):

    class PollSelector(_PollLikeSelector):
        """Poll-based selector."""
        _selector_cls = select.poll
        _EVENT_READ = select.POLLIN
        _EVENT_WRITE = select.POLLOUT
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
# Only defined where the platform's select module provides epoll() (Linux).
if hasattr(select, 'epoll'):

    class EpollSelector(_PollLikeSelector):
        """Epoll-based selector."""
        _selector_cls = select.epoll
        _EVENT_READ = select.EPOLLIN
        _EVENT_WRITE = select.EPOLLOUT

        def fileno(self):
            # Expose the epoll fd itself (so selectors can be nested).
            return self._selector.fileno()

        def select(self, timeout=None):
            # epoll semantics: -1 blocks forever, 0 returns immediately.
            if timeout is None:
                timeout = -1
            elif timeout <= 0:
                timeout = 0
            else:
                # epoll_wait() has a resolution of 1 millisecond, round away
                # from zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3) * 1e-3

            # epoll_wait() expects `maxevents` to be greater than zero;
            # we want to make sure that `select()` can be called when no
            # FD is registered.
            max_ev = max(len(self._fd_to_key), 1)

            ready = []
            try:
                fd_event_list = self._selector.poll(timeout, max_ev)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                # Inverted masks: see _PollLikeSelector.select() — error/hup
                # conditions are reported as both readable and writable.
                events = 0
                if event & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            # Release the epoll fd before clearing base-class state.
            self._selector.close()
            super().close()
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
# Only defined where the platform's select module provides devpoll (Solaris).
if hasattr(select, 'devpoll'):

    class DevpollSelector(_PollLikeSelector):
        """Solaris /dev/poll selector."""
        _selector_cls = select.devpoll
        _EVENT_READ = select.POLLIN
        _EVENT_WRITE = select.POLLOUT

        def fileno(self):
            # Expose the /dev/poll fd itself.
            return self._selector.fileno()

        def close(self):
            # Release the /dev/poll fd before clearing base-class state.
            self._selector.close()
            super().close()
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
# Only defined where the platform's select module provides kqueue (BSD/macOS).
if hasattr(select, 'kqueue'):

    class KqueueSelector(_BaseSelectorImpl):
        """Kqueue-based selector."""

        def __init__(self):
            super().__init__()
            self._selector = select.kqueue()

        def fileno(self):
            # Expose the kqueue fd itself (so selectors can be nested).
            return self._selector.fileno()

        def register(self, fileobj, events, data=None):
            key = super().register(fileobj, events, data)
            try:
                # kqueue tracks read and write interest as two separate
                # kevent filters, added one at a time.
                if events & EVENT_READ:
                    kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD)
                    self._selector.control([kev], 0, 0)
                if events & EVENT_WRITE:
                    kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                        select.KQ_EV_ADD)
                    self._selector.control([kev], 0, 0)
            except:
                # Keep our bookkeeping consistent with the kernel queue.
                super().unregister(fileobj)
                raise
            return key

        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            if key.events & EVENT_READ:
                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                    select.KQ_EV_DELETE)
                try:
                    self._selector.control([kev], 0, 0)
                except OSError:
                    # This can happen if the FD was closed since it
                    # was registered.
                    pass
            if key.events & EVENT_WRITE:
                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                    select.KQ_EV_DELETE)
                try:
                    self._selector.control([kev], 0, 0)
                except OSError:
                    # See comment above.
                    pass
            return key

        def select(self, timeout=None):
            # Negative timeouts would raise in control(); clamp instead.
            timeout = None if timeout is None else max(timeout, 0)
            # If max_ev is 0, kqueue will ignore the timeout. For consistent
            # behavior with the other selector classes, we prevent that here
            # (using max). See https://bugs.python.org/issue29255
            max_ev = max(len(self._fd_to_key), 1)
            ready = []
            try:
                kev_list = self._selector.control(None, max_ev, timeout)
            except InterruptedError:
                return ready
            for kev in kev_list:
                fd = kev.ident
                flag = kev.filter
                events = 0
                if flag == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if flag == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            # Release the kqueue fd before clearing base-class state.
            self._selector.close()
            super().close()
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
def _can_use(method):
|
| 585 |
+
"""Check if we can use the selector depending upon the
|
| 586 |
+
operating system. """
|
| 587 |
+
# Implementation based upon https://github.com/sethmlarson/selectors2/blob/master/selectors2.py
|
| 588 |
+
selector = getattr(select, method, None)
|
| 589 |
+
if selector is None:
|
| 590 |
+
# select module does not implement method
|
| 591 |
+
return False
|
| 592 |
+
# check if the OS and Kernel actually support the method. Call may fail with
|
| 593 |
+
# OSError: [Errno 38] Function not implemented
|
| 594 |
+
try:
|
| 595 |
+
selector_obj = selector()
|
| 596 |
+
if method == 'poll':
|
| 597 |
+
# check that poll actually works
|
| 598 |
+
selector_obj.poll(0)
|
| 599 |
+
else:
|
| 600 |
+
# close epoll, kqueue, and devpoll fd
|
| 601 |
+
selector_obj.close()
|
| 602 |
+
return True
|
| 603 |
+
except OSError:
|
| 604 |
+
return False
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
# Choose the best implementation, roughly:
#    epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
# Each candidate is probed at import time via _can_use(), which verifies
# both that the select module exposes it and that the kernel supports it.
if _can_use('kqueue'):
    DefaultSelector = KqueueSelector
elif _can_use('epoll'):
    DefaultSelector = EpollSelector
elif _can_use('devpoll'):
    DefaultSelector = DevpollSelector
elif _can_use('poll'):
    DefaultSelector = PollSelector
else:
    DefaultSelector = SelectSelector
|
llava/lib/python3.10/smtpd.py
ADDED
|
@@ -0,0 +1,979 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
"""An RFC 5321 smtp proxy with optional RFC 1870 and RFC 6531 extensions.
|
| 3 |
+
|
| 4 |
+
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
|
| 5 |
+
|
| 6 |
+
Options:
|
| 7 |
+
|
| 8 |
+
--nosetuid
|
| 9 |
+
-n
|
| 10 |
+
This program generally tries to setuid `nobody', unless this flag is
|
| 11 |
+
set. The setuid call will fail if this program is not run as root (in
|
| 12 |
+
which case, use this flag).
|
| 13 |
+
|
| 14 |
+
--version
|
| 15 |
+
-V
|
| 16 |
+
Print the version number and exit.
|
| 17 |
+
|
| 18 |
+
--class classname
|
| 19 |
+
-c classname
|
| 20 |
+
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
|
| 21 |
+
default.
|
| 22 |
+
|
| 23 |
+
--size limit
|
| 24 |
+
-s limit
|
| 25 |
+
Restrict the total size of the incoming message to "limit" number of
|
| 26 |
+
bytes via the RFC 1870 SIZE extension. Defaults to 33554432 bytes.
|
| 27 |
+
|
| 28 |
+
--smtputf8
|
| 29 |
+
-u
|
| 30 |
+
Enable the SMTPUTF8 extension and behave as an RFC 6531 smtp proxy.
|
| 31 |
+
|
| 32 |
+
--debug
|
| 33 |
+
-d
|
| 34 |
+
Turn on debugging prints.
|
| 35 |
+
|
| 36 |
+
--help
|
| 37 |
+
-h
|
| 38 |
+
Print this message and exit.
|
| 39 |
+
|
| 40 |
+
Version: %(__version__)s
|
| 41 |
+
|
| 42 |
+
If localhost is not given then `localhost' is used, and if localport is not
|
| 43 |
+
given then 8025 is used. If remotehost is not given then `localhost' is used,
|
| 44 |
+
and if remoteport is not given, then 25 is used.
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
# Overview:
|
| 48 |
+
#
|
| 49 |
+
# This file implements the minimal SMTP protocol as defined in RFC 5321. It
|
| 50 |
+
# has a hierarchy of classes which implement the backend functionality for the
|
| 51 |
+
# smtpd. A number of classes are provided:
|
| 52 |
+
#
|
| 53 |
+
# SMTPServer - the base class for the backend. Raises NotImplementedError
|
| 54 |
+
# if you try to use it.
|
| 55 |
+
#
|
| 56 |
+
# DebuggingServer - simply prints each message it receives on stdout.
|
| 57 |
+
#
|
| 58 |
+
# PureProxy - Proxies all messages to a real smtpd which does final
|
| 59 |
+
# delivery. One known problem with this class is that it doesn't handle
|
| 60 |
+
# SMTP errors from the backend server at all. This should be fixed
|
| 61 |
+
# (contributions are welcome!).
|
| 62 |
+
#
|
| 63 |
+
# MailmanProxy - An experimental hack to work with GNU Mailman
|
| 64 |
+
# <www.list.org>. Using this server as your real incoming smtpd, your
|
| 65 |
+
# mailhost will automatically recognize and accept mail destined to Mailman
|
| 66 |
+
# lists when those lists are created. Every message not destined for a list
|
| 67 |
+
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
|
| 68 |
+
# are not handled correctly yet.
|
| 69 |
+
#
|
| 70 |
+
#
|
| 71 |
+
# Author: Barry Warsaw <barry@python.org>
|
| 72 |
+
#
|
| 73 |
+
# TODO:
|
| 74 |
+
#
|
| 75 |
+
# - support mailbox delivery
|
| 76 |
+
# - alias files
|
| 77 |
+
# - Handle more ESMTP extensions
|
| 78 |
+
# - handle error codes from the backend smtpd
|
| 79 |
+
|
| 80 |
+
import sys
|
| 81 |
+
import os
|
| 82 |
+
import errno
|
| 83 |
+
import getopt
|
| 84 |
+
import time
|
| 85 |
+
import socket
|
| 86 |
+
import collections
|
| 87 |
+
from warnings import warn
|
| 88 |
+
from email._header_value_parser import get_addr_spec, get_angle_addr
|
| 89 |
+
|
| 90 |
+
# Public API of this module.
__all__ = [
    "SMTPChannel", "SMTPServer", "DebuggingServer", "PureProxy",
    "MailmanProxy",
]

# Emitted once at import time; stacklevel=2 points the warning at the
# importer rather than at this module.
warn(
    'The smtpd module is deprecated and unmaintained and will be removed '
    'in Python 3.12. Please see aiosmtpd '
    '(https://aiosmtpd.readthedocs.io/) for the recommended replacement.',
    DeprecationWarning,
    stacklevel=2)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# These are imported after the above warning so that users get the correct
|
| 104 |
+
# deprecation warning.
|
| 105 |
+
import asyncore
|
| 106 |
+
import asynchat
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# Name this script was invoked as; interpolated into the usage() text.
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.3'
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class Devnull:
    """File-like sink that silently discards everything written to it."""

    def write(self, msg):
        pass

    def flush(self):
        pass
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# Debug output sink; a Devnull by default (replaced with stderr under -d).
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
COMMASPACE = ', '
# Default RFC 1870 SIZE limit for incoming messages: 32 MiB.
DATA_SIZE_DEFAULT = 33554432
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def usage(code, msg=''):
    """Print the module usage text (interpolated with module globals such as
    `program` and `__version__`) to stderr, optionally followed by `msg`,
    then exit the process with the given status code via SystemExit."""
    print(__doc__ % globals(), file=sys.stderr)
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(code)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class SMTPChannel(asynchat.async_chat):
|
| 132 |
+
COMMAND = 0
|
| 133 |
+
DATA = 1
|
| 134 |
+
|
| 135 |
+
command_size_limit = 512
|
| 136 |
+
command_size_limits = collections.defaultdict(lambda x=command_size_limit: x)
|
| 137 |
+
|
| 138 |
+
@property
|
| 139 |
+
def max_command_size_limit(self):
|
| 140 |
+
try:
|
| 141 |
+
return max(self.command_size_limits.values())
|
| 142 |
+
except ValueError:
|
| 143 |
+
return self.command_size_limit
|
| 144 |
+
|
| 145 |
+
def __init__(self, server, conn, addr, data_size_limit=DATA_SIZE_DEFAULT,
|
| 146 |
+
map=None, enable_SMTPUTF8=False, decode_data=False):
|
| 147 |
+
asynchat.async_chat.__init__(self, conn, map=map)
|
| 148 |
+
self.smtp_server = server
|
| 149 |
+
self.conn = conn
|
| 150 |
+
self.addr = addr
|
| 151 |
+
self.data_size_limit = data_size_limit
|
| 152 |
+
self.enable_SMTPUTF8 = enable_SMTPUTF8
|
| 153 |
+
self._decode_data = decode_data
|
| 154 |
+
if enable_SMTPUTF8 and decode_data:
|
| 155 |
+
raise ValueError("decode_data and enable_SMTPUTF8 cannot"
|
| 156 |
+
" be set to True at the same time")
|
| 157 |
+
if decode_data:
|
| 158 |
+
self._emptystring = ''
|
| 159 |
+
self._linesep = '\r\n'
|
| 160 |
+
self._dotsep = '.'
|
| 161 |
+
self._newline = NEWLINE
|
| 162 |
+
else:
|
| 163 |
+
self._emptystring = b''
|
| 164 |
+
self._linesep = b'\r\n'
|
| 165 |
+
self._dotsep = ord(b'.')
|
| 166 |
+
self._newline = b'\n'
|
| 167 |
+
self._set_rset_state()
|
| 168 |
+
self.seen_greeting = ''
|
| 169 |
+
self.extended_smtp = False
|
| 170 |
+
self.command_size_limits.clear()
|
| 171 |
+
self.fqdn = socket.getfqdn()
|
| 172 |
+
try:
|
| 173 |
+
self.peer = conn.getpeername()
|
| 174 |
+
except OSError as err:
|
| 175 |
+
# a race condition may occur if the other end is closing
|
| 176 |
+
# before we can get the peername
|
| 177 |
+
self.close()
|
| 178 |
+
if err.errno != errno.ENOTCONN:
|
| 179 |
+
raise
|
| 180 |
+
return
|
| 181 |
+
print('Peer:', repr(self.peer), file=DEBUGSTREAM)
|
| 182 |
+
self.push('220 %s %s' % (self.fqdn, __version__))
|
| 183 |
+
|
| 184 |
+
    def _set_post_data_state(self):
        """Reset state variables to their post-DATA state."""
        self.smtp_state = self.COMMAND
        self.mailfrom = None
        self.rcpttos = []
        self.require_SMTPUTF8 = False
        self.num_bytes = 0
        # Back to line-at-a-time command parsing.
        self.set_terminator(b'\r\n')
|
| 192 |
+
|
| 193 |
+
    def _set_rset_state(self):
        """Reset all state variables except the greeting."""
        self._set_post_data_state()
        # Also discard any partially-collected message content.
        self.received_data = ''
        self.received_lines = []
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
    # properties for backwards-compatibility
    # Each pair below maps an old name-mangled attribute (_SMTPChannel__xxx)
    # to its modern public equivalent, emitting a DeprecationWarning on both
    # read and write.  stacklevel=2 points the warning at the caller.
    @property
    def __server(self):
        warn("Access to __server attribute on SMTPChannel is deprecated, "
             "use 'smtp_server' instead", DeprecationWarning, 2)
        return self.smtp_server
    @__server.setter
    def __server(self, value):
        warn("Setting __server attribute on SMTPChannel is deprecated, "
             "set 'smtp_server' instead", DeprecationWarning, 2)
        self.smtp_server = value

    @property
    def __line(self):
        warn("Access to __line attribute on SMTPChannel is deprecated, "
             "use 'received_lines' instead", DeprecationWarning, 2)
        return self.received_lines
    @__line.setter
    def __line(self, value):
        warn("Setting __line attribute on SMTPChannel is deprecated, "
             "set 'received_lines' instead", DeprecationWarning, 2)
        self.received_lines = value

    @property
    def __state(self):
        warn("Access to __state attribute on SMTPChannel is deprecated, "
             "use 'smtp_state' instead", DeprecationWarning, 2)
        return self.smtp_state
    @__state.setter
    def __state(self, value):
        warn("Setting __state attribute on SMTPChannel is deprecated, "
             "set 'smtp_state' instead", DeprecationWarning, 2)
        self.smtp_state = value

    @property
    def __greeting(self):
        warn("Access to __greeting attribute on SMTPChannel is deprecated, "
             "use 'seen_greeting' instead", DeprecationWarning, 2)
        return self.seen_greeting
    @__greeting.setter
    def __greeting(self, value):
        warn("Setting __greeting attribute on SMTPChannel is deprecated, "
             "set 'seen_greeting' instead", DeprecationWarning, 2)
        self.seen_greeting = value

    @property
    def __mailfrom(self):
        warn("Access to __mailfrom attribute on SMTPChannel is deprecated, "
             "use 'mailfrom' instead", DeprecationWarning, 2)
        return self.mailfrom
    @__mailfrom.setter
    def __mailfrom(self, value):
        warn("Setting __mailfrom attribute on SMTPChannel is deprecated, "
             "set 'mailfrom' instead", DeprecationWarning, 2)
        self.mailfrom = value

    @property
    def __rcpttos(self):
        warn("Access to __rcpttos attribute on SMTPChannel is deprecated, "
             "use 'rcpttos' instead", DeprecationWarning, 2)
        return self.rcpttos
    @__rcpttos.setter
    def __rcpttos(self, value):
        warn("Setting __rcpttos attribute on SMTPChannel is deprecated, "
             "set 'rcpttos' instead", DeprecationWarning, 2)
        self.rcpttos = value

    @property
    def __data(self):
        warn("Access to __data attribute on SMTPChannel is deprecated, "
             "use 'received_data' instead", DeprecationWarning, 2)
        return self.received_data
    @__data.setter
    def __data(self, value):
        warn("Setting __data attribute on SMTPChannel is deprecated, "
             "set 'received_data' instead", DeprecationWarning, 2)
        self.received_data = value

    @property
    def __fqdn(self):
        warn("Access to __fqdn attribute on SMTPChannel is deprecated, "
             "use 'fqdn' instead", DeprecationWarning, 2)
        return self.fqdn
    @__fqdn.setter
    def __fqdn(self, value):
        warn("Setting __fqdn attribute on SMTPChannel is deprecated, "
             "set 'fqdn' instead", DeprecationWarning, 2)
        self.fqdn = value

    @property
    def __peer(self):
        warn("Access to __peer attribute on SMTPChannel is deprecated, "
             "use 'peer' instead", DeprecationWarning, 2)
        return self.peer
    @__peer.setter
    def __peer(self, value):
        warn("Setting __peer attribute on SMTPChannel is deprecated, "
             "set 'peer' instead", DeprecationWarning, 2)
        self.peer = value

    @property
    def __conn(self):
        warn("Access to __conn attribute on SMTPChannel is deprecated, "
             "use 'conn' instead", DeprecationWarning, 2)
        return self.conn
    @__conn.setter
    def __conn(self, value):
        warn("Setting __conn attribute on SMTPChannel is deprecated, "
             "set 'conn' instead", DeprecationWarning, 2)
        self.conn = value

    @property
    def __addr(self):
        warn("Access to __addr attribute on SMTPChannel is deprecated, "
             "use 'addr' instead", DeprecationWarning, 2)
        return self.addr
    @__addr.setter
    def __addr(self, value):
        warn("Setting __addr attribute on SMTPChannel is deprecated, "
             "set 'addr' instead", DeprecationWarning, 2)
        self.addr = value
|
| 321 |
+
|
| 322 |
+
# Overrides base class for convenience.
|
| 323 |
+
def push(self, msg):
|
| 324 |
+
asynchat.async_chat.push(self, bytes(
|
| 325 |
+
msg + '\r\n', 'utf-8' if self.require_SMTPUTF8 else 'ascii'))
|
| 326 |
+
|
| 327 |
+
    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        """Accumulate an incoming chunk, enforcing the active size limit."""
        limit = None
        if self.smtp_state == self.COMMAND:
            limit = self.max_command_size_limit
        elif self.smtp_state == self.DATA:
            limit = self.data_size_limit
        # Once over the limit, silently drop further chunks; the error is
        # reported by found_terminator() when the full line/message arrives.
        if limit and self.num_bytes > limit:
            return
        elif limit:
            self.num_bytes += len(data)
        if self._decode_data:
            self.received_lines.append(str(data, 'utf-8'))
        else:
            self.received_lines.append(data)
|
| 342 |
+
|
| 343 |
+
    # Implementation of base class abstract method
    def found_terminator(self):
        """Process a complete command line (COMMAND) or message (DATA)."""
        line = self._emptystring.join(self.received_lines)
        print('Data:', repr(line), file=DEBUGSTREAM)
        self.received_lines = []
        if self.smtp_state == self.COMMAND:
            # Capture and reset the byte count before dispatching.
            sz, self.num_bytes = self.num_bytes, 0
            if not line:
                self.push('500 Error: bad syntax')
                return
            if not self._decode_data:
                line = str(line, 'utf-8')
            # Split "COMMAND arg..." on the first space.
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Per-command limits only apply after EHLO (extended SMTP).
            max_sz = (self.command_size_limits[command]
                        if self.extended_smtp else self.command_size_limit)
            if sz > max_sz:
                self.push('500 Error: line too long')
                return
            # Dispatch to smtp_<COMMAND> if such a handler exists.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('500 Error: command "%s" not recognized' % command)
                return
            method(arg)
            return
        else:
            if self.smtp_state != self.DATA:
                self.push('451 Internal confusion')
                self.num_bytes = 0
                return
            if self.data_size_limit and self.num_bytes > self.data_size_limit:
                self.push('552 Error: Too much mail data')
                self.num_bytes = 0
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 5321, Section 4.5.2.
            data = []
            for text in line.split(self._linesep):
                if text and text[0] == self._dotsep:
                    data.append(text[1:])
                else:
                    data.append(text)
            self.received_data = self._newline.join(data)
            args = (self.peer, self.mailfrom, self.rcpttos, self.received_data)
            kwargs = {}
            if not self._decode_data:
                # Only the binary (non-decoding) path passes ESMTP options.
                kwargs = {
                    'mail_options': self.mail_options,
                    'rcpt_options': self.rcpt_options,
                }
            status = self.smtp_server.process_message(*args, **kwargs)
            self._set_post_data_state()
            # A falsy status from process_message means "accepted".
            if not status:
                self.push('250 OK')
            else:
                self.push(status)
|
| 404 |
+
|
| 405 |
+
# SMTP and ESMTP commands
|
| 406 |
+
def smtp_HELO(self, arg):
|
| 407 |
+
if not arg:
|
| 408 |
+
self.push('501 Syntax: HELO hostname')
|
| 409 |
+
return
|
| 410 |
+
# See issue #21783 for a discussion of this behavior.
|
| 411 |
+
if self.seen_greeting:
|
| 412 |
+
self.push('503 Duplicate HELO/EHLO')
|
| 413 |
+
return
|
| 414 |
+
self._set_rset_state()
|
| 415 |
+
self.seen_greeting = arg
|
| 416 |
+
self.push('250 %s' % self.fqdn)
|
| 417 |
+
|
| 418 |
+
    def smtp_EHLO(self, arg):
        """Handle EHLO: extended greeting; advertise supported extensions."""
        if not arg:
            self.push('501 Syntax: EHLO hostname')
            return
        # See issue #21783 for a discussion of this behavior.
        if self.seen_greeting:
            self.push('503 Duplicate HELO/EHLO')
            return
        self._set_rset_state()
        self.seen_greeting = arg
        self.extended_smtp = True
        # Multiline 250- replies, final line is plain "250 HELP".
        self.push('250-%s' % self.fqdn)
        if self.data_size_limit:
            self.push('250-SIZE %s' % self.data_size_limit)
            # Allow room for a "SIZE=<n>" parameter on MAIL FROM.
            self.command_size_limits['MAIL'] += 26
        if not self._decode_data:
            self.push('250-8BITMIME')
        if self.enable_SMTPUTF8:
            self.push('250-SMTPUTF8')
            # Allow room for an "SMTPUTF8" parameter on MAIL FROM.
            self.command_size_limits['MAIL'] += 10
        self.push('250 HELP')
|
| 439 |
+
|
| 440 |
+
def smtp_NOOP(self, arg):
|
| 441 |
+
if arg:
|
| 442 |
+
self.push('501 Syntax: NOOP')
|
| 443 |
+
else:
|
| 444 |
+
self.push('250 OK')
|
| 445 |
+
|
| 446 |
+
def smtp_QUIT(self, arg):
|
| 447 |
+
# args is ignored
|
| 448 |
+
self.push('221 Bye')
|
| 449 |
+
self.close_when_done()
|
| 450 |
+
|
| 451 |
+
def _strip_command_keyword(self, keyword, arg):
|
| 452 |
+
keylen = len(keyword)
|
| 453 |
+
if arg[:keylen].upper() == keyword:
|
| 454 |
+
return arg[keylen:].strip()
|
| 455 |
+
return ''
|
| 456 |
+
|
| 457 |
+
def _getaddr(self, arg):
|
| 458 |
+
if not arg:
|
| 459 |
+
return '', ''
|
| 460 |
+
if arg.lstrip().startswith('<'):
|
| 461 |
+
address, rest = get_angle_addr(arg)
|
| 462 |
+
else:
|
| 463 |
+
address, rest = get_addr_spec(arg)
|
| 464 |
+
if not address:
|
| 465 |
+
return address, rest
|
| 466 |
+
return address.addr_spec, rest
|
| 467 |
+
|
| 468 |
+
def _getparams(self, params):
|
| 469 |
+
# Return params as dictionary. Return None if not all parameters
|
| 470 |
+
# appear to be syntactically valid according to RFC 1869.
|
| 471 |
+
result = {}
|
| 472 |
+
for param in params:
|
| 473 |
+
param, eq, value = param.partition('=')
|
| 474 |
+
if not param.isalnum() or eq and not value:
|
| 475 |
+
return None
|
| 476 |
+
result[param] = value if eq else True
|
| 477 |
+
return result
|
| 478 |
+
|
| 479 |
+
def smtp_HELP(self, arg):
|
| 480 |
+
if arg:
|
| 481 |
+
extended = ' [SP <mail-parameters>]'
|
| 482 |
+
lc_arg = arg.upper()
|
| 483 |
+
if lc_arg == 'EHLO':
|
| 484 |
+
self.push('250 Syntax: EHLO hostname')
|
| 485 |
+
elif lc_arg == 'HELO':
|
| 486 |
+
self.push('250 Syntax: HELO hostname')
|
| 487 |
+
elif lc_arg == 'MAIL':
|
| 488 |
+
msg = '250 Syntax: MAIL FROM: <address>'
|
| 489 |
+
if self.extended_smtp:
|
| 490 |
+
msg += extended
|
| 491 |
+
self.push(msg)
|
| 492 |
+
elif lc_arg == 'RCPT':
|
| 493 |
+
msg = '250 Syntax: RCPT TO: <address>'
|
| 494 |
+
if self.extended_smtp:
|
| 495 |
+
msg += extended
|
| 496 |
+
self.push(msg)
|
| 497 |
+
elif lc_arg == 'DATA':
|
| 498 |
+
self.push('250 Syntax: DATA')
|
| 499 |
+
elif lc_arg == 'RSET':
|
| 500 |
+
self.push('250 Syntax: RSET')
|
| 501 |
+
elif lc_arg == 'NOOP':
|
| 502 |
+
self.push('250 Syntax: NOOP')
|
| 503 |
+
elif lc_arg == 'QUIT':
|
| 504 |
+
self.push('250 Syntax: QUIT')
|
| 505 |
+
elif lc_arg == 'VRFY':
|
| 506 |
+
self.push('250 Syntax: VRFY <address>')
|
| 507 |
+
else:
|
| 508 |
+
self.push('501 Supported commands: EHLO HELO MAIL RCPT '
|
| 509 |
+
'DATA RSET NOOP QUIT VRFY')
|
| 510 |
+
else:
|
| 511 |
+
self.push('250 Supported commands: EHLO HELO MAIL RCPT DATA '
|
| 512 |
+
'RSET NOOP QUIT VRFY')
|
| 513 |
+
|
| 514 |
+
def smtp_VRFY(self, arg):
|
| 515 |
+
if arg:
|
| 516 |
+
address, params = self._getaddr(arg)
|
| 517 |
+
if address:
|
| 518 |
+
self.push('252 Cannot VRFY user, but will accept message '
|
| 519 |
+
'and attempt delivery')
|
| 520 |
+
else:
|
| 521 |
+
self.push('502 Could not VRFY %s' % arg)
|
| 522 |
+
else:
|
| 523 |
+
self.push('501 Syntax: VRFY <address>')
|
| 524 |
+
|
| 525 |
+
    def smtp_MAIL(self, arg):
        """Handle MAIL FROM: begin a transaction, validating ESMTP params."""
        if not self.seen_greeting:
            self.push('503 Error: send HELO first')
            return
        print('===> MAIL', arg, file=DEBUGSTREAM)
        syntaxerr = '501 Syntax: MAIL FROM: <address>'
        if self.extended_smtp:
            syntaxerr += ' [SP <mail-parameters>]'
        if arg is None:
            self.push(syntaxerr)
            return
        arg = self._strip_command_keyword('FROM:', arg)
        address, params = self._getaddr(arg)
        if not address:
            self.push(syntaxerr)
            return
        # Parameters are only legal after EHLO (extended SMTP).
        if not self.extended_smtp and params:
            self.push(syntaxerr)
            return
        if self.mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.mail_options = params.upper().split()
        params = self._getparams(self.mail_options)
        if params is None:
            self.push(syntaxerr)
            return
        if not self._decode_data:
            # BODY defaults to 7BIT; 8BITMIME is the only other accepted value.
            body = params.pop('BODY', '7BIT')
            if body not in ['7BIT', '8BITMIME']:
                self.push('501 Error: BODY can only be one of 7BIT, 8BITMIME')
                return
        if self.enable_SMTPUTF8:
            # SMTPUTF8 is a bare keyword; supplying a value is an error.
            smtputf8 = params.pop('SMTPUTF8', False)
            if smtputf8 is True:
                self.require_SMTPUTF8 = True
            elif smtputf8 is not False:
                self.push('501 Error: SMTPUTF8 takes no arguments')
                return
        size = params.pop('SIZE', None)
        if size:
            if not size.isdigit():
                self.push(syntaxerr)
                return
            elif self.data_size_limit and int(size) > self.data_size_limit:
                self.push('552 Error: message size exceeds fixed maximum message size')
                return
        # Anything left over is an unsupported parameter.
        if len(params.keys()) > 0:
            self.push('555 MAIL FROM parameters not recognized or not implemented')
            return
        self.mailfrom = address
        print('sender:', self.mailfrom, file=DEBUGSTREAM)
        self.push('250 OK')
|
| 578 |
+
|
| 579 |
+
def smtp_RCPT(self, arg):
|
| 580 |
+
if not self.seen_greeting:
|
| 581 |
+
self.push('503 Error: send HELO first');
|
| 582 |
+
return
|
| 583 |
+
print('===> RCPT', arg, file=DEBUGSTREAM)
|
| 584 |
+
if not self.mailfrom:
|
| 585 |
+
self.push('503 Error: need MAIL command')
|
| 586 |
+
return
|
| 587 |
+
syntaxerr = '501 Syntax: RCPT TO: <address>'
|
| 588 |
+
if self.extended_smtp:
|
| 589 |
+
syntaxerr += ' [SP <mail-parameters>]'
|
| 590 |
+
if arg is None:
|
| 591 |
+
self.push(syntaxerr)
|
| 592 |
+
return
|
| 593 |
+
arg = self._strip_command_keyword('TO:', arg)
|
| 594 |
+
address, params = self._getaddr(arg)
|
| 595 |
+
if not address:
|
| 596 |
+
self.push(syntaxerr)
|
| 597 |
+
return
|
| 598 |
+
if not self.extended_smtp and params:
|
| 599 |
+
self.push(syntaxerr)
|
| 600 |
+
return
|
| 601 |
+
self.rcpt_options = params.upper().split()
|
| 602 |
+
params = self._getparams(self.rcpt_options)
|
| 603 |
+
if params is None:
|
| 604 |
+
self.push(syntaxerr)
|
| 605 |
+
return
|
| 606 |
+
# XXX currently there are no options we recognize.
|
| 607 |
+
if len(params.keys()) > 0:
|
| 608 |
+
self.push('555 RCPT TO parameters not recognized or not implemented')
|
| 609 |
+
return
|
| 610 |
+
self.rcpttos.append(address)
|
| 611 |
+
print('recips:', self.rcpttos, file=DEBUGSTREAM)
|
| 612 |
+
self.push('250 OK')
|
| 613 |
+
|
| 614 |
+
def smtp_RSET(self, arg):
|
| 615 |
+
if arg:
|
| 616 |
+
self.push('501 Syntax: RSET')
|
| 617 |
+
return
|
| 618 |
+
self._set_rset_state()
|
| 619 |
+
self.push('250 OK')
|
| 620 |
+
|
| 621 |
+
def smtp_DATA(self, arg):
|
| 622 |
+
if not self.seen_greeting:
|
| 623 |
+
self.push('503 Error: send HELO first');
|
| 624 |
+
return
|
| 625 |
+
if not self.rcpttos:
|
| 626 |
+
self.push('503 Error: need RCPT command')
|
| 627 |
+
return
|
| 628 |
+
if arg:
|
| 629 |
+
self.push('501 Syntax: DATA')
|
| 630 |
+
return
|
| 631 |
+
self.smtp_state = self.DATA
|
| 632 |
+
self.set_terminator(b'\r\n.\r\n')
|
| 633 |
+
self.push('354 End data with <CR><LF>.<CR><LF>')
|
| 634 |
+
|
| 635 |
+
    # Commands that have not been implemented
    def smtp_EXPN(self, arg):
        # EXPN (mailing-list expansion) is deliberately unsupported.
        self.push('502 EXPN not implemented')
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
class SMTPServer(asyncore.dispatcher):
    """Asyncore-based SMTP server; subclass and override process_message."""

    # SMTPChannel class to use for managing client connections
    channel_class = SMTPChannel

    def __init__(self, localaddr, remoteaddr,
                 data_size_limit=DATA_SIZE_DEFAULT, map=None,
                 enable_SMTPUTF8=False, decode_data=False):
        """Create a listening server on localaddr.

        remoteaddr is stored for subclasses (e.g. proxies) that relay mail.
        enable_SMTPUTF8 and decode_data are mutually exclusive.
        """
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        self.data_size_limit = data_size_limit
        self.enable_SMTPUTF8 = enable_SMTPUTF8
        self._decode_data = decode_data
        if enable_SMTPUTF8 and decode_data:
            raise ValueError("decode_data and enable_SMTPUTF8 cannot"
                             " be set to True at the same time")
        asyncore.dispatcher.__init__(self, map=map)
        try:
            # Resolve the address family (IPv4/IPv6) for the listen socket.
            gai_results = socket.getaddrinfo(*localaddr,
                                             type=socket.SOCK_STREAM)
            self.create_socket(gai_results[0][0], gai_results[0][1])
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # Bare except is deliberate: release the half-constructed socket
            # on any failure, then re-raise unchanged.
            self.close()
            raise
        else:
            print('%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr), file=DEBUGSTREAM)

    def handle_accepted(self, conn, addr):
        """Create a channel_class instance for each accepted connection."""
        print('Incoming connection from %s' % repr(addr), file=DEBUGSTREAM)
        # The channel registers itself with the asyncore map; the local
        # binding is intentionally unused beyond construction.
        channel = self.channel_class(self,
                                     conn,
                                     addr,
                                     self.data_size_limit,
                                     self._map,
                                     self.enable_SMTPUTF8,
                                     self._decode_data)

    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        """Override this abstract method to handle messages from the client.

        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.

        mailfrom is the raw address the client claims the message is coming
        from.

        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.

        data is a string containing the entire full text of the message,
        headers (if supplied) and all.  It has been `de-transparencied'
        according to RFC 821, Section 4.5.2.  In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.

        kwargs is a dictionary containing additional information.  It is
        empty if decode_data=True was given as init parameter, otherwise
        it will contain the following keys:
            'mail_options': list of parameters to the mail command.  All
                            elements are uppercase strings.  Example:
                            ['BODY=8BITMIME', 'SMTPUTF8'].
            'rcpt_options': same, for the rcpt command.

        This function should return None for a normal `250 Ok' response;
        otherwise, it should return the desired response string in RFC 821
        format.

        """
        raise NotImplementedError
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
class DebuggingServer(SMTPServer):
    """SMTP server that prints each received message to stdout."""

    def _print_message_content(self, peer, data):
        """Print message data, inserting an X-Peer header after the headers.

        data may be str or bytes; bytes lines are printed via repr() to
        avoid mojibake and warnings.
        """
        # Was `inheaders = 1` / `= 0`; a boolean states the intent directly.
        in_headers = True
        lines = data.splitlines()
        for line in lines:
            # headers first
            if in_headers and not line:
                peerheader = 'X-Peer: ' + peer[0]
                if not isinstance(data, str):
                    # decoded_data=false; make header match other binary output
                    peerheader = repr(peerheader.encode('utf-8'))
                print(peerheader)
                in_headers = False
            if not isinstance(data, str):
                # Avoid spurious 'str on bytes instance' warning.
                line = repr(line)
            print(line)

    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        """Print the message (and any ESMTP options) instead of delivering."""
        print('---------- MESSAGE FOLLOWS ----------')
        if kwargs:
            if kwargs.get('mail_options'):
                print('mail options: %s' % kwargs['mail_options'])
            if kwargs.get('rcpt_options'):
                print('rcpt options: %s\n' % kwargs['rcpt_options'])
        self._print_message_content(peer, data)
        print('------------ END MESSAGE ------------')
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
class PureProxy(SMTPServer):
    """Proxy that relays every received message to the remote SMTP server."""

    def __init__(self, *args, **kwargs):
        # SMTPUTF8 cannot be honored because smtplib-based relay below
        # does not negotiate it.
        if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
            raise ValueError("PureProxy does not support SMTPUTF8.")
        super(PureProxy, self).__init__(*args, **kwargs)

    def process_message(self, peer, mailfrom, rcpttos, data):
        """Insert an X-Peer header, then relay the message to _remoteaddr."""
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        # Insert X-Peer just before the blank header/body separator.
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print('we got some refusals:', refused, file=DEBUGSTREAM)

    def _deliver(self, mailfrom, rcpttos, data):
        """Relay via smtplib; return {rcpt: (code, msg)} for refused rcpts."""
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                s.quit()
        except smtplib.SMTPRecipientsRefused as e:
            print('got SMTPRecipientsRefused', file=DEBUGSTREAM)
            refused = e.recipients
        except (OSError, smtplib.SMTPException) as e:
            print('got', e.__class__, file=DEBUGSTREAM)
            # All recipients were refused.  If the exception had an associated
            # error code, use it.  Otherwise,fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
class MailmanProxy(PureProxy):
    """Deprecated proxy that hands list-addressed mail straight to Mailman."""

    def __init__(self, *args, **kwargs):
        warn('MailmanProxy is deprecated and will be removed '
             'in future', DeprecationWarning, 2)
        if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
            raise ValueError("MailmanProxy does not support SMTPUTF8.")
        # NOTE(review): super(PureProxy, self) skips PureProxy.__init__ and
        # calls SMTPServer.__init__ directly -- presumably intentional since
        # the SMTPUTF8 check is duplicated above, but worth confirming.
        super(PureProxy, self).__init__(*args, **kwargs)

    def process_message(self, peer, mailfrom, rcpttos, data):
        """Deliver list-addressed recipients via Mailman; proxy the rest."""
        from io import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left,
        print('forwarding recips:', ' '.join(rcpttos), file=DEBUGSTREAM)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print('we got refusals:', refused, file=DEBUGSTREAM)
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.get('from'):
            msg['From'] = mailfrom
        if not msg.get('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print('sending message to', rcpt, file=DEBUGSTREAM)
            # Cache MailList objects so each list is opened at most once.
            mlist = mlists.get(listname)
            if not mlist:
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
|
| 876 |
+
|
| 877 |
+
class Options:
    """Defaults for the command-line front end; parseargs() mutates these."""
    setuid = True               # drop privileges to 'nobody' after binding
    classname = 'PureProxy'     # server class to instantiate (-c/--class)
    size_limit = None           # max message size; None means unlimited
    enable_SMTPUTF8 = False     # advertise/accept SMTPUTF8 (-u/--smtputf8)
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
def parseargs():
    """Parse command-line options and arguments into an Options instance.

    Rebinds the global DEBUGSTREAM when -d/--debug is given.  Exits via
    usage() on malformed options, argument counts, or host:port specs.
    """
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:s:du',
            ['class=', 'nosetuid', 'version', 'help', 'size=', 'debug',
             'smtputf8'])
    except getopt.error as e:
        usage(1, e)

    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print(__version__)
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = False
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            DEBUGSTREAM = sys.stderr
        elif opt in ('-u', '--smtputf8'):
            options.enable_SMTPUTF8 = True
        elif opt in ('-s', '--size'):
            try:
                options.size_limit = int(arg)
            except ValueError:
                # Was a bare `except:`, which would also have swallowed
                # SystemExit/KeyboardInterrupt; int() raises ValueError here.
                print('Invalid size: ' + arg, file=sys.stderr)
                sys.exit(1)

    # parse the rest of the arguments
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))

    # split into host/port pairs
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
if __name__ == '__main__':
    options = parseargs()
    # Become nobody
    classname = options.classname
    if "." in classname:
        # Dotted name: import the containing module, then pull the class out.
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        # Bare name: look it up in this very module.
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport),
                   options.size_limit, enable_SMTPUTF8=options.enable_SMTPUTF8)
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print('Cannot import module "pwd"; try running with -n option.', file=sys.stderr)
            sys.exit(1)
        # Field 2 of the pwd entry is the numeric uid.
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except PermissionError:
            print('Cannot setuid "nobody"; try running with -n option.', file=sys.stderr)
            sys.exit(1)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
|
llava/lib/python3.10/sre_parse.py
ADDED
|
@@ -0,0 +1,1076 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Secret Labs' Regular Expression Engine
|
| 3 |
+
#
|
| 4 |
+
# convert re-style regular expression to sre pattern
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
|
| 7 |
+
#
|
| 8 |
+
# See the sre.py file for information on usage and redistribution.
|
| 9 |
+
#
|
| 10 |
+
|
| 11 |
+
"""Internal support module for sre"""
|
| 12 |
+
|
| 13 |
+
# XXX: show string offset and offending character for all errors
|
| 14 |
+
|
| 15 |
+
from sre_constants import *
|
| 16 |
+
|
| 17 |
+
SPECIAL_CHARS = ".\\[{()*+?^$|"
|
| 18 |
+
REPEAT_CHARS = "*+?{"
|
| 19 |
+
|
| 20 |
+
DIGITS = frozenset("0123456789")
|
| 21 |
+
|
| 22 |
+
OCTDIGITS = frozenset("01234567")
|
| 23 |
+
HEXDIGITS = frozenset("0123456789abcdefABCDEF")
|
| 24 |
+
ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
| 25 |
+
|
| 26 |
+
WHITESPACE = frozenset(" \t\n\r\v\f")
|
| 27 |
+
|
| 28 |
+
_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT})
|
| 29 |
+
_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
|
| 30 |
+
|
| 31 |
+
ESCAPES = {
|
| 32 |
+
r"\a": (LITERAL, ord("\a")),
|
| 33 |
+
r"\b": (LITERAL, ord("\b")),
|
| 34 |
+
r"\f": (LITERAL, ord("\f")),
|
| 35 |
+
r"\n": (LITERAL, ord("\n")),
|
| 36 |
+
r"\r": (LITERAL, ord("\r")),
|
| 37 |
+
r"\t": (LITERAL, ord("\t")),
|
| 38 |
+
r"\v": (LITERAL, ord("\v")),
|
| 39 |
+
r"\\": (LITERAL, ord("\\"))
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
CATEGORIES = {
|
| 43 |
+
r"\A": (AT, AT_BEGINNING_STRING), # start of string
|
| 44 |
+
r"\b": (AT, AT_BOUNDARY),
|
| 45 |
+
r"\B": (AT, AT_NON_BOUNDARY),
|
| 46 |
+
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
|
| 47 |
+
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
|
| 48 |
+
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
|
| 49 |
+
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
|
| 50 |
+
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
|
| 51 |
+
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
|
| 52 |
+
r"\Z": (AT, AT_END_STRING), # end of string
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
FLAGS = {
|
| 56 |
+
# standard flags
|
| 57 |
+
"i": SRE_FLAG_IGNORECASE,
|
| 58 |
+
"L": SRE_FLAG_LOCALE,
|
| 59 |
+
"m": SRE_FLAG_MULTILINE,
|
| 60 |
+
"s": SRE_FLAG_DOTALL,
|
| 61 |
+
"x": SRE_FLAG_VERBOSE,
|
| 62 |
+
# extensions
|
| 63 |
+
"a": SRE_FLAG_ASCII,
|
| 64 |
+
"t": SRE_FLAG_TEMPLATE,
|
| 65 |
+
"u": SRE_FLAG_UNICODE,
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE
|
| 69 |
+
GLOBAL_FLAGS = SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE
|
| 70 |
+
|
| 71 |
+
class Verbose(Exception):
|
| 72 |
+
pass
|
| 73 |
+
|
| 74 |
+
class State:
|
| 75 |
+
# keeps track of state for parsing
|
| 76 |
+
def __init__(self):
|
| 77 |
+
self.flags = 0
|
| 78 |
+
self.groupdict = {}
|
| 79 |
+
self.groupwidths = [None] # group 0
|
| 80 |
+
self.lookbehindgroups = None
|
| 81 |
+
self.grouprefpos = {}
|
| 82 |
+
@property
|
| 83 |
+
def groups(self):
|
| 84 |
+
return len(self.groupwidths)
|
| 85 |
+
def opengroup(self, name=None):
|
| 86 |
+
gid = self.groups
|
| 87 |
+
self.groupwidths.append(None)
|
| 88 |
+
if self.groups > MAXGROUPS:
|
| 89 |
+
raise error("too many groups")
|
| 90 |
+
if name is not None:
|
| 91 |
+
ogid = self.groupdict.get(name, None)
|
| 92 |
+
if ogid is not None:
|
| 93 |
+
raise error("redefinition of group name %r as group %d; "
|
| 94 |
+
"was group %d" % (name, gid, ogid))
|
| 95 |
+
self.groupdict[name] = gid
|
| 96 |
+
return gid
|
| 97 |
+
def closegroup(self, gid, p):
|
| 98 |
+
self.groupwidths[gid] = p.getwidth()
|
| 99 |
+
def checkgroup(self, gid):
|
| 100 |
+
return gid < self.groups and self.groupwidths[gid] is not None
|
| 101 |
+
|
| 102 |
+
def checklookbehindgroup(self, gid, source):
|
| 103 |
+
if self.lookbehindgroups is not None:
|
| 104 |
+
if not self.checkgroup(gid):
|
| 105 |
+
raise source.error('cannot refer to an open group')
|
| 106 |
+
if gid >= self.lookbehindgroups:
|
| 107 |
+
raise source.error('cannot refer to group defined in the same '
|
| 108 |
+
'lookbehind subpattern')
|
| 109 |
+
|
| 110 |
+
class SubPattern:
|
| 111 |
+
# a subpattern, in intermediate form
|
| 112 |
+
def __init__(self, state, data=None):
|
| 113 |
+
self.state = state
|
| 114 |
+
if data is None:
|
| 115 |
+
data = []
|
| 116 |
+
self.data = data
|
| 117 |
+
self.width = None
|
| 118 |
+
|
| 119 |
+
def dump(self, level=0):
|
| 120 |
+
nl = True
|
| 121 |
+
seqtypes = (tuple, list)
|
| 122 |
+
for op, av in self.data:
|
| 123 |
+
print(level*" " + str(op), end='')
|
| 124 |
+
if op is IN:
|
| 125 |
+
# member sublanguage
|
| 126 |
+
print()
|
| 127 |
+
for op, a in av:
|
| 128 |
+
print((level+1)*" " + str(op), a)
|
| 129 |
+
elif op is BRANCH:
|
| 130 |
+
print()
|
| 131 |
+
for i, a in enumerate(av[1]):
|
| 132 |
+
if i:
|
| 133 |
+
print(level*" " + "OR")
|
| 134 |
+
a.dump(level+1)
|
| 135 |
+
elif op is GROUPREF_EXISTS:
|
| 136 |
+
condgroup, item_yes, item_no = av
|
| 137 |
+
print('', condgroup)
|
| 138 |
+
item_yes.dump(level+1)
|
| 139 |
+
if item_no:
|
| 140 |
+
print(level*" " + "ELSE")
|
| 141 |
+
item_no.dump(level+1)
|
| 142 |
+
elif isinstance(av, seqtypes):
|
| 143 |
+
nl = False
|
| 144 |
+
for a in av:
|
| 145 |
+
if isinstance(a, SubPattern):
|
| 146 |
+
if not nl:
|
| 147 |
+
print()
|
| 148 |
+
a.dump(level+1)
|
| 149 |
+
nl = True
|
| 150 |
+
else:
|
| 151 |
+
if not nl:
|
| 152 |
+
print(' ', end='')
|
| 153 |
+
print(a, end='')
|
| 154 |
+
nl = False
|
| 155 |
+
if not nl:
|
| 156 |
+
print()
|
| 157 |
+
else:
|
| 158 |
+
print('', av)
|
| 159 |
+
def __repr__(self):
|
| 160 |
+
return repr(self.data)
|
| 161 |
+
def __len__(self):
|
| 162 |
+
return len(self.data)
|
| 163 |
+
def __delitem__(self, index):
|
| 164 |
+
del self.data[index]
|
| 165 |
+
def __getitem__(self, index):
|
| 166 |
+
if isinstance(index, slice):
|
| 167 |
+
return SubPattern(self.state, self.data[index])
|
| 168 |
+
return self.data[index]
|
| 169 |
+
def __setitem__(self, index, code):
|
| 170 |
+
self.data[index] = code
|
| 171 |
+
def insert(self, index, code):
|
| 172 |
+
self.data.insert(index, code)
|
| 173 |
+
def append(self, code):
|
| 174 |
+
self.data.append(code)
|
| 175 |
+
def getwidth(self):
|
| 176 |
+
# determine the width (min, max) for this subpattern
|
| 177 |
+
if self.width is not None:
|
| 178 |
+
return self.width
|
| 179 |
+
lo = hi = 0
|
| 180 |
+
for op, av in self.data:
|
| 181 |
+
if op is BRANCH:
|
| 182 |
+
i = MAXREPEAT - 1
|
| 183 |
+
j = 0
|
| 184 |
+
for av in av[1]:
|
| 185 |
+
l, h = av.getwidth()
|
| 186 |
+
i = min(i, l)
|
| 187 |
+
j = max(j, h)
|
| 188 |
+
lo = lo + i
|
| 189 |
+
hi = hi + j
|
| 190 |
+
elif op is CALL:
|
| 191 |
+
i, j = av.getwidth()
|
| 192 |
+
lo = lo + i
|
| 193 |
+
hi = hi + j
|
| 194 |
+
elif op is SUBPATTERN:
|
| 195 |
+
i, j = av[-1].getwidth()
|
| 196 |
+
lo = lo + i
|
| 197 |
+
hi = hi + j
|
| 198 |
+
elif op in _REPEATCODES:
|
| 199 |
+
i, j = av[2].getwidth()
|
| 200 |
+
lo = lo + i * av[0]
|
| 201 |
+
hi = hi + j * av[1]
|
| 202 |
+
elif op in _UNITCODES:
|
| 203 |
+
lo = lo + 1
|
| 204 |
+
hi = hi + 1
|
| 205 |
+
elif op is GROUPREF:
|
| 206 |
+
i, j = self.state.groupwidths[av]
|
| 207 |
+
lo = lo + i
|
| 208 |
+
hi = hi + j
|
| 209 |
+
elif op is GROUPREF_EXISTS:
|
| 210 |
+
i, j = av[1].getwidth()
|
| 211 |
+
if av[2] is not None:
|
| 212 |
+
l, h = av[2].getwidth()
|
| 213 |
+
i = min(i, l)
|
| 214 |
+
j = max(j, h)
|
| 215 |
+
else:
|
| 216 |
+
i = 0
|
| 217 |
+
lo = lo + i
|
| 218 |
+
hi = hi + j
|
| 219 |
+
elif op is SUCCESS:
|
| 220 |
+
break
|
| 221 |
+
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
|
| 222 |
+
return self.width
|
| 223 |
+
|
| 224 |
+
class Tokenizer:
|
| 225 |
+
def __init__(self, string):
|
| 226 |
+
self.istext = isinstance(string, str)
|
| 227 |
+
self.string = string
|
| 228 |
+
if not self.istext:
|
| 229 |
+
string = str(string, 'latin1')
|
| 230 |
+
self.decoded_string = string
|
| 231 |
+
self.index = 0
|
| 232 |
+
self.next = None
|
| 233 |
+
self.__next()
|
| 234 |
+
def __next(self):
|
| 235 |
+
index = self.index
|
| 236 |
+
try:
|
| 237 |
+
char = self.decoded_string[index]
|
| 238 |
+
except IndexError:
|
| 239 |
+
self.next = None
|
| 240 |
+
return
|
| 241 |
+
if char == "\\":
|
| 242 |
+
index += 1
|
| 243 |
+
try:
|
| 244 |
+
char += self.decoded_string[index]
|
| 245 |
+
except IndexError:
|
| 246 |
+
raise error("bad escape (end of pattern)",
|
| 247 |
+
self.string, len(self.string) - 1) from None
|
| 248 |
+
self.index = index + 1
|
| 249 |
+
self.next = char
|
| 250 |
+
def match(self, char):
|
| 251 |
+
if char == self.next:
|
| 252 |
+
self.__next()
|
| 253 |
+
return True
|
| 254 |
+
return False
|
| 255 |
+
def get(self):
|
| 256 |
+
this = self.next
|
| 257 |
+
self.__next()
|
| 258 |
+
return this
|
| 259 |
+
def getwhile(self, n, charset):
|
| 260 |
+
result = ''
|
| 261 |
+
for _ in range(n):
|
| 262 |
+
c = self.next
|
| 263 |
+
if c not in charset:
|
| 264 |
+
break
|
| 265 |
+
result += c
|
| 266 |
+
self.__next()
|
| 267 |
+
return result
|
| 268 |
+
def getuntil(self, terminator, name):
|
| 269 |
+
result = ''
|
| 270 |
+
while True:
|
| 271 |
+
c = self.next
|
| 272 |
+
self.__next()
|
| 273 |
+
if c is None:
|
| 274 |
+
if not result:
|
| 275 |
+
raise self.error("missing " + name)
|
| 276 |
+
raise self.error("missing %s, unterminated name" % terminator,
|
| 277 |
+
len(result))
|
| 278 |
+
if c == terminator:
|
| 279 |
+
if not result:
|
| 280 |
+
raise self.error("missing " + name, 1)
|
| 281 |
+
break
|
| 282 |
+
result += c
|
| 283 |
+
return result
|
| 284 |
+
@property
|
| 285 |
+
def pos(self):
|
| 286 |
+
return self.index - len(self.next or '')
|
| 287 |
+
def tell(self):
|
| 288 |
+
return self.index - len(self.next or '')
|
| 289 |
+
def seek(self, index):
|
| 290 |
+
self.index = index
|
| 291 |
+
self.__next()
|
| 292 |
+
|
| 293 |
+
def error(self, msg, offset=0):
|
| 294 |
+
return error(msg, self.string, self.tell() - offset)
|
| 295 |
+
|
| 296 |
+
def _class_escape(source, escape):
|
| 297 |
+
# handle escape code inside character class
|
| 298 |
+
code = ESCAPES.get(escape)
|
| 299 |
+
if code:
|
| 300 |
+
return code
|
| 301 |
+
code = CATEGORIES.get(escape)
|
| 302 |
+
if code and code[0] is IN:
|
| 303 |
+
return code
|
| 304 |
+
try:
|
| 305 |
+
c = escape[1:2]
|
| 306 |
+
if c == "x":
|
| 307 |
+
# hexadecimal escape (exactly two digits)
|
| 308 |
+
escape += source.getwhile(2, HEXDIGITS)
|
| 309 |
+
if len(escape) != 4:
|
| 310 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 311 |
+
return LITERAL, int(escape[2:], 16)
|
| 312 |
+
elif c == "u" and source.istext:
|
| 313 |
+
# unicode escape (exactly four digits)
|
| 314 |
+
escape += source.getwhile(4, HEXDIGITS)
|
| 315 |
+
if len(escape) != 6:
|
| 316 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 317 |
+
return LITERAL, int(escape[2:], 16)
|
| 318 |
+
elif c == "U" and source.istext:
|
| 319 |
+
# unicode escape (exactly eight digits)
|
| 320 |
+
escape += source.getwhile(8, HEXDIGITS)
|
| 321 |
+
if len(escape) != 10:
|
| 322 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 323 |
+
c = int(escape[2:], 16)
|
| 324 |
+
chr(c) # raise ValueError for invalid code
|
| 325 |
+
return LITERAL, c
|
| 326 |
+
elif c == "N" and source.istext:
|
| 327 |
+
import unicodedata
|
| 328 |
+
# named unicode escape e.g. \N{EM DASH}
|
| 329 |
+
if not source.match('{'):
|
| 330 |
+
raise source.error("missing {")
|
| 331 |
+
charname = source.getuntil('}', 'character name')
|
| 332 |
+
try:
|
| 333 |
+
c = ord(unicodedata.lookup(charname))
|
| 334 |
+
except (KeyError, TypeError):
|
| 335 |
+
raise source.error("undefined character name %r" % charname,
|
| 336 |
+
len(charname) + len(r'\N{}'))
|
| 337 |
+
return LITERAL, c
|
| 338 |
+
elif c in OCTDIGITS:
|
| 339 |
+
# octal escape (up to three digits)
|
| 340 |
+
escape += source.getwhile(2, OCTDIGITS)
|
| 341 |
+
c = int(escape[1:], 8)
|
| 342 |
+
if c > 0o377:
|
| 343 |
+
raise source.error('octal escape value %s outside of '
|
| 344 |
+
'range 0-0o377' % escape, len(escape))
|
| 345 |
+
return LITERAL, c
|
| 346 |
+
elif c in DIGITS:
|
| 347 |
+
raise ValueError
|
| 348 |
+
if len(escape) == 2:
|
| 349 |
+
if c in ASCIILETTERS:
|
| 350 |
+
raise source.error('bad escape %s' % escape, len(escape))
|
| 351 |
+
return LITERAL, ord(escape[1])
|
| 352 |
+
except ValueError:
|
| 353 |
+
pass
|
| 354 |
+
raise source.error("bad escape %s" % escape, len(escape))
|
| 355 |
+
|
| 356 |
+
def _escape(source, escape, state):
|
| 357 |
+
# handle escape code in expression
|
| 358 |
+
code = CATEGORIES.get(escape)
|
| 359 |
+
if code:
|
| 360 |
+
return code
|
| 361 |
+
code = ESCAPES.get(escape)
|
| 362 |
+
if code:
|
| 363 |
+
return code
|
| 364 |
+
try:
|
| 365 |
+
c = escape[1:2]
|
| 366 |
+
if c == "x":
|
| 367 |
+
# hexadecimal escape
|
| 368 |
+
escape += source.getwhile(2, HEXDIGITS)
|
| 369 |
+
if len(escape) != 4:
|
| 370 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 371 |
+
return LITERAL, int(escape[2:], 16)
|
| 372 |
+
elif c == "u" and source.istext:
|
| 373 |
+
# unicode escape (exactly four digits)
|
| 374 |
+
escape += source.getwhile(4, HEXDIGITS)
|
| 375 |
+
if len(escape) != 6:
|
| 376 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 377 |
+
return LITERAL, int(escape[2:], 16)
|
| 378 |
+
elif c == "U" and source.istext:
|
| 379 |
+
# unicode escape (exactly eight digits)
|
| 380 |
+
escape += source.getwhile(8, HEXDIGITS)
|
| 381 |
+
if len(escape) != 10:
|
| 382 |
+
raise source.error("incomplete escape %s" % escape, len(escape))
|
| 383 |
+
c = int(escape[2:], 16)
|
| 384 |
+
chr(c) # raise ValueError for invalid code
|
| 385 |
+
return LITERAL, c
|
| 386 |
+
elif c == "N" and source.istext:
|
| 387 |
+
import unicodedata
|
| 388 |
+
# named unicode escape e.g. \N{EM DASH}
|
| 389 |
+
if not source.match('{'):
|
| 390 |
+
raise source.error("missing {")
|
| 391 |
+
charname = source.getuntil('}', 'character name')
|
| 392 |
+
try:
|
| 393 |
+
c = ord(unicodedata.lookup(charname))
|
| 394 |
+
except (KeyError, TypeError):
|
| 395 |
+
raise source.error("undefined character name %r" % charname,
|
| 396 |
+
len(charname) + len(r'\N{}'))
|
| 397 |
+
return LITERAL, c
|
| 398 |
+
elif c == "0":
|
| 399 |
+
# octal escape
|
| 400 |
+
escape += source.getwhile(2, OCTDIGITS)
|
| 401 |
+
return LITERAL, int(escape[1:], 8)
|
| 402 |
+
elif c in DIGITS:
|
| 403 |
+
# octal escape *or* decimal group reference (sigh)
|
| 404 |
+
if source.next in DIGITS:
|
| 405 |
+
escape += source.get()
|
| 406 |
+
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
|
| 407 |
+
source.next in OCTDIGITS):
|
| 408 |
+
# got three octal digits; this is an octal escape
|
| 409 |
+
escape += source.get()
|
| 410 |
+
c = int(escape[1:], 8)
|
| 411 |
+
if c > 0o377:
|
| 412 |
+
raise source.error('octal escape value %s outside of '
|
| 413 |
+
'range 0-0o377' % escape,
|
| 414 |
+
len(escape))
|
| 415 |
+
return LITERAL, c
|
| 416 |
+
# not an octal escape, so this is a group reference
|
| 417 |
+
group = int(escape[1:])
|
| 418 |
+
if group < state.groups:
|
| 419 |
+
if not state.checkgroup(group):
|
| 420 |
+
raise source.error("cannot refer to an open group",
|
| 421 |
+
len(escape))
|
| 422 |
+
state.checklookbehindgroup(group, source)
|
| 423 |
+
return GROUPREF, group
|
| 424 |
+
raise source.error("invalid group reference %d" % group, len(escape) - 1)
|
| 425 |
+
if len(escape) == 2:
|
| 426 |
+
if c in ASCIILETTERS:
|
| 427 |
+
raise source.error("bad escape %s" % escape, len(escape))
|
| 428 |
+
return LITERAL, ord(escape[1])
|
| 429 |
+
except ValueError:
|
| 430 |
+
pass
|
| 431 |
+
raise source.error("bad escape %s" % escape, len(escape))
|
| 432 |
+
|
| 433 |
+
def _uniq(items):
|
| 434 |
+
return list(dict.fromkeys(items))
|
| 435 |
+
|
| 436 |
+
def _parse_sub(source, state, verbose, nested):
|
| 437 |
+
# parse an alternation: a|b|c
|
| 438 |
+
|
| 439 |
+
items = []
|
| 440 |
+
itemsappend = items.append
|
| 441 |
+
sourcematch = source.match
|
| 442 |
+
start = source.tell()
|
| 443 |
+
while True:
|
| 444 |
+
itemsappend(_parse(source, state, verbose, nested + 1,
|
| 445 |
+
not nested and not items))
|
| 446 |
+
if not sourcematch("|"):
|
| 447 |
+
break
|
| 448 |
+
|
| 449 |
+
if len(items) == 1:
|
| 450 |
+
return items[0]
|
| 451 |
+
|
| 452 |
+
subpattern = SubPattern(state)
|
| 453 |
+
|
| 454 |
+
# check if all items share a common prefix
|
| 455 |
+
while True:
|
| 456 |
+
prefix = None
|
| 457 |
+
for item in items:
|
| 458 |
+
if not item:
|
| 459 |
+
break
|
| 460 |
+
if prefix is None:
|
| 461 |
+
prefix = item[0]
|
| 462 |
+
elif item[0] != prefix:
|
| 463 |
+
break
|
| 464 |
+
else:
|
| 465 |
+
# all subitems start with a common "prefix".
|
| 466 |
+
# move it out of the branch
|
| 467 |
+
for item in items:
|
| 468 |
+
del item[0]
|
| 469 |
+
subpattern.append(prefix)
|
| 470 |
+
continue # check next one
|
| 471 |
+
break
|
| 472 |
+
|
| 473 |
+
# check if the branch can be replaced by a character set
|
| 474 |
+
set = []
|
| 475 |
+
for item in items:
|
| 476 |
+
if len(item) != 1:
|
| 477 |
+
break
|
| 478 |
+
op, av = item[0]
|
| 479 |
+
if op is LITERAL:
|
| 480 |
+
set.append((op, av))
|
| 481 |
+
elif op is IN and av[0][0] is not NEGATE:
|
| 482 |
+
set.extend(av)
|
| 483 |
+
else:
|
| 484 |
+
break
|
| 485 |
+
else:
|
| 486 |
+
# we can store this as a character set instead of a
|
| 487 |
+
# branch (the compiler may optimize this even more)
|
| 488 |
+
subpattern.append((IN, _uniq(set)))
|
| 489 |
+
return subpattern
|
| 490 |
+
|
| 491 |
+
subpattern.append((BRANCH, (None, items)))
|
| 492 |
+
return subpattern
|
| 493 |
+
|
| 494 |
+
def _parse(source, state, verbose, nested, first=False):
|
| 495 |
+
# parse a simple pattern
|
| 496 |
+
subpattern = SubPattern(state)
|
| 497 |
+
|
| 498 |
+
# precompute constants into local variables
|
| 499 |
+
subpatternappend = subpattern.append
|
| 500 |
+
sourceget = source.get
|
| 501 |
+
sourcematch = source.match
|
| 502 |
+
_len = len
|
| 503 |
+
_ord = ord
|
| 504 |
+
|
| 505 |
+
while True:
|
| 506 |
+
|
| 507 |
+
this = source.next
|
| 508 |
+
if this is None:
|
| 509 |
+
break # end of pattern
|
| 510 |
+
if this in "|)":
|
| 511 |
+
break # end of subpattern
|
| 512 |
+
sourceget()
|
| 513 |
+
|
| 514 |
+
if verbose:
|
| 515 |
+
# skip whitespace and comments
|
| 516 |
+
if this in WHITESPACE:
|
| 517 |
+
continue
|
| 518 |
+
if this == "#":
|
| 519 |
+
while True:
|
| 520 |
+
this = sourceget()
|
| 521 |
+
if this is None or this == "\n":
|
| 522 |
+
break
|
| 523 |
+
continue
|
| 524 |
+
|
| 525 |
+
if this[0] == "\\":
|
| 526 |
+
code = _escape(source, this, state)
|
| 527 |
+
subpatternappend(code)
|
| 528 |
+
|
| 529 |
+
elif this not in SPECIAL_CHARS:
|
| 530 |
+
subpatternappend((LITERAL, _ord(this)))
|
| 531 |
+
|
| 532 |
+
elif this == "[":
|
| 533 |
+
here = source.tell() - 1
|
| 534 |
+
# character set
|
| 535 |
+
set = []
|
| 536 |
+
setappend = set.append
|
| 537 |
+
## if sourcematch(":"):
|
| 538 |
+
## pass # handle character classes
|
| 539 |
+
if source.next == '[':
|
| 540 |
+
import warnings
|
| 541 |
+
warnings.warn(
|
| 542 |
+
'Possible nested set at position %d' % source.tell(),
|
| 543 |
+
FutureWarning, stacklevel=nested + 6
|
| 544 |
+
)
|
| 545 |
+
negate = sourcematch("^")
|
| 546 |
+
# check remaining characters
|
| 547 |
+
while True:
|
| 548 |
+
this = sourceget()
|
| 549 |
+
if this is None:
|
| 550 |
+
raise source.error("unterminated character set",
|
| 551 |
+
source.tell() - here)
|
| 552 |
+
if this == "]" and set:
|
| 553 |
+
break
|
| 554 |
+
elif this[0] == "\\":
|
| 555 |
+
code1 = _class_escape(source, this)
|
| 556 |
+
else:
|
| 557 |
+
if set and this in '-&~|' and source.next == this:
|
| 558 |
+
import warnings
|
| 559 |
+
warnings.warn(
|
| 560 |
+
'Possible set %s at position %d' % (
|
| 561 |
+
'difference' if this == '-' else
|
| 562 |
+
'intersection' if this == '&' else
|
| 563 |
+
'symmetric difference' if this == '~' else
|
| 564 |
+
'union',
|
| 565 |
+
source.tell() - 1),
|
| 566 |
+
FutureWarning, stacklevel=nested + 6
|
| 567 |
+
)
|
| 568 |
+
code1 = LITERAL, _ord(this)
|
| 569 |
+
if sourcematch("-"):
|
| 570 |
+
# potential range
|
| 571 |
+
that = sourceget()
|
| 572 |
+
if that is None:
|
| 573 |
+
raise source.error("unterminated character set",
|
| 574 |
+
source.tell() - here)
|
| 575 |
+
if that == "]":
|
| 576 |
+
if code1[0] is IN:
|
| 577 |
+
code1 = code1[1][0]
|
| 578 |
+
setappend(code1)
|
| 579 |
+
setappend((LITERAL, _ord("-")))
|
| 580 |
+
break
|
| 581 |
+
if that[0] == "\\":
|
| 582 |
+
code2 = _class_escape(source, that)
|
| 583 |
+
else:
|
| 584 |
+
if that == '-':
|
| 585 |
+
import warnings
|
| 586 |
+
warnings.warn(
|
| 587 |
+
'Possible set difference at position %d' % (
|
| 588 |
+
source.tell() - 2),
|
| 589 |
+
FutureWarning, stacklevel=nested + 6
|
| 590 |
+
)
|
| 591 |
+
code2 = LITERAL, _ord(that)
|
| 592 |
+
if code1[0] != LITERAL or code2[0] != LITERAL:
|
| 593 |
+
msg = "bad character range %s-%s" % (this, that)
|
| 594 |
+
raise source.error(msg, len(this) + 1 + len(that))
|
| 595 |
+
lo = code1[1]
|
| 596 |
+
hi = code2[1]
|
| 597 |
+
if hi < lo:
|
| 598 |
+
msg = "bad character range %s-%s" % (this, that)
|
| 599 |
+
raise source.error(msg, len(this) + 1 + len(that))
|
| 600 |
+
setappend((RANGE, (lo, hi)))
|
| 601 |
+
else:
|
| 602 |
+
if code1[0] is IN:
|
| 603 |
+
code1 = code1[1][0]
|
| 604 |
+
setappend(code1)
|
| 605 |
+
|
| 606 |
+
set = _uniq(set)
|
| 607 |
+
# XXX: <fl> should move set optimization to compiler!
|
| 608 |
+
if _len(set) == 1 and set[0][0] is LITERAL:
|
| 609 |
+
# optimization
|
| 610 |
+
if negate:
|
| 611 |
+
subpatternappend((NOT_LITERAL, set[0][1]))
|
| 612 |
+
else:
|
| 613 |
+
subpatternappend(set[0])
|
| 614 |
+
else:
|
| 615 |
+
if negate:
|
| 616 |
+
set.insert(0, (NEGATE, None))
|
| 617 |
+
# charmap optimization can't be added here because
|
| 618 |
+
# global flags still are not known
|
| 619 |
+
subpatternappend((IN, set))
|
| 620 |
+
|
| 621 |
+
elif this in REPEAT_CHARS:
|
| 622 |
+
# repeat previous item
|
| 623 |
+
here = source.tell()
|
| 624 |
+
if this == "?":
|
| 625 |
+
min, max = 0, 1
|
| 626 |
+
elif this == "*":
|
| 627 |
+
min, max = 0, MAXREPEAT
|
| 628 |
+
|
| 629 |
+
elif this == "+":
|
| 630 |
+
min, max = 1, MAXREPEAT
|
| 631 |
+
elif this == "{":
|
| 632 |
+
if source.next == "}":
|
| 633 |
+
subpatternappend((LITERAL, _ord(this)))
|
| 634 |
+
continue
|
| 635 |
+
|
| 636 |
+
min, max = 0, MAXREPEAT
|
| 637 |
+
lo = hi = ""
|
| 638 |
+
while source.next in DIGITS:
|
| 639 |
+
lo += sourceget()
|
| 640 |
+
if sourcematch(","):
|
| 641 |
+
while source.next in DIGITS:
|
| 642 |
+
hi += sourceget()
|
| 643 |
+
else:
|
| 644 |
+
hi = lo
|
| 645 |
+
if not sourcematch("}"):
|
| 646 |
+
subpatternappend((LITERAL, _ord(this)))
|
| 647 |
+
source.seek(here)
|
| 648 |
+
continue
|
| 649 |
+
|
| 650 |
+
if lo:
|
| 651 |
+
min = int(lo)
|
| 652 |
+
if min >= MAXREPEAT:
|
| 653 |
+
raise OverflowError("the repetition number is too large")
|
| 654 |
+
if hi:
|
| 655 |
+
max = int(hi)
|
| 656 |
+
if max >= MAXREPEAT:
|
| 657 |
+
raise OverflowError("the repetition number is too large")
|
| 658 |
+
if max < min:
|
| 659 |
+
raise source.error("min repeat greater than max repeat",
|
| 660 |
+
source.tell() - here)
|
| 661 |
+
else:
|
| 662 |
+
raise AssertionError("unsupported quantifier %r" % (char,))
|
| 663 |
+
# figure out which item to repeat
|
| 664 |
+
if subpattern:
|
| 665 |
+
item = subpattern[-1:]
|
| 666 |
+
else:
|
| 667 |
+
item = None
|
| 668 |
+
if not item or item[0][0] is AT:
|
| 669 |
+
raise source.error("nothing to repeat",
|
| 670 |
+
source.tell() - here + len(this))
|
| 671 |
+
if item[0][0] in _REPEATCODES:
|
| 672 |
+
raise source.error("multiple repeat",
|
| 673 |
+
source.tell() - here + len(this))
|
| 674 |
+
if item[0][0] is SUBPATTERN:
|
| 675 |
+
group, add_flags, del_flags, p = item[0][1]
|
| 676 |
+
if group is None and not add_flags and not del_flags:
|
| 677 |
+
item = p
|
| 678 |
+
if sourcematch("?"):
|
| 679 |
+
subpattern[-1] = (MIN_REPEAT, (min, max, item))
|
| 680 |
+
else:
|
| 681 |
+
subpattern[-1] = (MAX_REPEAT, (min, max, item))
|
| 682 |
+
|
| 683 |
+
elif this == ".":
|
| 684 |
+
subpatternappend((ANY, None))
|
| 685 |
+
|
| 686 |
+
elif this == "(":
|
| 687 |
+
start = source.tell() - 1
|
| 688 |
+
group = True
|
| 689 |
+
name = None
|
| 690 |
+
add_flags = 0
|
| 691 |
+
del_flags = 0
|
| 692 |
+
if sourcematch("?"):
|
| 693 |
+
# options
|
| 694 |
+
char = sourceget()
|
| 695 |
+
if char is None:
|
| 696 |
+
raise source.error("unexpected end of pattern")
|
| 697 |
+
if char == "P":
|
| 698 |
+
# python extensions
|
| 699 |
+
if sourcematch("<"):
|
| 700 |
+
# named group: skip forward to end of name
|
| 701 |
+
name = source.getuntil(">", "group name")
|
| 702 |
+
if not name.isidentifier():
|
| 703 |
+
msg = "bad character in group name %r" % name
|
| 704 |
+
raise source.error(msg, len(name) + 1)
|
| 705 |
+
elif sourcematch("="):
|
| 706 |
+
# named backreference
|
| 707 |
+
name = source.getuntil(")", "group name")
|
| 708 |
+
if not name.isidentifier():
|
| 709 |
+
msg = "bad character in group name %r" % name
|
| 710 |
+
raise source.error(msg, len(name) + 1)
|
| 711 |
+
gid = state.groupdict.get(name)
|
| 712 |
+
if gid is None:
|
| 713 |
+
msg = "unknown group name %r" % name
|
| 714 |
+
raise source.error(msg, len(name) + 1)
|
| 715 |
+
if not state.checkgroup(gid):
|
| 716 |
+
raise source.error("cannot refer to an open group",
|
| 717 |
+
len(name) + 1)
|
| 718 |
+
state.checklookbehindgroup(gid, source)
|
| 719 |
+
subpatternappend((GROUPREF, gid))
|
| 720 |
+
continue
|
| 721 |
+
|
| 722 |
+
else:
|
| 723 |
+
char = sourceget()
|
| 724 |
+
if char is None:
|
| 725 |
+
raise source.error("unexpected end of pattern")
|
| 726 |
+
raise source.error("unknown extension ?P" + char,
|
| 727 |
+
len(char) + 2)
|
| 728 |
+
elif char == ":":
|
| 729 |
+
# non-capturing group
|
| 730 |
+
group = None
|
| 731 |
+
elif char == "#":
|
| 732 |
+
# comment
|
| 733 |
+
while True:
|
| 734 |
+
if source.next is None:
|
| 735 |
+
raise source.error("missing ), unterminated comment",
|
| 736 |
+
source.tell() - start)
|
| 737 |
+
if sourceget() == ")":
|
| 738 |
+
break
|
| 739 |
+
continue
|
| 740 |
+
|
| 741 |
+
elif char in "=!<":
|
| 742 |
+
# lookahead assertions
|
| 743 |
+
dir = 1
|
| 744 |
+
if char == "<":
|
| 745 |
+
char = sourceget()
|
| 746 |
+
if char is None:
|
| 747 |
+
raise source.error("unexpected end of pattern")
|
| 748 |
+
if char not in "=!":
|
| 749 |
+
raise source.error("unknown extension ?<" + char,
|
| 750 |
+
len(char) + 2)
|
| 751 |
+
dir = -1 # lookbehind
|
| 752 |
+
lookbehindgroups = state.lookbehindgroups
|
| 753 |
+
if lookbehindgroups is None:
|
| 754 |
+
state.lookbehindgroups = state.groups
|
| 755 |
+
p = _parse_sub(source, state, verbose, nested + 1)
|
| 756 |
+
if dir < 0:
|
| 757 |
+
if lookbehindgroups is None:
|
| 758 |
+
state.lookbehindgroups = None
|
| 759 |
+
if not sourcematch(")"):
|
| 760 |
+
raise source.error("missing ), unterminated subpattern",
|
| 761 |
+
source.tell() - start)
|
| 762 |
+
if char == "=":
|
| 763 |
+
subpatternappend((ASSERT, (dir, p)))
|
| 764 |
+
else:
|
| 765 |
+
subpatternappend((ASSERT_NOT, (dir, p)))
|
| 766 |
+
continue
|
| 767 |
+
|
| 768 |
+
elif char == "(":
|
| 769 |
+
# conditional backreference group
|
| 770 |
+
condname = source.getuntil(")", "group name")
|
| 771 |
+
if condname.isidentifier():
|
| 772 |
+
condgroup = state.groupdict.get(condname)
|
| 773 |
+
if condgroup is None:
|
| 774 |
+
msg = "unknown group name %r" % condname
|
| 775 |
+
raise source.error(msg, len(condname) + 1)
|
| 776 |
+
else:
|
| 777 |
+
try:
|
| 778 |
+
condgroup = int(condname)
|
| 779 |
+
if condgroup < 0:
|
| 780 |
+
raise ValueError
|
| 781 |
+
except ValueError:
|
| 782 |
+
msg = "bad character in group name %r" % condname
|
| 783 |
+
raise source.error(msg, len(condname) + 1) from None
|
| 784 |
+
if not condgroup:
|
| 785 |
+
raise source.error("bad group number",
|
| 786 |
+
len(condname) + 1)
|
| 787 |
+
if condgroup >= MAXGROUPS:
|
| 788 |
+
msg = "invalid group reference %d" % condgroup
|
| 789 |
+
raise source.error(msg, len(condname) + 1)
|
| 790 |
+
if condgroup not in state.grouprefpos:
|
| 791 |
+
state.grouprefpos[condgroup] = (
|
| 792 |
+
source.tell() - len(condname) - 1
|
| 793 |
+
)
|
| 794 |
+
state.checklookbehindgroup(condgroup, source)
|
| 795 |
+
item_yes = _parse(source, state, verbose, nested + 1)
|
| 796 |
+
if source.match("|"):
|
| 797 |
+
item_no = _parse(source, state, verbose, nested + 1)
|
| 798 |
+
if source.next == "|":
|
| 799 |
+
raise source.error("conditional backref with more than two branches")
|
| 800 |
+
else:
|
| 801 |
+
item_no = None
|
| 802 |
+
if not source.match(")"):
|
| 803 |
+
raise source.error("missing ), unterminated subpattern",
|
| 804 |
+
source.tell() - start)
|
| 805 |
+
subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
|
| 806 |
+
continue
|
| 807 |
+
|
| 808 |
+
elif char in FLAGS or char == "-":
|
| 809 |
+
# flags
|
| 810 |
+
flags = _parse_flags(source, state, char)
|
| 811 |
+
if flags is None: # global flags
|
| 812 |
+
if not first or subpattern:
|
| 813 |
+
import warnings
|
| 814 |
+
warnings.warn(
|
| 815 |
+
'Flags not at the start of the expression %r%s'
|
| 816 |
+
' but at position %d' % (
|
| 817 |
+
source.string[:20], # truncate long regexes
|
| 818 |
+
' (truncated)' if len(source.string) > 20 else '',
|
| 819 |
+
start,
|
| 820 |
+
),
|
| 821 |
+
DeprecationWarning, stacklevel=nested + 6
|
| 822 |
+
)
|
| 823 |
+
if (state.flags & SRE_FLAG_VERBOSE) and not verbose:
|
| 824 |
+
raise Verbose
|
| 825 |
+
continue
|
| 826 |
+
|
| 827 |
+
add_flags, del_flags = flags
|
| 828 |
+
group = None
|
| 829 |
+
else:
|
| 830 |
+
raise source.error("unknown extension ?" + char,
|
| 831 |
+
len(char) + 1)
|
| 832 |
+
|
| 833 |
+
# parse group contents
|
| 834 |
+
if group is not None:
|
| 835 |
+
try:
|
| 836 |
+
group = state.opengroup(name)
|
| 837 |
+
except error as err:
|
| 838 |
+
raise source.error(err.msg, len(name) + 1) from None
|
| 839 |
+
sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
|
| 840 |
+
not (del_flags & SRE_FLAG_VERBOSE))
|
| 841 |
+
p = _parse_sub(source, state, sub_verbose, nested + 1)
|
| 842 |
+
if not source.match(")"):
|
| 843 |
+
raise source.error("missing ), unterminated subpattern",
|
| 844 |
+
source.tell() - start)
|
| 845 |
+
if group is not None:
|
| 846 |
+
state.closegroup(group, p)
|
| 847 |
+
subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
|
| 848 |
+
|
| 849 |
+
elif this == "^":
|
| 850 |
+
subpatternappend((AT, AT_BEGINNING))
|
| 851 |
+
|
| 852 |
+
elif this == "$":
|
| 853 |
+
subpatternappend((AT, AT_END))
|
| 854 |
+
|
| 855 |
+
else:
|
| 856 |
+
raise AssertionError("unsupported special character %r" % (char,))
|
| 857 |
+
|
| 858 |
+
# unpack non-capturing groups
|
| 859 |
+
for i in range(len(subpattern))[::-1]:
|
| 860 |
+
op, av = subpattern[i]
|
| 861 |
+
if op is SUBPATTERN:
|
| 862 |
+
group, add_flags, del_flags, p = av
|
| 863 |
+
if group is None and not add_flags and not del_flags:
|
| 864 |
+
subpattern[i: i+1] = p
|
| 865 |
+
|
| 866 |
+
return subpattern
|
| 867 |
+
|
| 868 |
+
def _parse_flags(source, state, char):
    # Parse the flags of an inline "(?flags...)" / "(?flags-flags:...)"
    # construct.  *char* is the first flag character already consumed (or
    # "-").  Returns the (add_flags, del_flags) pair for a scoped group,
    # or None for a global "(?flags)" group, in which case state.flags has
    # already been updated in place.
    sourceget = source.get
    add_flags = 0
    del_flags = 0
    if char != "-":
        # Accumulate the flags being turned on, one letter at a time.
        while True:
            flag = FLAGS[char]
            if source.istext:
                # 'L' (locale) only makes sense for bytes patterns.
                if char == 'L':
                    msg = "bad inline flags: cannot use 'L' flag with a str pattern"
                    raise source.error(msg)
            else:
                # 'u' (unicode) only makes sense for str patterns.
                if char == 'u':
                    msg = "bad inline flags: cannot use 'u' flag with a bytes pattern"
                    raise source.error(msg)
            add_flags |= flag
            # The type flags 'a', 'u' and 'L' are mutually exclusive: once
            # one is set, setting a different one is an error.
            if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag:
                msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible"
                raise source.error(msg)
            char = sourceget()
            if char is None:
                raise source.error("missing -, : or )")
            if char in ")-:":
                break
            if char not in FLAGS:
                msg = "unknown flag" if char.isalpha() else "missing -, : or )"
                raise source.error(msg, len(char))
    if char == ")":
        # Global flags group "(?flags)": applied to the whole pattern.
        state.flags |= add_flags
        return None
    if add_flags & GLOBAL_FLAGS:
        raise source.error("bad inline flags: cannot turn on global flag", 1)
    if char == "-":
        # Scoped form "(?on-off:...)": accumulate the flags being turned off.
        char = sourceget()
        if char is None:
            raise source.error("missing flag")
        if char not in FLAGS:
            msg = "unknown flag" if char.isalpha() else "missing flag"
            raise source.error(msg, len(char))
        while True:
            flag = FLAGS[char]
            # Type flags cannot be switched off at all.
            if flag & TYPE_FLAGS:
                msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'"
                raise source.error(msg)
            del_flags |= flag
            char = sourceget()
            if char is None:
                raise source.error("missing :")
            if char == ":":
                break
            if char not in FLAGS:
                msg = "unknown flag" if char.isalpha() else "missing :"
                raise source.error(msg, len(char))
    assert char == ":"
    if del_flags & GLOBAL_FLAGS:
        raise source.error("bad inline flags: cannot turn off global flag", 1)
    if add_flags & del_flags:
        raise source.error("bad inline flags: flag turned on and off", 1)
    return add_flags, del_flags
|
| 927 |
+
|
| 928 |
+
def fix_flags(src, flags):
    """Validate and normalise *flags* for the given pattern type.

    str patterns may not use LOCALE and default to UNICODE matching
    unless ASCII was requested; bytes patterns may not use UNICODE.
    Returns the (possibly augmented) flags; raises ValueError on an
    incompatible combination.
    """
    if isinstance(src, str):
        if flags & SRE_FLAG_LOCALE:
            raise ValueError("cannot use LOCALE flag with a str pattern")
        if flags & SRE_FLAG_ASCII:
            if flags & SRE_FLAG_UNICODE:
                raise ValueError("ASCII and UNICODE flags are incompatible")
        else:
            # Unicode matching is the default for str patterns.
            flags |= SRE_FLAG_UNICODE
    else:
        if flags & SRE_FLAG_UNICODE:
            raise ValueError("cannot use UNICODE flag with a bytes pattern")
        if flags & SRE_FLAG_ASCII and flags & SRE_FLAG_LOCALE:
            raise ValueError("ASCII and LOCALE flags are incompatible")
    return flags
|
| 943 |
+
|
| 944 |
+
def parse(str, flags=0, state=None):
    # parse 're' pattern into list of (opcode, argument) tuples
    # *str* may be a str or bytes pattern; *flags* are SRE_FLAG_* bits.
    # Returns the parsed SubPattern.

    source = Tokenizer(str)

    if state is None:
        state = State()
    state.flags = flags
    state.str = str

    try:
        p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0)
    except Verbose:
        # the VERBOSE flag was switched on inside the pattern.  to be
        # on the safe side, we'll parse the whole thing again...
        state = State()
        state.flags = flags | SRE_FLAG_VERBOSE
        state.str = str
        source.seek(0)
        p = _parse_sub(source, state, True, 0)

    # Normalise the type-dependent flags (ASCII/UNICODE/LOCALE).
    p.state.flags = fix_flags(str, p.state.flags)

    if source.next is not None:
        # _parse_sub only stops before the end of input on an unmatched ")".
        assert source.next == ")"
        raise source.error("unbalanced parenthesis")

    # Group references recorded in grouprefpos (e.g. conditional groups)
    # can only be validated now that the total group count is known.
    for g in p.state.grouprefpos:
        if g >= p.state.groups:
            msg = "invalid group reference %d" % g
            raise error(msg, str, p.state.grouprefpos[g])

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    return p
|
| 980 |
+
|
| 981 |
+
def parse_template(source, state):
    # parse 're' replacement string into list of literals and
    # group references.  Returns (groups, literals) where groups is a
    # list of (literal_index, group_number) pairs and literals is a list
    # of str/bytes chunks with None placeholders at group positions.
    s = Tokenizer(source)
    sget = s.get
    groups = []
    literals = []
    literal = []
    lappend = literal.append
    def addgroup(index, pos):
        # Record a group reference: flush the pending literal text, then
        # reserve a None slot for the group's value at expansion time.
        if index > state.groups:
            raise s.error("invalid group reference %d" % index, pos)
        if literal:
            literals.append(''.join(literal))
            del literal[:]
        groups.append((len(literals), index))
        literals.append(None)
    groupindex = state.groupindex
    while True:
        this = sget()
        if this is None:
            break # end of replacement string
        if this[0] == "\\":
            # group
            c = this[1]
            if c == "g":
                # \g<name> or \g<number> reference.
                name = ""
                if not s.match("<"):
                    raise s.error("missing <")
                name = s.getuntil(">", "group name")
                if name.isidentifier():
                    try:
                        index = groupindex[name]
                    except KeyError:
                        raise IndexError("unknown group name %r" % name)
                else:
                    try:
                        index = int(name)
                        if index < 0:
                            raise ValueError
                    except ValueError:
                        raise s.error("bad character in group name %r" % name,
                                      len(name) + 1) from None
                if index >= MAXGROUPS:
                    raise s.error("invalid group reference %d" % index,
                                  len(name) + 1)
                addgroup(index, len(name) + 1)
            elif c == "0":
                # \0 with up to two more octal digits is an octal escape.
                if s.next in OCTDIGITS:
                    this += sget()
                    if s.next in OCTDIGITS:
                        this += sget()
                lappend(chr(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # \N: either a group reference or (for three octal digits)
                # an octal character escape.
                isoctal = False
                if s.next in DIGITS:
                    this += sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this += sget()
                        isoctal = True
                        c = int(this[1:], 8)
                        if c > 0o377:
                            raise s.error('octal escape value %s outside of '
                                          'range 0-0o377' % this, len(this))
                        lappend(chr(c))
                if not isoctal:
                    addgroup(int(this[1:]), len(this) - 1)
            else:
                # Named single-character escape (\n, \t, ...); unknown
                # escapes of ASCII letters are errors, others are literal.
                try:
                    this = chr(ESCAPES[this][1])
                except KeyError:
                    if c in ASCIILETTERS:
                        raise s.error('bad escape %s' % this, len(this))
                lappend(this)
        else:
            lappend(this)
    if literal:
        literals.append(''.join(literal))
    if not isinstance(source, str):
        # The tokenizer implicitly decodes bytes objects as latin-1, we must
        # therefore re-encode the final representation.
        literals = [None if s is None else s.encode('latin-1') for s in literals]
    return groups, literals
|
| 1065 |
+
|
| 1066 |
+
def expand_template(template, match):
    """Expand a parsed replacement template against a match.

    *template* is the (groups, literals) pair produced by parse_template;
    *match* supplies the group values.  Returns str or bytes, matching
    the type of the subject string.
    """
    groups, literals = template
    # Empty string/bytes of the same type as the subject string.
    empty = match.string[:0]
    pieces = literals[:]
    try:
        for slot, group_number in groups:
            # A group that did not participate expands to the empty string.
            pieces[slot] = match.group(group_number) or empty
    except IndexError:
        raise error("invalid group reference %d" % slot)
    return empty.join(pieces)
|
llava/lib/python3.10/tokenize.py
ADDED
|
@@ -0,0 +1,684 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tokenization help for Python programs.
|
| 2 |
+
|
| 3 |
+
tokenize(readline) is a generator that breaks a stream of bytes into
|
| 4 |
+
Python tokens. It decodes the bytes according to PEP-0263 for
|
| 5 |
+
determining source file encoding.
|
| 6 |
+
|
| 7 |
+
It accepts a readline-like method which is called repeatedly to get the
|
| 8 |
+
next line of input (or b"" for EOF). It generates 5-tuples with these
|
| 9 |
+
members:
|
| 10 |
+
|
| 11 |
+
the token type (see token.py)
|
| 12 |
+
the token (a string)
|
| 13 |
+
the starting (row, column) indices of the token (a 2-tuple of ints)
|
| 14 |
+
the ending (row, column) indices of the token (a 2-tuple of ints)
|
| 15 |
+
the original line (string)
|
| 16 |
+
|
| 17 |
+
It is designed to match the working of the Python tokenizer exactly, except
|
| 18 |
+
that it produces COMMENT tokens for comments and gives type OP for all
|
| 19 |
+
operators. Additionally, all token lists start with an ENCODING token
|
| 20 |
+
which tells you which encoding was used to decode the bytes stream.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
|
| 24 |
+
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
|
| 25 |
+
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
|
| 26 |
+
'Michael Foord')
|
| 27 |
+
from builtins import open as _builtin_open
|
| 28 |
+
from codecs import lookup, BOM_UTF8
|
| 29 |
+
import collections
|
| 30 |
+
import functools
|
| 31 |
+
from io import TextIOWrapper
|
| 32 |
+
import itertools as _itertools
|
| 33 |
+
import re
|
| 34 |
+
import sys
|
| 35 |
+
from token import *
|
| 36 |
+
from token import EXACT_TOKEN_TYPES
|
| 37 |
+
|
| 38 |
+
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
|
| 39 |
+
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
|
| 40 |
+
|
| 41 |
+
import token
|
| 42 |
+
__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
|
| 43 |
+
"untokenize", "TokenInfo"]
|
| 44 |
+
del token
|
| 45 |
+
|
| 46 |
+
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """A tokenize 5-tuple: (type, string, start, end, line)."""

    def __repr__(self):
        # Show the numeric token type together with its symbolic name,
        # e.g. "1 (NAME)", for readable debugging output.
        labeled_type = '%d (%s)' % (self.type, tok_name[self.type])
        fields = self._replace(type=labeled_type)
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % fields)

    @property
    def exact_type(self):
        # Resolve generic OP tokens to the exact operator token type;
        # every other type is already exact.
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
|
| 58 |
+
|
| 59 |
+
def group(*choices):
    """Join regex alternatives into a capturing alternation group."""
    return '({})'.format('|'.join(choices))

def any(*choices):
    """Zero-or-more repetitions of the alternation."""
    return group(*choices) + '*'

def maybe(*choices):
    """Optional (zero-or-one) occurrence of the alternation."""
    return group(*choices) + '?'
|
| 62 |
+
|
| 63 |
+
# Note: we use unicode matching for names ("\w") but ascii matching for
|
| 64 |
+
# number literals.
|
| 65 |
+
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
# Whitespace, optionally continued across backslash-newline, then an
# optional trailing comment.
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

# Numeric literals; "_?" allows PEP 515 underscore digit separators.
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
# Floats with a decimal point, with or without an exponent.
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
# Floats written with a mandatory exponent and no point.
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
# Imaginary first so the trailing j/J is not lost to a shorter match.
Number = group(Imagnumber, Floatnumber, Intnumber)
|
| 82 |
+
|
| 83 |
+
# Return the empty string, plus all of the valid string prefixes.
|
| 84 |
+
def _all_string_prefixes():
|
| 85 |
+
# The valid string prefixes. Only contain the lower case versions,
|
| 86 |
+
# and don't contain any permutations (include 'fr', but not
|
| 87 |
+
# 'rf'). The various permutations will be generated.
|
| 88 |
+
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
|
| 89 |
+
# if we add binary f-strings, add: ['fb', 'fbr']
|
| 90 |
+
result = {''}
|
| 91 |
+
for prefix in _valid_string_prefixes:
|
| 92 |
+
for t in _itertools.permutations(prefix):
|
| 93 |
+
# create a list with upper and lower versions of each
|
| 94 |
+
# character
|
| 95 |
+
for u in _itertools.product(*[(c, c.upper()) for c in t]):
|
| 96 |
+
result.add(''.join(u))
|
| 97 |
+
return result
|
| 98 |
+
|
| 99 |
+
@functools.lru_cache
|
| 100 |
+
def _compile(expr):
|
| 101 |
+
return re.compile(expr, re.UNICODE)
|
| 102 |
+
|
| 103 |
+
# Note that since _all_string_prefixes includes the empty string,
|
| 104 |
+
# StringPrefix can be the empty string (making it optional).
|
| 105 |
+
# Since _all_string_prefixes includes the empty string, StringPrefix is
# effectively optional in the patterns below.
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string: either the closing quote or a
# backslash-newline continuation ends the match.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
# Master pattern used to pick off the next token candidate.
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
|
| 136 |
+
|
| 137 |
+
# For a given string prefix plus quotes, endpats maps it to a regex
|
| 138 |
+
# to match the remainder of that string. _prefix can be empty, for
|
| 139 |
+
# a normal single or triple quoted string (with no prefix).
|
| 140 |
+
endpats = {}
for _prefix in _all_string_prefixes():
    # Map "<prefix><opening quote>" to the regex matching the remainder
    # of that string literal.
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

# Tab stop width used for column bookkeeping.
tabsize = 8
|
| 158 |
+
|
| 159 |
+
# Raised for malformed input; raise sites are outside this excerpt —
# NOTE(review): presumably unterminated multi-line constructs, confirm
# against the tokenizer loop.
class TokenError(Exception): pass

# Internal sentinel exception — NOTE(review): apparently used to abort
# tokenization early; confirm against callers.
class StopTokenizing(Exception): pass
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class Untokenizer:
    """Rebuild source text from a stream of tokenize tokens.

    Tracks the (row, col) position of the last emitted token so that
    original inter-token whitespace can be reproduced.  Falls back to a
    lossier mode (compat) when tokens carry no position information.
    """

    def __init__(self):
        self.tokens = []        # accumulated output fragments
        self.prev_row = 1       # row of the end of the last emitted token
        self.prev_col = 0       # column of the end of the last emitted token
        self.encoding = None    # set from an ENCODING token, if seen

    def add_whitespace(self, start):
        # Emit filler so the next token begins at *start*.  Row gaps are
        # bridged with backslash-continuation lines, column gaps with spaces.
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Consume 5-tuples (or 2-tuples) and return the rebuilt source."""
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            # A bare (type, string) pair means no position info is
            # available; switch to the lossy compatibility path for the
            # rest of the stream.
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # First token on a fresh line: re-emit the current
                # indentation unless the token already starts past it.
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        # Lossy reconstruction for (type, string) pairs: spacing is
        # synthesised rather than recovered from positions.
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            # Pad names/numbers so adjacent tokens don't fuse ("if x" not "ifx").
            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    converter = Untokenizer()
    source = converter.untokenize(iterable)
    if converter.encoding is None:
        return source
    return source.encode(converter.encoding)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def _get_normal_name(orig_enc):
|
| 287 |
+
"""Imitates get_normal_name in tokenizer.c."""
|
| 288 |
+
# Only care about the first 12 characters.
|
| 289 |
+
enc = orig_enc[:12].lower().replace("_", "-")
|
| 290 |
+
if enc == "utf-8" or enc.startswith("utf-8-"):
|
| 291 |
+
return "utf-8"
|
| 292 |
+
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
|
| 293 |
+
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
|
| 294 |
+
return "iso-8859-1"
|
| 295 |
+
return orig_enc
|
| 296 |
+
|
| 297 |
+
def detect_encoding(readline):
|
| 298 |
+
"""
|
| 299 |
+
The detect_encoding() function is used to detect the encoding that should
|
| 300 |
+
be used to decode a Python source file. It requires one argument, readline,
|
| 301 |
+
in the same way as the tokenize() generator.
|
| 302 |
+
|
| 303 |
+
It will call readline a maximum of twice, and return the encoding used
|
| 304 |
+
(as a string) and a list of any lines (left as bytes) it has read in.
|
| 305 |
+
|
| 306 |
+
It detects the encoding from the presence of a utf-8 bom or an encoding
|
| 307 |
+
cookie as specified in pep-0263. If both a bom and a cookie are present,
|
| 308 |
+
but disagree, a SyntaxError will be raised. If the encoding cookie is an
|
| 309 |
+
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
|
| 310 |
+
'utf-8-sig' is returned.
|
| 311 |
+
|
| 312 |
+
If no encoding is specified, then the default of 'utf-8' will be returned.
|
| 313 |
+
"""
|
| 314 |
+
try:
|
| 315 |
+
filename = readline.__self__.name
|
| 316 |
+
except AttributeError:
|
| 317 |
+
filename = None
|
| 318 |
+
bom_found = False
|
| 319 |
+
encoding = None
|
| 320 |
+
default = 'utf-8'
|
| 321 |
+
def read_or_stop():
|
| 322 |
+
try:
|
| 323 |
+
return readline()
|
| 324 |
+
except StopIteration:
|
| 325 |
+
return b''
|
| 326 |
+
|
| 327 |
+
def find_cookie(line):
|
| 328 |
+
try:
|
| 329 |
+
# Decode as UTF-8. Either the line is an encoding declaration,
|
| 330 |
+
# in which case it should be pure ASCII, or it must be UTF-8
|
| 331 |
+
# per default encoding.
|
| 332 |
+
line_string = line.decode('utf-8')
|
| 333 |
+
except UnicodeDecodeError:
|
| 334 |
+
msg = "invalid or missing encoding declaration"
|
| 335 |
+
if filename is not None:
|
| 336 |
+
msg = '{} for {!r}'.format(msg, filename)
|
| 337 |
+
raise SyntaxError(msg)
|
| 338 |
+
|
| 339 |
+
match = cookie_re.match(line_string)
|
| 340 |
+
if not match:
|
| 341 |
+
return None
|
| 342 |
+
encoding = _get_normal_name(match.group(1))
|
| 343 |
+
try:
|
| 344 |
+
codec = lookup(encoding)
|
| 345 |
+
except LookupError:
|
| 346 |
+
# This behaviour mimics the Python interpreter
|
| 347 |
+
if filename is None:
|
| 348 |
+
msg = "unknown encoding: " + encoding
|
| 349 |
+
else:
|
| 350 |
+
msg = "unknown encoding for {!r}: {}".format(filename,
|
| 351 |
+
encoding)
|
| 352 |
+
raise SyntaxError(msg)
|
| 353 |
+
|
| 354 |
+
if bom_found:
|
| 355 |
+
if encoding != 'utf-8':
|
| 356 |
+
# This behaviour mimics the Python interpreter
|
| 357 |
+
if filename is None:
|
| 358 |
+
msg = 'encoding problem: utf-8'
|
| 359 |
+
else:
|
| 360 |
+
msg = 'encoding problem for {!r}: utf-8'.format(filename)
|
| 361 |
+
raise SyntaxError(msg)
|
| 362 |
+
encoding += '-sig'
|
| 363 |
+
return encoding
|
| 364 |
+
|
| 365 |
+
first = read_or_stop()
|
| 366 |
+
if first.startswith(BOM_UTF8):
|
| 367 |
+
bom_found = True
|
| 368 |
+
first = first[3:]
|
| 369 |
+
default = 'utf-8-sig'
|
| 370 |
+
if not first:
|
| 371 |
+
return default, []
|
| 372 |
+
|
| 373 |
+
encoding = find_cookie(first)
|
| 374 |
+
if encoding:
|
| 375 |
+
return encoding, [first]
|
| 376 |
+
if not blank_re.match(first):
|
| 377 |
+
return default, [first]
|
| 378 |
+
|
| 379 |
+
second = read_or_stop()
|
| 380 |
+
if not second:
|
| 381 |
+
return default, [first]
|
| 382 |
+
|
| 383 |
+
encoding = find_cookie(second)
|
| 384 |
+
if encoding:
|
| 385 |
+
return encoding, [first, second]
|
| 386 |
+
|
| 387 |
+
return default, [first, second]
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def open(filename):
|
| 391 |
+
"""Open a file in read only mode using the encoding detected by
|
| 392 |
+
detect_encoding().
|
| 393 |
+
"""
|
| 394 |
+
buffer = _builtin_open(filename, 'rb')
|
| 395 |
+
try:
|
| 396 |
+
encoding, lines = detect_encoding(buffer.readline)
|
| 397 |
+
buffer.seek(0)
|
| 398 |
+
text = TextIOWrapper(buffer, encoding, line_buffering=True)
|
| 399 |
+
text.mode = 'r'
|
| 400 |
+
return text
|
| 401 |
+
except:
|
| 402 |
+
buffer.close()
|
| 403 |
+
raise
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
def tokenize(readline):
|
| 407 |
+
"""
|
| 408 |
+
The tokenize() generator requires one argument, readline, which
|
| 409 |
+
must be a callable object which provides the same interface as the
|
| 410 |
+
readline() method of built-in file objects. Each call to the function
|
| 411 |
+
should return one line of input as bytes. Alternatively, readline
|
| 412 |
+
can be a callable function terminating with StopIteration:
|
| 413 |
+
readline = open(myfile, 'rb').__next__ # Example of alternate readline
|
| 414 |
+
|
| 415 |
+
The generator produces 5-tuples with these members: the token type; the
|
| 416 |
+
token string; a 2-tuple (srow, scol) of ints specifying the row and
|
| 417 |
+
column where the token begins in the source; a 2-tuple (erow, ecol) of
|
| 418 |
+
ints specifying the row and column where the token ends in the source;
|
| 419 |
+
and the line on which the token was found. The line passed is the
|
| 420 |
+
physical line.
|
| 421 |
+
|
| 422 |
+
The first token sequence will always be an ENCODING token
|
| 423 |
+
which tells you which encoding was used to decode the bytes stream.
|
| 424 |
+
"""
|
| 425 |
+
encoding, consumed = detect_encoding(readline)
|
| 426 |
+
empty = _itertools.repeat(b"")
|
| 427 |
+
rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
|
| 428 |
+
return _tokenize(rl_gen.__next__, encoding)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def _tokenize(readline, encoding):
|
| 432 |
+
lnum = parenlev = continued = 0
|
| 433 |
+
numchars = '0123456789'
|
| 434 |
+
contstr, needcont = '', 0
|
| 435 |
+
contline = None
|
| 436 |
+
indents = [0]
|
| 437 |
+
|
| 438 |
+
if encoding is not None:
|
| 439 |
+
if encoding == "utf-8-sig":
|
| 440 |
+
# BOM will already have been stripped.
|
| 441 |
+
encoding = "utf-8"
|
| 442 |
+
yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
|
| 443 |
+
last_line = b''
|
| 444 |
+
line = b''
|
| 445 |
+
while True: # loop over lines in stream
|
| 446 |
+
try:
|
| 447 |
+
# We capture the value of the line variable here because
|
| 448 |
+
# readline uses the empty string '' to signal end of input,
|
| 449 |
+
# hence `line` itself will always be overwritten at the end
|
| 450 |
+
# of this loop.
|
| 451 |
+
last_line = line
|
| 452 |
+
line = readline()
|
| 453 |
+
except StopIteration:
|
| 454 |
+
line = b''
|
| 455 |
+
|
| 456 |
+
if encoding is not None:
|
| 457 |
+
line = line.decode(encoding)
|
| 458 |
+
lnum += 1
|
| 459 |
+
pos, max = 0, len(line)
|
| 460 |
+
|
| 461 |
+
if contstr: # continued string
|
| 462 |
+
if not line:
|
| 463 |
+
raise TokenError("EOF in multi-line string", strstart)
|
| 464 |
+
endmatch = endprog.match(line)
|
| 465 |
+
if endmatch:
|
| 466 |
+
pos = end = endmatch.end(0)
|
| 467 |
+
yield TokenInfo(STRING, contstr + line[:end],
|
| 468 |
+
strstart, (lnum, end), contline + line)
|
| 469 |
+
contstr, needcont = '', 0
|
| 470 |
+
contline = None
|
| 471 |
+
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
|
| 472 |
+
yield TokenInfo(ERRORTOKEN, contstr + line,
|
| 473 |
+
strstart, (lnum, len(line)), contline)
|
| 474 |
+
contstr = ''
|
| 475 |
+
contline = None
|
| 476 |
+
continue
|
| 477 |
+
else:
|
| 478 |
+
contstr = contstr + line
|
| 479 |
+
contline = contline + line
|
| 480 |
+
continue
|
| 481 |
+
|
| 482 |
+
elif parenlev == 0 and not continued: # new statement
|
| 483 |
+
if not line: break
|
| 484 |
+
column = 0
|
| 485 |
+
while pos < max: # measure leading whitespace
|
| 486 |
+
if line[pos] == ' ':
|
| 487 |
+
column += 1
|
| 488 |
+
elif line[pos] == '\t':
|
| 489 |
+
column = (column//tabsize + 1)*tabsize
|
| 490 |
+
elif line[pos] == '\f':
|
| 491 |
+
column = 0
|
| 492 |
+
else:
|
| 493 |
+
break
|
| 494 |
+
pos += 1
|
| 495 |
+
if pos == max:
|
| 496 |
+
break
|
| 497 |
+
|
| 498 |
+
if line[pos] in '#\r\n': # skip comments or blank lines
|
| 499 |
+
if line[pos] == '#':
|
| 500 |
+
comment_token = line[pos:].rstrip('\r\n')
|
| 501 |
+
yield TokenInfo(COMMENT, comment_token,
|
| 502 |
+
(lnum, pos), (lnum, pos + len(comment_token)), line)
|
| 503 |
+
pos += len(comment_token)
|
| 504 |
+
|
| 505 |
+
yield TokenInfo(NL, line[pos:],
|
| 506 |
+
(lnum, pos), (lnum, len(line)), line)
|
| 507 |
+
continue
|
| 508 |
+
|
| 509 |
+
if column > indents[-1]: # count indents or dedents
|
| 510 |
+
indents.append(column)
|
| 511 |
+
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
|
| 512 |
+
while column < indents[-1]:
|
| 513 |
+
if column not in indents:
|
| 514 |
+
raise IndentationError(
|
| 515 |
+
"unindent does not match any outer indentation level",
|
| 516 |
+
("<tokenize>", lnum, pos, line))
|
| 517 |
+
indents = indents[:-1]
|
| 518 |
+
|
| 519 |
+
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
|
| 520 |
+
|
| 521 |
+
else: # continued statement
|
| 522 |
+
if not line:
|
| 523 |
+
raise TokenError("EOF in multi-line statement", (lnum, 0))
|
| 524 |
+
continued = 0
|
| 525 |
+
|
| 526 |
+
while pos < max:
|
| 527 |
+
pseudomatch = _compile(PseudoToken).match(line, pos)
|
| 528 |
+
if pseudomatch: # scan for tokens
|
| 529 |
+
start, end = pseudomatch.span(1)
|
| 530 |
+
spos, epos, pos = (lnum, start), (lnum, end), end
|
| 531 |
+
if start == end:
|
| 532 |
+
continue
|
| 533 |
+
token, initial = line[start:end], line[start]
|
| 534 |
+
|
| 535 |
+
if (initial in numchars or # ordinary number
|
| 536 |
+
(initial == '.' and token != '.' and token != '...')):
|
| 537 |
+
yield TokenInfo(NUMBER, token, spos, epos, line)
|
| 538 |
+
elif initial in '\r\n':
|
| 539 |
+
if parenlev > 0:
|
| 540 |
+
yield TokenInfo(NL, token, spos, epos, line)
|
| 541 |
+
else:
|
| 542 |
+
yield TokenInfo(NEWLINE, token, spos, epos, line)
|
| 543 |
+
|
| 544 |
+
elif initial == '#':
|
| 545 |
+
assert not token.endswith("\n")
|
| 546 |
+
yield TokenInfo(COMMENT, token, spos, epos, line)
|
| 547 |
+
|
| 548 |
+
elif token in triple_quoted:
|
| 549 |
+
endprog = _compile(endpats[token])
|
| 550 |
+
endmatch = endprog.match(line, pos)
|
| 551 |
+
if endmatch: # all on one line
|
| 552 |
+
pos = endmatch.end(0)
|
| 553 |
+
token = line[start:pos]
|
| 554 |
+
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
|
| 555 |
+
else:
|
| 556 |
+
strstart = (lnum, start) # multiple lines
|
| 557 |
+
contstr = line[start:]
|
| 558 |
+
contline = line
|
| 559 |
+
break
|
| 560 |
+
|
| 561 |
+
# Check up to the first 3 chars of the token to see if
|
| 562 |
+
# they're in the single_quoted set. If so, they start
|
| 563 |
+
# a string.
|
| 564 |
+
# We're using the first 3, because we're looking for
|
| 565 |
+
# "rb'" (for example) at the start of the token. If
|
| 566 |
+
# we switch to longer prefixes, this needs to be
|
| 567 |
+
# adjusted.
|
| 568 |
+
# Note that initial == token[:1].
|
| 569 |
+
# Also note that single quote checking must come after
|
| 570 |
+
# triple quote checking (above).
|
| 571 |
+
elif (initial in single_quoted or
|
| 572 |
+
token[:2] in single_quoted or
|
| 573 |
+
token[:3] in single_quoted):
|
| 574 |
+
if token[-1] == '\n': # continued string
|
| 575 |
+
strstart = (lnum, start)
|
| 576 |
+
# Again, using the first 3 chars of the
|
| 577 |
+
# token. This is looking for the matching end
|
| 578 |
+
# regex for the correct type of quote
|
| 579 |
+
# character. So it's really looking for
|
| 580 |
+
# endpats["'"] or endpats['"'], by trying to
|
| 581 |
+
# skip string prefix characters, if any.
|
| 582 |
+
endprog = _compile(endpats.get(initial) or
|
| 583 |
+
endpats.get(token[1]) or
|
| 584 |
+
endpats.get(token[2]))
|
| 585 |
+
contstr, needcont = line[start:], 1
|
| 586 |
+
contline = line
|
| 587 |
+
break
|
| 588 |
+
else: # ordinary string
|
| 589 |
+
yield TokenInfo(STRING, token, spos, epos, line)
|
| 590 |
+
|
| 591 |
+
elif initial.isidentifier(): # ordinary name
|
| 592 |
+
yield TokenInfo(NAME, token, spos, epos, line)
|
| 593 |
+
elif initial == '\\': # continued stmt
|
| 594 |
+
continued = 1
|
| 595 |
+
else:
|
| 596 |
+
if initial in '([{':
|
| 597 |
+
parenlev += 1
|
| 598 |
+
elif initial in ')]}':
|
| 599 |
+
parenlev -= 1
|
| 600 |
+
yield TokenInfo(OP, token, spos, epos, line)
|
| 601 |
+
else:
|
| 602 |
+
yield TokenInfo(ERRORTOKEN, line[pos],
|
| 603 |
+
(lnum, pos), (lnum, pos+1), line)
|
| 604 |
+
pos += 1
|
| 605 |
+
|
| 606 |
+
# Add an implicit NEWLINE if the input doesn't end in one
|
| 607 |
+
if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
|
| 608 |
+
yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
|
| 609 |
+
for indent in indents[1:]: # pop remaining indent levels
|
| 610 |
+
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
|
| 611 |
+
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
def generate_tokens(readline):
|
| 615 |
+
"""Tokenize a source reading Python code as unicode strings.
|
| 616 |
+
|
| 617 |
+
This has the same API as tokenize(), except that it expects the *readline*
|
| 618 |
+
callable to return str objects instead of bytes.
|
| 619 |
+
"""
|
| 620 |
+
return _tokenize(readline, None)
|
| 621 |
+
|
| 622 |
+
def main():
|
| 623 |
+
import argparse
|
| 624 |
+
|
| 625 |
+
# Helper error handling routines
|
| 626 |
+
def perror(message):
|
| 627 |
+
sys.stderr.write(message)
|
| 628 |
+
sys.stderr.write('\n')
|
| 629 |
+
|
| 630 |
+
def error(message, filename=None, location=None):
|
| 631 |
+
if location:
|
| 632 |
+
args = (filename,) + location + (message,)
|
| 633 |
+
perror("%s:%d:%d: error: %s" % args)
|
| 634 |
+
elif filename:
|
| 635 |
+
perror("%s: error: %s" % (filename, message))
|
| 636 |
+
else:
|
| 637 |
+
perror("error: %s" % message)
|
| 638 |
+
sys.exit(1)
|
| 639 |
+
|
| 640 |
+
# Parse the arguments and options
|
| 641 |
+
parser = argparse.ArgumentParser(prog='python -m tokenize')
|
| 642 |
+
parser.add_argument(dest='filename', nargs='?',
|
| 643 |
+
metavar='filename.py',
|
| 644 |
+
help='the file to tokenize; defaults to stdin')
|
| 645 |
+
parser.add_argument('-e', '--exact', dest='exact', action='store_true',
|
| 646 |
+
help='display token names using the exact type')
|
| 647 |
+
args = parser.parse_args()
|
| 648 |
+
|
| 649 |
+
try:
|
| 650 |
+
# Tokenize the input
|
| 651 |
+
if args.filename:
|
| 652 |
+
filename = args.filename
|
| 653 |
+
with _builtin_open(filename, 'rb') as f:
|
| 654 |
+
tokens = list(tokenize(f.readline))
|
| 655 |
+
else:
|
| 656 |
+
filename = "<stdin>"
|
| 657 |
+
tokens = _tokenize(sys.stdin.readline, None)
|
| 658 |
+
|
| 659 |
+
# Output the tokenization
|
| 660 |
+
for token in tokens:
|
| 661 |
+
token_type = token.type
|
| 662 |
+
if args.exact:
|
| 663 |
+
token_type = token.exact_type
|
| 664 |
+
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
|
| 665 |
+
print("%-20s%-15s%-15r" %
|
| 666 |
+
(token_range, tok_name[token_type], token.string))
|
| 667 |
+
except IndentationError as err:
|
| 668 |
+
line, column = err.args[1][1:3]
|
| 669 |
+
error(err.args[0], filename, (line, column))
|
| 670 |
+
except TokenError as err:
|
| 671 |
+
line, column = err.args[1]
|
| 672 |
+
error(err.args[0], filename, (line, column))
|
| 673 |
+
except SyntaxError as err:
|
| 674 |
+
error(err, filename)
|
| 675 |
+
except OSError as err:
|
| 676 |
+
error(err)
|
| 677 |
+
except KeyboardInterrupt:
|
| 678 |
+
print("interrupted\n")
|
| 679 |
+
except Exception as err:
|
| 680 |
+
perror("unexpected error: %s" % err)
|
| 681 |
+
raise
|
| 682 |
+
|
| 683 |
+
if __name__ == "__main__":
|
| 684 |
+
main()
|
llava/lib/python3.10/zipapp.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import os
|
| 3 |
+
import pathlib
|
| 4 |
+
import shutil
|
| 5 |
+
import stat
|
| 6 |
+
import sys
|
| 7 |
+
import zipfile
|
| 8 |
+
|
| 9 |
+
__all__ = ['ZipAppError', 'create_archive', 'get_interpreter']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# The __main__.py used if the users specifies "-m module:fn".
|
| 13 |
+
# Note that this will always be written as UTF-8 (module and
|
| 14 |
+
# function names can be non-ASCII in Python 3).
|
| 15 |
+
# We add a coding cookie even though UTF-8 is the default in Python 3
|
| 16 |
+
# because the resulting archive may be intended to be run under Python 2.
|
| 17 |
+
MAIN_TEMPLATE = """\
|
| 18 |
+
# -*- coding: utf-8 -*-
|
| 19 |
+
import {module}
|
| 20 |
+
{module}.{fn}()
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# The Windows launcher defaults to UTF-8 when parsing shebang lines if the
|
| 25 |
+
# file has no BOM. So use UTF-8 on Windows.
|
| 26 |
+
# On Unix, use the filesystem encoding.
|
| 27 |
+
if sys.platform.startswith('win'):
|
| 28 |
+
shebang_encoding = 'utf-8'
|
| 29 |
+
else:
|
| 30 |
+
shebang_encoding = sys.getfilesystemencoding()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class ZipAppError(ValueError):
|
| 34 |
+
pass
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@contextlib.contextmanager
|
| 38 |
+
def _maybe_open(archive, mode):
|
| 39 |
+
if isinstance(archive, (str, os.PathLike)):
|
| 40 |
+
with open(archive, mode) as f:
|
| 41 |
+
yield f
|
| 42 |
+
else:
|
| 43 |
+
yield archive
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _write_file_prefix(f, interpreter):
|
| 47 |
+
"""Write a shebang line."""
|
| 48 |
+
if interpreter:
|
| 49 |
+
shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n'
|
| 50 |
+
f.write(shebang)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _copy_archive(archive, new_archive, interpreter=None):
|
| 54 |
+
"""Copy an application archive, modifying the shebang line."""
|
| 55 |
+
with _maybe_open(archive, 'rb') as src:
|
| 56 |
+
# Skip the shebang line from the source.
|
| 57 |
+
# Read 2 bytes of the source and check if they are #!.
|
| 58 |
+
first_2 = src.read(2)
|
| 59 |
+
if first_2 == b'#!':
|
| 60 |
+
# Discard the initial 2 bytes and the rest of the shebang line.
|
| 61 |
+
first_2 = b''
|
| 62 |
+
src.readline()
|
| 63 |
+
|
| 64 |
+
with _maybe_open(new_archive, 'wb') as dst:
|
| 65 |
+
_write_file_prefix(dst, interpreter)
|
| 66 |
+
# If there was no shebang, "first_2" contains the first 2 bytes
|
| 67 |
+
# of the source file, so write them before copying the rest
|
| 68 |
+
# of the file.
|
| 69 |
+
dst.write(first_2)
|
| 70 |
+
shutil.copyfileobj(src, dst)
|
| 71 |
+
|
| 72 |
+
if interpreter and isinstance(new_archive, str):
|
| 73 |
+
os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def create_archive(source, target=None, interpreter=None, main=None,
|
| 77 |
+
filter=None, compressed=False):
|
| 78 |
+
"""Create an application archive from SOURCE.
|
| 79 |
+
|
| 80 |
+
The SOURCE can be the name of a directory, or a filename or a file-like
|
| 81 |
+
object referring to an existing archive.
|
| 82 |
+
|
| 83 |
+
The content of SOURCE is packed into an application archive in TARGET,
|
| 84 |
+
which can be a filename or a file-like object. If SOURCE is a directory,
|
| 85 |
+
TARGET can be omitted and will default to the name of SOURCE with .pyz
|
| 86 |
+
appended.
|
| 87 |
+
|
| 88 |
+
The created application archive will have a shebang line specifying
|
| 89 |
+
that it should run with INTERPRETER (there will be no shebang line if
|
| 90 |
+
INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is
|
| 91 |
+
not specified, an existing __main__.py will be used). It is an error
|
| 92 |
+
to specify MAIN for anything other than a directory source with no
|
| 93 |
+
__main__.py, and it is an error to omit MAIN if the directory has no
|
| 94 |
+
__main__.py.
|
| 95 |
+
"""
|
| 96 |
+
# Are we copying an existing archive?
|
| 97 |
+
source_is_file = False
|
| 98 |
+
if hasattr(source, 'read') and hasattr(source, 'readline'):
|
| 99 |
+
source_is_file = True
|
| 100 |
+
else:
|
| 101 |
+
source = pathlib.Path(source)
|
| 102 |
+
if source.is_file():
|
| 103 |
+
source_is_file = True
|
| 104 |
+
|
| 105 |
+
if source_is_file:
|
| 106 |
+
_copy_archive(source, target, interpreter)
|
| 107 |
+
return
|
| 108 |
+
|
| 109 |
+
# We are creating a new archive from a directory.
|
| 110 |
+
if not source.exists():
|
| 111 |
+
raise ZipAppError("Source does not exist")
|
| 112 |
+
has_main = (source / '__main__.py').is_file()
|
| 113 |
+
if main and has_main:
|
| 114 |
+
raise ZipAppError(
|
| 115 |
+
"Cannot specify entry point if the source has __main__.py")
|
| 116 |
+
if not (main or has_main):
|
| 117 |
+
raise ZipAppError("Archive has no entry point")
|
| 118 |
+
|
| 119 |
+
main_py = None
|
| 120 |
+
if main:
|
| 121 |
+
# Check that main has the right format.
|
| 122 |
+
mod, sep, fn = main.partition(':')
|
| 123 |
+
mod_ok = all(part.isidentifier() for part in mod.split('.'))
|
| 124 |
+
fn_ok = all(part.isidentifier() for part in fn.split('.'))
|
| 125 |
+
if not (sep == ':' and mod_ok and fn_ok):
|
| 126 |
+
raise ZipAppError("Invalid entry point: " + main)
|
| 127 |
+
main_py = MAIN_TEMPLATE.format(module=mod, fn=fn)
|
| 128 |
+
|
| 129 |
+
if target is None:
|
| 130 |
+
target = source.with_suffix('.pyz')
|
| 131 |
+
elif not hasattr(target, 'write'):
|
| 132 |
+
target = pathlib.Path(target)
|
| 133 |
+
|
| 134 |
+
with _maybe_open(target, 'wb') as fd:
|
| 135 |
+
_write_file_prefix(fd, interpreter)
|
| 136 |
+
compression = (zipfile.ZIP_DEFLATED if compressed else
|
| 137 |
+
zipfile.ZIP_STORED)
|
| 138 |
+
with zipfile.ZipFile(fd, 'w', compression=compression) as z:
|
| 139 |
+
for child in source.rglob('*'):
|
| 140 |
+
arcname = child.relative_to(source)
|
| 141 |
+
if filter is None or filter(arcname):
|
| 142 |
+
z.write(child, arcname.as_posix())
|
| 143 |
+
if main_py:
|
| 144 |
+
z.writestr('__main__.py', main_py.encode('utf-8'))
|
| 145 |
+
|
| 146 |
+
if interpreter and not hasattr(target, 'write'):
|
| 147 |
+
target.chmod(target.stat().st_mode | stat.S_IEXEC)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def get_interpreter(archive):
|
| 151 |
+
with _maybe_open(archive, 'rb') as f:
|
| 152 |
+
if f.read(2) == b'#!':
|
| 153 |
+
return f.readline().strip().decode(shebang_encoding)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def main(args=None):
|
| 157 |
+
"""Run the zipapp command line interface.
|
| 158 |
+
|
| 159 |
+
The ARGS parameter lets you specify the argument list directly.
|
| 160 |
+
Omitting ARGS (or setting it to None) works as for argparse, using
|
| 161 |
+
sys.argv[1:] as the argument list.
|
| 162 |
+
"""
|
| 163 |
+
import argparse
|
| 164 |
+
|
| 165 |
+
parser = argparse.ArgumentParser()
|
| 166 |
+
parser.add_argument('--output', '-o', default=None,
|
| 167 |
+
help="The name of the output archive. "
|
| 168 |
+
"Required if SOURCE is an archive.")
|
| 169 |
+
parser.add_argument('--python', '-p', default=None,
|
| 170 |
+
help="The name of the Python interpreter to use "
|
| 171 |
+
"(default: no shebang line).")
|
| 172 |
+
parser.add_argument('--main', '-m', default=None,
|
| 173 |
+
help="The main function of the application "
|
| 174 |
+
"(default: use an existing __main__.py).")
|
| 175 |
+
parser.add_argument('--compress', '-c', action='store_true',
|
| 176 |
+
help="Compress files with the deflate method. "
|
| 177 |
+
"Files are stored uncompressed by default.")
|
| 178 |
+
parser.add_argument('--info', default=False, action='store_true',
|
| 179 |
+
help="Display the interpreter from the archive.")
|
| 180 |
+
parser.add_argument('source',
|
| 181 |
+
help="Source directory (or existing archive).")
|
| 182 |
+
|
| 183 |
+
args = parser.parse_args(args)
|
| 184 |
+
|
| 185 |
+
# Handle `python -m zipapp archive.pyz --info`.
|
| 186 |
+
if args.info:
|
| 187 |
+
if not os.path.isfile(args.source):
|
| 188 |
+
raise SystemExit("Can only get info for an archive file")
|
| 189 |
+
interpreter = get_interpreter(args.source)
|
| 190 |
+
print("Interpreter: {}".format(interpreter or "<none>"))
|
| 191 |
+
sys.exit(0)
|
| 192 |
+
|
| 193 |
+
if os.path.isfile(args.source):
|
| 194 |
+
if args.output is None or (os.path.exists(args.output) and
|
| 195 |
+
os.path.samefile(args.source, args.output)):
|
| 196 |
+
raise SystemExit("In-place editing of archives is not supported")
|
| 197 |
+
if args.main:
|
| 198 |
+
raise SystemExit("Cannot change the main function when copying")
|
| 199 |
+
|
| 200 |
+
create_archive(args.source, args.output,
|
| 201 |
+
interpreter=args.python, main=args.main,
|
| 202 |
+
compressed=args.compress)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
if __name__ == '__main__':
|
| 206 |
+
main()
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _chunk_cat(at::TensorList tensors, int64_t dim, int64_t num_chunks);
|
| 21 |
+
TORCH_API at::Tensor & _chunk_cat_out(at::Tensor & out, at::TensorList tensors, int64_t dim, int64_t num_chunks);
|
| 22 |
+
TORCH_API at::Tensor & _chunk_cat_outf(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_foreach_ceil_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
|
| 26 |
+
inline ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self) {
|
| 27 |
+
return at::_ops::_foreach_ceil::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
|
| 31 |
+
inline void _foreach_ceil_(at::TensorList self) {
|
| 32 |
+
return at::_ops::_foreach_ceil_::call(self);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
|
| 36 |
+
inline void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
|
| 37 |
+
return at::_ops::_foreach_ceil_out::call(self, out);
|
| 38 |
+
}
|
| 39 |
+
// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
|
| 40 |
+
inline void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
|
| 41 |
+
return at::_ops::_foreach_ceil_out::call(self, out);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_erf {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf(Tensor[] self) -> Tensor[]")
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_erf_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf_(Tensor(a!)[] self) -> ()")
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_erf_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_max.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_foreach_max_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_foreach_max(Tensor[] self) -> Tensor[]
|
| 26 |
+
inline ::std::vector<at::Tensor> _foreach_max(at::TensorList self) {
|
| 27 |
+
return at::_ops::_foreach_max::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
|
| 31 |
+
inline void _foreach_max_out(at::TensorList out, at::TensorList self) {
|
| 32 |
+
return at::_ops::_foreach_max_out::call(self, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
|
| 35 |
+
inline void _foreach_max_outf(at::TensorList self, at::TensorList out) {
|
| 36 |
+
return at::_ops::_foreach_max_out::call(self, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
|
| 26 |
+
inline ::std::vector<at::Tensor> _histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
|
| 27 |
+
return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
|
| 31 |
+
inline void _histogramdd_bin_edges_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
|
| 32 |
+
return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
|
| 35 |
+
inline void _histogramdd_bin_edges_outf(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::TensorList out) {
|
| 36 |
+
return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
|
| 21 |
+
TORCH_API at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
|
| 22 |
+
TORCH_API at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_logcumsumexp_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
|
| 26 |
+
inline at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim) {
|
| 27 |
+
return at::_ops::_logcumsumexp::call(self, dim);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
|
| 32 |
+
return at::_ops::_logcumsumexp_out::call(self, dim, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_logcumsumexp_out::call(self, dim, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _neg_view_copy {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_neg_view_copy")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_neg_view_copy(Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _neg_view_copy_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_neg_view_copy")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _pad_enum {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, ::std::optional<double>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pad_enum")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_test_optional_intlist_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
|
| 26 |
+
inline at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
|
| 27 |
+
return at::_ops::_test_optional_intlist::call(values, addends);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _test_optional_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
|
| 32 |
+
return at::_ops::_test_optional_intlist_out::call(values, addends, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _test_optional_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_test_optional_intlist_out::call(values, addends, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _to_copy {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, bool, ::std::optional<at::MemoryFormat>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_copy")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _to_copy_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, bool, ::std::optional<at::MemoryFormat>, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_copy")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_unsafe_view_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
|
| 26 |
+
inline at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
|
| 27 |
+
return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
|
| 32 |
+
return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
|
| 37 |
+
inline at::Tensor _unsafe_view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
|
| 38 |
+
return at::_ops::_unsafe_view::call(self, size);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor _unsafe_view(const at::Tensor & self, c10::SymIntArrayRef size) {
|
| 43 |
+
return at::_ops::_unsafe_view::call(self, size);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
|
| 49 |
+
return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 53 |
+
at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
|
| 54 |
+
return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
|
| 60 |
+
return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 64 |
+
at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
|
| 65 |
+
return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 70 |
+
inline at::Tensor & _unsafe_view_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
|
| 71 |
+
return at::_ops::_unsafe_view_out::call(self, size, out);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 75 |
+
at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
|
| 76 |
+
return at::_ops::_unsafe_view_out::call(self, size, out);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
|
| 81 |
+
inline at::Tensor & _unsafe_view_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
|
| 82 |
+
return at::_ops::_unsafe_view_out::call(self, size, out);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor & _unsafe_view_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
|
| 87 |
+
return at::_ops::_unsafe_view_out::call(self, size, out);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor absolute(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & absolute_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeimplicitautograd
|
| 26 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/alias.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/alias_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::alias(Tensor(a) self) -> Tensor(a)
|
| 26 |
+
inline at::Tensor alias(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::alias::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/column_stack_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor column_stack(at::TensorList tensors);
|
| 21 |
+
TORCH_API at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors);
|
| 22 |
+
TORCH_API at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeimplicitautograd
|
| 25 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_backward.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/cumprod_backward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
|
| 26 |
+
inline at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
|
| 27 |
+
return at::_ops::cumprod_backward::call(grad, input, dim, output);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_cumsum : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API diag_embed {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diag_embed")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API diag_embed_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, int64_t, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diag_embed")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|