Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- parrot/lib/python3.10/_markupbase.py +396 -0
- parrot/lib/python3.10/_pyio.py +2718 -0
- parrot/lib/python3.10/_weakrefset.py +206 -0
- parrot/lib/python3.10/asyncore.py +649 -0
- parrot/lib/python3.10/calendar.py +759 -0
- parrot/lib/python3.10/colorsys.py +165 -0
- parrot/lib/python3.10/compileall.py +463 -0
- parrot/lib/python3.10/crypt.py +120 -0
- parrot/lib/python3.10/csv.py +444 -0
- parrot/lib/python3.10/dataclasses.py +1453 -0
- parrot/lib/python3.10/datetime.py +2524 -0
- parrot/lib/python3.10/difflib.py +2056 -0
- parrot/lib/python3.10/distutils/debug.py +5 -0
- parrot/lib/python3.10/distutils/dist.py +1256 -0
- parrot/lib/python3.10/distutils/errors.py +97 -0
- parrot/lib/python3.10/distutils/filelist.py +327 -0
- parrot/lib/python3.10/distutils/tests/Setup.sample +67 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_dumb.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_msi.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_build.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_build_ext.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_build_scripts.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_cmd.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_dep_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_file_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_install_data.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_msvc9compiler.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_register.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_sdist.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_sysconfig.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_unixccompiler.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/__pycache__/test_version.cpython-310.pyc +0 -0
- parrot/lib/python3.10/distutils/tests/support.py +209 -0
- parrot/lib/python3.10/distutils/tests/test_archive_util.py +396 -0
- parrot/lib/python3.10/distutils/tests/test_bdist_dumb.py +97 -0
- parrot/lib/python3.10/distutils/tests/test_bdist_rpm.py +135 -0
- parrot/lib/python3.10/distutils/tests/test_build.py +56 -0
- parrot/lib/python3.10/distutils/tests/test_build_clib.py +144 -0
- parrot/lib/python3.10/distutils/tests/test_build_scripts.py +112 -0
- parrot/lib/python3.10/distutils/tests/test_check.py +163 -0
- parrot/lib/python3.10/distutils/tests/test_config.py +141 -0
- parrot/lib/python3.10/distutils/tests/test_cygwinccompiler.py +154 -0
- parrot/lib/python3.10/distutils/tests/test_dir_util.py +139 -0
- parrot/lib/python3.10/distutils/tests/test_extension.py +70 -0
- parrot/lib/python3.10/distutils/tests/test_file_util.py +124 -0
- parrot/lib/python3.10/distutils/tests/test_install.py +260 -0
- parrot/lib/python3.10/distutils/tests/test_install_headers.py +39 -0
parrot/lib/python3.10/_markupbase.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared support for scanning document type declarations in HTML and XHTML.
|
| 2 |
+
|
| 3 |
+
This module is used as a foundation for the html.parser module. It has no
|
| 4 |
+
documented public API and should not be used directly.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
|
| 11 |
+
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
|
| 12 |
+
_commentclose = re.compile(r'--\s*>')
|
| 13 |
+
_markedsectionclose = re.compile(r']\s*]\s*>')
|
| 14 |
+
|
| 15 |
+
# An analysis of the MS-Word extensions is available at
|
| 16 |
+
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
|
| 17 |
+
|
| 18 |
+
_msmarkedsectionclose = re.compile(r']\s*>')
|
| 19 |
+
|
| 20 |
+
del re
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ParserBase:
|
| 24 |
+
"""Parser base class which provides some common support methods used
|
| 25 |
+
by the SGML/HTML and XHTML parsers."""
|
| 26 |
+
|
| 27 |
+
def __init__(self):
|
| 28 |
+
if self.__class__ is ParserBase:
|
| 29 |
+
raise RuntimeError(
|
| 30 |
+
"_markupbase.ParserBase must be subclassed")
|
| 31 |
+
|
| 32 |
+
def reset(self):
|
| 33 |
+
self.lineno = 1
|
| 34 |
+
self.offset = 0
|
| 35 |
+
|
| 36 |
+
def getpos(self):
|
| 37 |
+
"""Return current line number and offset."""
|
| 38 |
+
return self.lineno, self.offset
|
| 39 |
+
|
| 40 |
+
# Internal -- update line number and offset. This should be
|
| 41 |
+
# called for each piece of data exactly once, in order -- in other
|
| 42 |
+
# words the concatenation of all the input strings to this
|
| 43 |
+
# function should be exactly the entire input.
|
| 44 |
+
def updatepos(self, i, j):
|
| 45 |
+
if i >= j:
|
| 46 |
+
return j
|
| 47 |
+
rawdata = self.rawdata
|
| 48 |
+
nlines = rawdata.count("\n", i, j)
|
| 49 |
+
if nlines:
|
| 50 |
+
self.lineno = self.lineno + nlines
|
| 51 |
+
pos = rawdata.rindex("\n", i, j) # Should not fail
|
| 52 |
+
self.offset = j-(pos+1)
|
| 53 |
+
else:
|
| 54 |
+
self.offset = self.offset + j-i
|
| 55 |
+
return j
|
| 56 |
+
|
| 57 |
+
_decl_otherchars = ''
|
| 58 |
+
|
| 59 |
+
# Internal -- parse declaration (for use by subclasses).
|
| 60 |
+
def parse_declaration(self, i):
|
| 61 |
+
# This is some sort of declaration; in "HTML as
|
| 62 |
+
# deployed," this should only be the document type
|
| 63 |
+
# declaration ("<!DOCTYPE html...>").
|
| 64 |
+
# ISO 8879:1986, however, has more complex
|
| 65 |
+
# declaration syntax for elements in <!...>, including:
|
| 66 |
+
# --comment--
|
| 67 |
+
# [marked section]
|
| 68 |
+
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
|
| 69 |
+
# ATTLIST, NOTATION, SHORTREF, USEMAP,
|
| 70 |
+
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
|
| 71 |
+
rawdata = self.rawdata
|
| 72 |
+
j = i + 2
|
| 73 |
+
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
|
| 74 |
+
if rawdata[j:j+1] == ">":
|
| 75 |
+
# the empty comment <!>
|
| 76 |
+
return j + 1
|
| 77 |
+
if rawdata[j:j+1] in ("-", ""):
|
| 78 |
+
# Start of comment followed by buffer boundary,
|
| 79 |
+
# or just a buffer boundary.
|
| 80 |
+
return -1
|
| 81 |
+
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
|
| 82 |
+
n = len(rawdata)
|
| 83 |
+
if rawdata[j:j+2] == '--': #comment
|
| 84 |
+
# Locate --.*-- as the body of the comment
|
| 85 |
+
return self.parse_comment(i)
|
| 86 |
+
elif rawdata[j] == '[': #marked section
|
| 87 |
+
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
|
| 88 |
+
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
|
| 89 |
+
# Note that this is extended by Microsoft Office "Save as Web" function
|
| 90 |
+
# to include [if...] and [endif].
|
| 91 |
+
return self.parse_marked_section(i)
|
| 92 |
+
else: #all other declaration elements
|
| 93 |
+
decltype, j = self._scan_name(j, i)
|
| 94 |
+
if j < 0:
|
| 95 |
+
return j
|
| 96 |
+
if decltype == "doctype":
|
| 97 |
+
self._decl_otherchars = ''
|
| 98 |
+
while j < n:
|
| 99 |
+
c = rawdata[j]
|
| 100 |
+
if c == ">":
|
| 101 |
+
# end of declaration syntax
|
| 102 |
+
data = rawdata[i+2:j]
|
| 103 |
+
if decltype == "doctype":
|
| 104 |
+
self.handle_decl(data)
|
| 105 |
+
else:
|
| 106 |
+
# According to the HTML5 specs sections "8.2.4.44 Bogus
|
| 107 |
+
# comment state" and "8.2.4.45 Markup declaration open
|
| 108 |
+
# state", a comment token should be emitted.
|
| 109 |
+
# Calling unknown_decl provides more flexibility though.
|
| 110 |
+
self.unknown_decl(data)
|
| 111 |
+
return j + 1
|
| 112 |
+
if c in "\"'":
|
| 113 |
+
m = _declstringlit_match(rawdata, j)
|
| 114 |
+
if not m:
|
| 115 |
+
return -1 # incomplete
|
| 116 |
+
j = m.end()
|
| 117 |
+
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
|
| 118 |
+
name, j = self._scan_name(j, i)
|
| 119 |
+
elif c in self._decl_otherchars:
|
| 120 |
+
j = j + 1
|
| 121 |
+
elif c == "[":
|
| 122 |
+
# this could be handled in a separate doctype parser
|
| 123 |
+
if decltype == "doctype":
|
| 124 |
+
j = self._parse_doctype_subset(j + 1, i)
|
| 125 |
+
elif decltype in {"attlist", "linktype", "link", "element"}:
|
| 126 |
+
# must tolerate []'d groups in a content model in an element declaration
|
| 127 |
+
# also in data attribute specifications of attlist declaration
|
| 128 |
+
# also link type declaration subsets in linktype declarations
|
| 129 |
+
# also link attribute specification lists in link declarations
|
| 130 |
+
raise AssertionError("unsupported '[' char in %s declaration" % decltype)
|
| 131 |
+
else:
|
| 132 |
+
raise AssertionError("unexpected '[' char in declaration")
|
| 133 |
+
else:
|
| 134 |
+
raise AssertionError("unexpected %r char in declaration" % rawdata[j])
|
| 135 |
+
if j < 0:
|
| 136 |
+
return j
|
| 137 |
+
return -1 # incomplete
|
| 138 |
+
|
| 139 |
+
# Internal -- parse a marked section
|
| 140 |
+
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
|
| 141 |
+
def parse_marked_section(self, i, report=1):
|
| 142 |
+
rawdata= self.rawdata
|
| 143 |
+
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
|
| 144 |
+
sectName, j = self._scan_name( i+3, i )
|
| 145 |
+
if j < 0:
|
| 146 |
+
return j
|
| 147 |
+
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
|
| 148 |
+
# look for standard ]]> ending
|
| 149 |
+
match= _markedsectionclose.search(rawdata, i+3)
|
| 150 |
+
elif sectName in {"if", "else", "endif"}:
|
| 151 |
+
# look for MS Office ]> ending
|
| 152 |
+
match= _msmarkedsectionclose.search(rawdata, i+3)
|
| 153 |
+
else:
|
| 154 |
+
raise AssertionError(
|
| 155 |
+
'unknown status keyword %r in marked section' % rawdata[i+3:j]
|
| 156 |
+
)
|
| 157 |
+
if not match:
|
| 158 |
+
return -1
|
| 159 |
+
if report:
|
| 160 |
+
j = match.start(0)
|
| 161 |
+
self.unknown_decl(rawdata[i+3: j])
|
| 162 |
+
return match.end(0)
|
| 163 |
+
|
| 164 |
+
# Internal -- parse comment, return length or -1 if not terminated
|
| 165 |
+
def parse_comment(self, i, report=1):
|
| 166 |
+
rawdata = self.rawdata
|
| 167 |
+
if rawdata[i:i+4] != '<!--':
|
| 168 |
+
raise AssertionError('unexpected call to parse_comment()')
|
| 169 |
+
match = _commentclose.search(rawdata, i+4)
|
| 170 |
+
if not match:
|
| 171 |
+
return -1
|
| 172 |
+
if report:
|
| 173 |
+
j = match.start(0)
|
| 174 |
+
self.handle_comment(rawdata[i+4: j])
|
| 175 |
+
return match.end(0)
|
| 176 |
+
|
| 177 |
+
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
|
| 178 |
+
# returning the index just past any whitespace following the trailing ']'.
|
| 179 |
+
def _parse_doctype_subset(self, i, declstartpos):
|
| 180 |
+
rawdata = self.rawdata
|
| 181 |
+
n = len(rawdata)
|
| 182 |
+
j = i
|
| 183 |
+
while j < n:
|
| 184 |
+
c = rawdata[j]
|
| 185 |
+
if c == "<":
|
| 186 |
+
s = rawdata[j:j+2]
|
| 187 |
+
if s == "<":
|
| 188 |
+
# end of buffer; incomplete
|
| 189 |
+
return -1
|
| 190 |
+
if s != "<!":
|
| 191 |
+
self.updatepos(declstartpos, j + 1)
|
| 192 |
+
raise AssertionError(
|
| 193 |
+
"unexpected char in internal subset (in %r)" % s
|
| 194 |
+
)
|
| 195 |
+
if (j + 2) == n:
|
| 196 |
+
# end of buffer; incomplete
|
| 197 |
+
return -1
|
| 198 |
+
if (j + 4) > n:
|
| 199 |
+
# end of buffer; incomplete
|
| 200 |
+
return -1
|
| 201 |
+
if rawdata[j:j+4] == "<!--":
|
| 202 |
+
j = self.parse_comment(j, report=0)
|
| 203 |
+
if j < 0:
|
| 204 |
+
return j
|
| 205 |
+
continue
|
| 206 |
+
name, j = self._scan_name(j + 2, declstartpos)
|
| 207 |
+
if j == -1:
|
| 208 |
+
return -1
|
| 209 |
+
if name not in {"attlist", "element", "entity", "notation"}:
|
| 210 |
+
self.updatepos(declstartpos, j + 2)
|
| 211 |
+
raise AssertionError(
|
| 212 |
+
"unknown declaration %r in internal subset" % name
|
| 213 |
+
)
|
| 214 |
+
# handle the individual names
|
| 215 |
+
meth = getattr(self, "_parse_doctype_" + name)
|
| 216 |
+
j = meth(j, declstartpos)
|
| 217 |
+
if j < 0:
|
| 218 |
+
return j
|
| 219 |
+
elif c == "%":
|
| 220 |
+
# parameter entity reference
|
| 221 |
+
if (j + 1) == n:
|
| 222 |
+
# end of buffer; incomplete
|
| 223 |
+
return -1
|
| 224 |
+
s, j = self._scan_name(j + 1, declstartpos)
|
| 225 |
+
if j < 0:
|
| 226 |
+
return j
|
| 227 |
+
if rawdata[j] == ";":
|
| 228 |
+
j = j + 1
|
| 229 |
+
elif c == "]":
|
| 230 |
+
j = j + 1
|
| 231 |
+
while j < n and rawdata[j].isspace():
|
| 232 |
+
j = j + 1
|
| 233 |
+
if j < n:
|
| 234 |
+
if rawdata[j] == ">":
|
| 235 |
+
return j
|
| 236 |
+
self.updatepos(declstartpos, j)
|
| 237 |
+
raise AssertionError("unexpected char after internal subset")
|
| 238 |
+
else:
|
| 239 |
+
return -1
|
| 240 |
+
elif c.isspace():
|
| 241 |
+
j = j + 1
|
| 242 |
+
else:
|
| 243 |
+
self.updatepos(declstartpos, j)
|
| 244 |
+
raise AssertionError("unexpected char %r in internal subset" % c)
|
| 245 |
+
# end of buffer reached
|
| 246 |
+
return -1
|
| 247 |
+
|
| 248 |
+
# Internal -- scan past <!ELEMENT declarations
|
| 249 |
+
def _parse_doctype_element(self, i, declstartpos):
|
| 250 |
+
name, j = self._scan_name(i, declstartpos)
|
| 251 |
+
if j == -1:
|
| 252 |
+
return -1
|
| 253 |
+
# style content model; just skip until '>'
|
| 254 |
+
rawdata = self.rawdata
|
| 255 |
+
if '>' in rawdata[j:]:
|
| 256 |
+
return rawdata.find(">", j) + 1
|
| 257 |
+
return -1
|
| 258 |
+
|
| 259 |
+
# Internal -- scan past <!ATTLIST declarations
|
| 260 |
+
def _parse_doctype_attlist(self, i, declstartpos):
|
| 261 |
+
rawdata = self.rawdata
|
| 262 |
+
name, j = self._scan_name(i, declstartpos)
|
| 263 |
+
c = rawdata[j:j+1]
|
| 264 |
+
if c == "":
|
| 265 |
+
return -1
|
| 266 |
+
if c == ">":
|
| 267 |
+
return j + 1
|
| 268 |
+
while 1:
|
| 269 |
+
# scan a series of attribute descriptions; simplified:
|
| 270 |
+
# name type [value] [#constraint]
|
| 271 |
+
name, j = self._scan_name(j, declstartpos)
|
| 272 |
+
if j < 0:
|
| 273 |
+
return j
|
| 274 |
+
c = rawdata[j:j+1]
|
| 275 |
+
if c == "":
|
| 276 |
+
return -1
|
| 277 |
+
if c == "(":
|
| 278 |
+
# an enumerated type; look for ')'
|
| 279 |
+
if ")" in rawdata[j:]:
|
| 280 |
+
j = rawdata.find(")", j) + 1
|
| 281 |
+
else:
|
| 282 |
+
return -1
|
| 283 |
+
while rawdata[j:j+1].isspace():
|
| 284 |
+
j = j + 1
|
| 285 |
+
if not rawdata[j:]:
|
| 286 |
+
# end of buffer, incomplete
|
| 287 |
+
return -1
|
| 288 |
+
else:
|
| 289 |
+
name, j = self._scan_name(j, declstartpos)
|
| 290 |
+
c = rawdata[j:j+1]
|
| 291 |
+
if not c:
|
| 292 |
+
return -1
|
| 293 |
+
if c in "'\"":
|
| 294 |
+
m = _declstringlit_match(rawdata, j)
|
| 295 |
+
if m:
|
| 296 |
+
j = m.end()
|
| 297 |
+
else:
|
| 298 |
+
return -1
|
| 299 |
+
c = rawdata[j:j+1]
|
| 300 |
+
if not c:
|
| 301 |
+
return -1
|
| 302 |
+
if c == "#":
|
| 303 |
+
if rawdata[j:] == "#":
|
| 304 |
+
# end of buffer
|
| 305 |
+
return -1
|
| 306 |
+
name, j = self._scan_name(j + 1, declstartpos)
|
| 307 |
+
if j < 0:
|
| 308 |
+
return j
|
| 309 |
+
c = rawdata[j:j+1]
|
| 310 |
+
if not c:
|
| 311 |
+
return -1
|
| 312 |
+
if c == '>':
|
| 313 |
+
# all done
|
| 314 |
+
return j + 1
|
| 315 |
+
|
| 316 |
+
# Internal -- scan past <!NOTATION declarations
|
| 317 |
+
def _parse_doctype_notation(self, i, declstartpos):
|
| 318 |
+
name, j = self._scan_name(i, declstartpos)
|
| 319 |
+
if j < 0:
|
| 320 |
+
return j
|
| 321 |
+
rawdata = self.rawdata
|
| 322 |
+
while 1:
|
| 323 |
+
c = rawdata[j:j+1]
|
| 324 |
+
if not c:
|
| 325 |
+
# end of buffer; incomplete
|
| 326 |
+
return -1
|
| 327 |
+
if c == '>':
|
| 328 |
+
return j + 1
|
| 329 |
+
if c in "'\"":
|
| 330 |
+
m = _declstringlit_match(rawdata, j)
|
| 331 |
+
if not m:
|
| 332 |
+
return -1
|
| 333 |
+
j = m.end()
|
| 334 |
+
else:
|
| 335 |
+
name, j = self._scan_name(j, declstartpos)
|
| 336 |
+
if j < 0:
|
| 337 |
+
return j
|
| 338 |
+
|
| 339 |
+
# Internal -- scan past <!ENTITY declarations
|
| 340 |
+
def _parse_doctype_entity(self, i, declstartpos):
|
| 341 |
+
rawdata = self.rawdata
|
| 342 |
+
if rawdata[i:i+1] == "%":
|
| 343 |
+
j = i + 1
|
| 344 |
+
while 1:
|
| 345 |
+
c = rawdata[j:j+1]
|
| 346 |
+
if not c:
|
| 347 |
+
return -1
|
| 348 |
+
if c.isspace():
|
| 349 |
+
j = j + 1
|
| 350 |
+
else:
|
| 351 |
+
break
|
| 352 |
+
else:
|
| 353 |
+
j = i
|
| 354 |
+
name, j = self._scan_name(j, declstartpos)
|
| 355 |
+
if j < 0:
|
| 356 |
+
return j
|
| 357 |
+
while 1:
|
| 358 |
+
c = self.rawdata[j:j+1]
|
| 359 |
+
if not c:
|
| 360 |
+
return -1
|
| 361 |
+
if c in "'\"":
|
| 362 |
+
m = _declstringlit_match(rawdata, j)
|
| 363 |
+
if m:
|
| 364 |
+
j = m.end()
|
| 365 |
+
else:
|
| 366 |
+
return -1 # incomplete
|
| 367 |
+
elif c == ">":
|
| 368 |
+
return j + 1
|
| 369 |
+
else:
|
| 370 |
+
name, j = self._scan_name(j, declstartpos)
|
| 371 |
+
if j < 0:
|
| 372 |
+
return j
|
| 373 |
+
|
| 374 |
+
# Internal -- scan a name token and the new position and the token, or
|
| 375 |
+
# return -1 if we've reached the end of the buffer.
|
| 376 |
+
def _scan_name(self, i, declstartpos):
|
| 377 |
+
rawdata = self.rawdata
|
| 378 |
+
n = len(rawdata)
|
| 379 |
+
if i == n:
|
| 380 |
+
return None, -1
|
| 381 |
+
m = _declname_match(rawdata, i)
|
| 382 |
+
if m:
|
| 383 |
+
s = m.group()
|
| 384 |
+
name = s.strip()
|
| 385 |
+
if (i + len(s)) == n:
|
| 386 |
+
return None, -1 # end of buffer
|
| 387 |
+
return name.lower(), m.end()
|
| 388 |
+
else:
|
| 389 |
+
self.updatepos(declstartpos, i)
|
| 390 |
+
raise AssertionError(
|
| 391 |
+
"expected name token at %r" % rawdata[declstartpos:declstartpos+20]
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
# To be overridden -- handlers for unknown objects
|
| 395 |
+
def unknown_decl(self, data):
|
| 396 |
+
pass
|
parrot/lib/python3.10/_pyio.py
ADDED
|
@@ -0,0 +1,2718 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python implementation of the io module.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import abc
|
| 7 |
+
import codecs
|
| 8 |
+
import errno
|
| 9 |
+
import stat
|
| 10 |
+
import sys
|
| 11 |
+
# Import _thread instead of threading to reduce startup cost
|
| 12 |
+
from _thread import allocate_lock as Lock
|
| 13 |
+
if sys.platform in {'win32', 'cygwin'}:
|
| 14 |
+
from msvcrt import setmode as _setmode
|
| 15 |
+
else:
|
| 16 |
+
_setmode = None
|
| 17 |
+
|
| 18 |
+
import io
|
| 19 |
+
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
|
| 20 |
+
|
| 21 |
+
# Whence values accepted by seek().  Hardwired so the pure-Python
# implementation does not need the os module's SEEK_* constants for the
# common cases; the optional sparse-file values are added when present.
valid_seek_flags = {0, 1, 2}  # Hardwired values
if hasattr(os, 'SEEK_HOLE'):
    valid_seek_flags.add(os.SEEK_HOLE)
    valid_seek_flags.add(os.SEEK_DATA)

# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes

# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.

# Rebind for compatibility
BlockingIOError = BlockingIOError

# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
# True on debug builds (sys.gettotalrefcount exists) or under -X dev.
_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
# Does open() check its 'errors' argument?
_CHECK_ERRORS = _IOBASE_EMITS_UNRAISABLE
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def text_encoding(encoding, stacklevel=2):
|
| 44 |
+
"""
|
| 45 |
+
A helper function to choose the text encoding.
|
| 46 |
+
|
| 47 |
+
When encoding is not None, just return it.
|
| 48 |
+
Otherwise, return the default text encoding (i.e. "locale").
|
| 49 |
+
|
| 50 |
+
This function emits an EncodingWarning if *encoding* is None and
|
| 51 |
+
sys.flags.warn_default_encoding is true.
|
| 52 |
+
|
| 53 |
+
This can be used in APIs with an encoding=None parameter
|
| 54 |
+
that pass it to TextIOWrapper or open.
|
| 55 |
+
However, please consider using encoding="utf-8" for new APIs.
|
| 56 |
+
"""
|
| 57 |
+
if encoding is None:
|
| 58 |
+
encoding = "locale"
|
| 59 |
+
if sys.flags.warn_default_encoding:
|
| 60 |
+
import warnings
|
| 61 |
+
warnings.warn("'encoding' argument not specified.",
|
| 62 |
+
EncodingWarning, stacklevel + 1)
|
| 63 |
+
return encoding
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Wrapper for builtins.open
|
| 67 |
+
#
|
| 68 |
+
# Trick so that open() won't become a bound method when stored
|
| 69 |
+
# as a class variable (as dbm.dumb does).
|
| 70 |
+
#
|
| 71 |
+
# See init_set_builtins_open() in Python/pylifecycle.c.
|
| 72 |
+
@staticmethod
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
         newline=None, closefd=True, opener=None):

    r"""Open file and return a stream.  Raise OSError upon failure.

    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file is
    opened. It defaults to 'r' which means open for reading in text mode. Other
    common values are 'w' for writing (truncating the file if it already
    exists), 'x' for exclusive creation of a new file, and 'a' for appending
    (which on some Unix systems, means that all writes append to the end of the
    file regardless of the current seek position). In text mode, if encoding is
    not specified the encoding used is platform dependent. (For reading and
    writing raw bytes use binary mode and leave encoding unspecified.) The
    available modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'x'       create a new file and open it for writing
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (deprecated)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises an `FileExistsError` if the file already exists.

    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.

    'U' mode is deprecated and will raise an exception in future versions
    of Python.  It has no effect in Python 3.  Use newline to control
    universal newlines mode.

    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer.  When no buffering argument is
    given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True)
      use line buffering.  Other text files use the policy described above
      for binary files.

    encoding is the str name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed.  See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline is a string controlling how universal newlines works (it only
    applies to text mode). It can be None, '', '\n', '\r', and '\r\n'.  It works
    as follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.

    closedfd is a bool. If closefd is False, the underlying file descriptor will
    be kept open when the file is closed. This does not work when a file name is
    given and must be True in that case.

    The newly created file is non-inheritable.

    A custom opener can be used by passing a callable as *opener*. The
    underlying file descriptor for the file object is then obtained by calling
    *opener* with (*file*, *flags*). *opener* must return an open file
    descriptor (passing os.open as *opener* results in functionality similar to
    passing None).

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    # --- Argument validation (types first, then mode semantics). ---
    if not isinstance(file, int):
        file = os.fspath(file)
    if not isinstance(file, (str, bytes, int)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, str):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, int):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, str):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)
    # The set comparison also rejects duplicated mode characters
    # (len(mode) > len(modes) means some character occurred twice).
    modes = set(mode)
    if modes - set("axrwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    creating = "x" in modes
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if creating or writing or appending or updating:
            raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
        import warnings
        warnings.warn("'U' mode is deprecated",
                      DeprecationWarning, 2)
        # 'U' alone behaves like plain read mode.
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if creating + reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (creating or reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    if binary and buffering == 1:
        import warnings
        warnings.warn("line buffering (buffering=1) isn't supported in binary "
                      "mode, the default buffer size will be used",
                      RuntimeWarning, 2)
    # --- Layer 1: the raw, unbuffered file object. ---
    raw = FileIO(file,
                 (creating and "x" or "") +
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd, opener=opener)
    result = raw
    # From here on, any failure must close the outermost layer built so
    # far ("result") so the file descriptor is not leaked.
    try:
        line_buffering = False
        if buffering == 1 or buffering < 0 and raw.isatty():
            buffering = -1
            line_buffering = True
        if buffering < 0:
            # Heuristic default: use the filesystem block size if available.
            buffering = DEFAULT_BUFFER_SIZE
            try:
                bs = os.fstat(raw.fileno()).st_blksize
            except (OSError, AttributeError):
                pass
            else:
                if bs > 1:
                    buffering = bs
        if buffering < 0:
            raise ValueError("invalid buffering size")
        if buffering == 0:
            if binary:
                return result
            raise ValueError("can't have unbuffered text I/O")
        # --- Layer 2: the buffered wrapper appropriate for the mode. ---
        if updating:
            buffer = BufferedRandom(raw, buffering)
        elif creating or writing or appending:
            buffer = BufferedWriter(raw, buffering)
        elif reading:
            buffer = BufferedReader(raw, buffering)
        else:
            raise ValueError("unknown mode: %r" % mode)
        result = buffer
        if binary:
            return result
        # --- Layer 3: text decoding/encoding for text-mode opens. ---
        encoding = text_encoding(encoding)
        text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
        result = text
        text.mode = mode
        return result
    except:
        result.close()
        raise
|
| 289 |
+
|
| 290 |
+
# Define a default pure-Python implementation for open_code()
|
| 291 |
+
# that does not allow hooks. Warn on first use. Defined for tests.
|
| 292 |
+
def _open_code_with_warning(path):
    """Open *path* with mode ``'rb'`` for use as executable code.

    ``path`` should be an absolute path.

    Runtimes that support it can hook open_code() to give embedders
    control over code files; this pure-Python fallback cannot honour
    such hooks, so it emits a RuntimeWarning before delegating to a
    plain binary open().  Defined for tests.
    """
    import warnings
    warnings.warn("_pyio.open_code() may not be using hooks",
                  RuntimeWarning, 2)
    return open(path, "rb")
|
| 307 |
+
|
| 308 |
+
# Prefer the C io module's hook-aware open_code() when it exists;
# otherwise fall back to the pure-Python warning stub defined above.
try:
    open_code = io.open_code
except AttributeError:
    open_code = _open_code_with_warning
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def __getattr__(name):
    """Module-level __getattr__ (PEP 562) serving the deprecated
    OpenWrapper alias; any other missing attribute raises AttributeError.
    """
    if name != "OpenWrapper":
        raise AttributeError(name)
    # bpo-43680: before Python 3.10, _pyio.open was a plain function, so
    # builtins.open was set to OpenWrapper to avoid it becoming a bound
    # method when stored as a class variable (as dbm.dumb does).  _pyio.open
    # is now a static method and builtins.open() is io.open(), so this
    # alias survives only for backward compatibility.
    import warnings
    warnings.warn('OpenWrapper is deprecated, use open instead',
                  DeprecationWarning, stacklevel=2)
    # Cache the alias as a real module attribute so this hook is not
    # consulted again for subsequent lookups.
    global OpenWrapper
    OpenWrapper = open
    return OpenWrapper
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
# In normal operation, both `UnsupportedOperation`s should be bound to the
|
| 331 |
+
# same object.
|
| 332 |
+
try:
    UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
    # Fallback when the C io module does not provide the name.  Both
    # OSError and ValueError are bases for backward compatibility with
    # callers that catch either one.
    class UnsupportedOperation(OSError, ValueError):
        pass
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
class IOBase(metaclass=abc.ABCMeta):
|
| 340 |
+
|
| 341 |
+
"""The abstract base class for all I/O classes.
|
| 342 |
+
|
| 343 |
+
This class provides dummy implementations for many methods that
|
| 344 |
+
derived classes can override selectively; the default implementations
|
| 345 |
+
represent a file that cannot be read, written or seeked.
|
| 346 |
+
|
| 347 |
+
Even though IOBase does not declare read or write because
|
| 348 |
+
their signatures will vary, implementations and clients should
|
| 349 |
+
consider those methods part of the interface. Also, implementations
|
| 350 |
+
may raise UnsupportedOperation when operations they do not support are
|
| 351 |
+
called.
|
| 352 |
+
|
| 353 |
+
The basic type used for binary data read from or written to a file is
|
| 354 |
+
bytes. Other bytes-like objects are accepted as method arguments too.
|
| 355 |
+
Text I/O classes work with str data.
|
| 356 |
+
|
| 357 |
+
Note that calling any method (even inquiries) on a closed stream is
|
| 358 |
+
undefined. Implementations may raise OSError in this case.
|
| 359 |
+
|
| 360 |
+
IOBase (and its subclasses) support the iterator protocol, meaning
|
| 361 |
+
that an IOBase object can be iterated over yielding the lines in a
|
| 362 |
+
stream.
|
| 363 |
+
|
| 364 |
+
IOBase also supports the :keyword:`with` statement. In this example,
|
| 365 |
+
fp is closed after the suite of the with statement is complete:
|
| 366 |
+
|
| 367 |
+
with open('spam.txt', 'r') as fp:
|
| 368 |
+
fp.write('Spam and eggs!')
|
| 369 |
+
"""
|
| 370 |
+
|
| 371 |
+
### Internal ###
|
| 372 |
+
|
| 373 |
+
def _unsupported(self, name):
|
| 374 |
+
"""Internal: raise an OSError exception for unsupported operations."""
|
| 375 |
+
raise UnsupportedOperation("%s.%s() not supported" %
|
| 376 |
+
(self.__class__.__name__, name))
|
| 377 |
+
|
| 378 |
+
### Positioning ###
|
| 379 |
+
|
| 380 |
+
def seek(self, pos, whence=0):
|
| 381 |
+
"""Change stream position.
|
| 382 |
+
|
| 383 |
+
Change the stream position to byte offset pos. Argument pos is
|
| 384 |
+
interpreted relative to the position indicated by whence. Values
|
| 385 |
+
for whence are ints:
|
| 386 |
+
|
| 387 |
+
* 0 -- start of stream (the default); offset should be zero or positive
|
| 388 |
+
* 1 -- current stream position; offset may be negative
|
| 389 |
+
* 2 -- end of stream; offset is usually negative
|
| 390 |
+
Some operating systems / file systems could provide additional values.
|
| 391 |
+
|
| 392 |
+
Return an int indicating the new absolute position.
|
| 393 |
+
"""
|
| 394 |
+
self._unsupported("seek")
|
| 395 |
+
|
| 396 |
+
def tell(self):
|
| 397 |
+
"""Return an int indicating the current stream position."""
|
| 398 |
+
return self.seek(0, 1)
|
| 399 |
+
|
| 400 |
+
def truncate(self, pos=None):
|
| 401 |
+
"""Truncate file to size bytes.
|
| 402 |
+
|
| 403 |
+
Size defaults to the current IO position as reported by tell(). Return
|
| 404 |
+
the new size.
|
| 405 |
+
"""
|
| 406 |
+
self._unsupported("truncate")
|
| 407 |
+
|
| 408 |
+
### Flush and close ###
|
| 409 |
+
|
| 410 |
+
def flush(self):
|
| 411 |
+
"""Flush write buffers, if applicable.
|
| 412 |
+
|
| 413 |
+
This is not implemented for read-only and non-blocking streams.
|
| 414 |
+
"""
|
| 415 |
+
self._checkClosed()
|
| 416 |
+
# XXX Should this return the number of bytes written???
|
| 417 |
+
|
| 418 |
+
__closed = False
|
| 419 |
+
|
| 420 |
+
def close(self):
|
| 421 |
+
"""Flush and close the IO object.
|
| 422 |
+
|
| 423 |
+
This method has no effect if the file is already closed.
|
| 424 |
+
"""
|
| 425 |
+
if not self.__closed:
|
| 426 |
+
try:
|
| 427 |
+
self.flush()
|
| 428 |
+
finally:
|
| 429 |
+
self.__closed = True
|
| 430 |
+
|
| 431 |
+
def __del__(self):
|
| 432 |
+
"""Destructor. Calls close()."""
|
| 433 |
+
try:
|
| 434 |
+
closed = self.closed
|
| 435 |
+
except AttributeError:
|
| 436 |
+
# If getting closed fails, then the object is probably
|
| 437 |
+
# in an unusable state, so ignore.
|
| 438 |
+
return
|
| 439 |
+
|
| 440 |
+
if closed:
|
| 441 |
+
return
|
| 442 |
+
|
| 443 |
+
if _IOBASE_EMITS_UNRAISABLE:
|
| 444 |
+
self.close()
|
| 445 |
+
else:
|
| 446 |
+
# The try/except block is in case this is called at program
|
| 447 |
+
# exit time, when it's possible that globals have already been
|
| 448 |
+
# deleted, and then the close() call might fail. Since
|
| 449 |
+
# there's nothing we can do about such failures and they annoy
|
| 450 |
+
# the end users, we suppress the traceback.
|
| 451 |
+
try:
|
| 452 |
+
self.close()
|
| 453 |
+
except:
|
| 454 |
+
pass
|
| 455 |
+
|
| 456 |
+
### Inquiries ###
|
| 457 |
+
|
| 458 |
+
def seekable(self):
|
| 459 |
+
"""Return a bool indicating whether object supports random access.
|
| 460 |
+
|
| 461 |
+
If False, seek(), tell() and truncate() will raise OSError.
|
| 462 |
+
This method may need to do a test seek().
|
| 463 |
+
"""
|
| 464 |
+
return False
|
| 465 |
+
|
| 466 |
+
def _checkSeekable(self, msg=None):
|
| 467 |
+
"""Internal: raise UnsupportedOperation if file is not seekable
|
| 468 |
+
"""
|
| 469 |
+
if not self.seekable():
|
| 470 |
+
raise UnsupportedOperation("File or stream is not seekable."
|
| 471 |
+
if msg is None else msg)
|
| 472 |
+
|
| 473 |
+
def readable(self):
|
| 474 |
+
"""Return a bool indicating whether object was opened for reading.
|
| 475 |
+
|
| 476 |
+
If False, read() will raise OSError.
|
| 477 |
+
"""
|
| 478 |
+
return False
|
| 479 |
+
|
| 480 |
+
def _checkReadable(self, msg=None):
|
| 481 |
+
"""Internal: raise UnsupportedOperation if file is not readable
|
| 482 |
+
"""
|
| 483 |
+
if not self.readable():
|
| 484 |
+
raise UnsupportedOperation("File or stream is not readable."
|
| 485 |
+
if msg is None else msg)
|
| 486 |
+
|
| 487 |
+
def writable(self):
|
| 488 |
+
"""Return a bool indicating whether object was opened for writing.
|
| 489 |
+
|
| 490 |
+
If False, write() and truncate() will raise OSError.
|
| 491 |
+
"""
|
| 492 |
+
return False
|
| 493 |
+
|
| 494 |
+
def _checkWritable(self, msg=None):
|
| 495 |
+
"""Internal: raise UnsupportedOperation if file is not writable
|
| 496 |
+
"""
|
| 497 |
+
if not self.writable():
|
| 498 |
+
raise UnsupportedOperation("File or stream is not writable."
|
| 499 |
+
if msg is None else msg)
|
| 500 |
+
|
| 501 |
+
@property
|
| 502 |
+
def closed(self):
|
| 503 |
+
"""closed: bool. True iff the file has been closed.
|
| 504 |
+
|
| 505 |
+
For backwards compatibility, this is a property, not a predicate.
|
| 506 |
+
"""
|
| 507 |
+
return self.__closed
|
| 508 |
+
|
| 509 |
+
def _checkClosed(self, msg=None):
|
| 510 |
+
"""Internal: raise a ValueError if file is closed
|
| 511 |
+
"""
|
| 512 |
+
if self.closed:
|
| 513 |
+
raise ValueError("I/O operation on closed file."
|
| 514 |
+
if msg is None else msg)
|
| 515 |
+
|
| 516 |
+
### Context manager ###
|
| 517 |
+
|
| 518 |
+
def __enter__(self): # That's a forward reference
|
| 519 |
+
"""Context management protocol. Returns self (an instance of IOBase)."""
|
| 520 |
+
self._checkClosed()
|
| 521 |
+
return self
|
| 522 |
+
|
| 523 |
+
def __exit__(self, *args):
|
| 524 |
+
"""Context management protocol. Calls close()"""
|
| 525 |
+
self.close()
|
| 526 |
+
|
| 527 |
+
### Lower-level APIs ###
|
| 528 |
+
|
| 529 |
+
# XXX Should these be present even if unimplemented?
|
| 530 |
+
|
| 531 |
+
def fileno(self):
|
| 532 |
+
"""Returns underlying file descriptor (an int) if one exists.
|
| 533 |
+
|
| 534 |
+
An OSError is raised if the IO object does not use a file descriptor.
|
| 535 |
+
"""
|
| 536 |
+
self._unsupported("fileno")
|
| 537 |
+
|
| 538 |
+
def isatty(self):
|
| 539 |
+
"""Return a bool indicating whether this is an 'interactive' stream.
|
| 540 |
+
|
| 541 |
+
Return False if it can't be determined.
|
| 542 |
+
"""
|
| 543 |
+
self._checkClosed()
|
| 544 |
+
return False
|
| 545 |
+
|
| 546 |
+
### Readline[s] and writelines ###
|
| 547 |
+
|
| 548 |
+
def readline(self, size=-1):
|
| 549 |
+
r"""Read and return a line of bytes from the stream.
|
| 550 |
+
|
| 551 |
+
If size is specified, at most size bytes will be read.
|
| 552 |
+
Size should be an int.
|
| 553 |
+
|
| 554 |
+
The line terminator is always b'\n' for binary files; for text
|
| 555 |
+
files, the newlines argument to open can be used to select the line
|
| 556 |
+
terminator(s) recognized.
|
| 557 |
+
"""
|
| 558 |
+
# For backwards compatibility, a (slowish) readline().
|
| 559 |
+
if hasattr(self, "peek"):
|
| 560 |
+
def nreadahead():
|
| 561 |
+
readahead = self.peek(1)
|
| 562 |
+
if not readahead:
|
| 563 |
+
return 1
|
| 564 |
+
n = (readahead.find(b"\n") + 1) or len(readahead)
|
| 565 |
+
if size >= 0:
|
| 566 |
+
n = min(n, size)
|
| 567 |
+
return n
|
| 568 |
+
else:
|
| 569 |
+
def nreadahead():
|
| 570 |
+
return 1
|
| 571 |
+
if size is None:
|
| 572 |
+
size = -1
|
| 573 |
+
else:
|
| 574 |
+
try:
|
| 575 |
+
size_index = size.__index__
|
| 576 |
+
except AttributeError:
|
| 577 |
+
raise TypeError(f"{size!r} is not an integer")
|
| 578 |
+
else:
|
| 579 |
+
size = size_index()
|
| 580 |
+
res = bytearray()
|
| 581 |
+
while size < 0 or len(res) < size:
|
| 582 |
+
b = self.read(nreadahead())
|
| 583 |
+
if not b:
|
| 584 |
+
break
|
| 585 |
+
res += b
|
| 586 |
+
if res.endswith(b"\n"):
|
| 587 |
+
break
|
| 588 |
+
return bytes(res)
|
| 589 |
+
|
| 590 |
+
def __iter__(self):
|
| 591 |
+
self._checkClosed()
|
| 592 |
+
return self
|
| 593 |
+
|
| 594 |
+
def __next__(self):
|
| 595 |
+
line = self.readline()
|
| 596 |
+
if not line:
|
| 597 |
+
raise StopIteration
|
| 598 |
+
return line
|
| 599 |
+
|
| 600 |
+
def readlines(self, hint=None):
|
| 601 |
+
"""Return a list of lines from the stream.
|
| 602 |
+
|
| 603 |
+
hint can be specified to control the number of lines read: no more
|
| 604 |
+
lines will be read if the total size (in bytes/characters) of all
|
| 605 |
+
lines so far exceeds hint.
|
| 606 |
+
"""
|
| 607 |
+
if hint is None or hint <= 0:
|
| 608 |
+
return list(self)
|
| 609 |
+
n = 0
|
| 610 |
+
lines = []
|
| 611 |
+
for line in self:
|
| 612 |
+
lines.append(line)
|
| 613 |
+
n += len(line)
|
| 614 |
+
if n >= hint:
|
| 615 |
+
break
|
| 616 |
+
return lines
|
| 617 |
+
|
| 618 |
+
def writelines(self, lines):
    """Write a list of lines to the stream.

    Line separators are not added, so it is usual for each of the lines
    provided to have a line separator at the end.
    """
    self._checkClosed()
    write = self.write
    for chunk in lines:
        write(chunk)
# Register this pure-Python ABC as a virtual subclass of the C-accelerated
# one so isinstance checks work across both implementations.
io.IOBase.register(IOBase)
class RawIOBase(IOBase):

    """Base class for raw binary I/O."""

    # The read() method is implemented by calling readinto(); derived
    # classes that want to support read() only need to implement
    # readinto() as a primitive operation.  In general, readinto() can be
    # more efficient than read().

    # (It would be tempting to also provide an implementation of
    # readinto() in terms of read(), in case the latter is a more suitable
    # primitive operation, but that would lead to nasty recursion in case
    # a subclass doesn't implement either.)

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if size is None:
            size = -1
        if size < 0:
            return self.readall()
        # Allocate the requested amount up front, then trim to the number
        # of bytes readinto() actually delivered.
        b = bytearray(size.__index__())
        n = self.readinto(b)
        if n is None:
            # Non-blocking stream with no data available.
            return None
        del b[n:]
        return bytes(b)

    def readall(self):
        """Read until EOF, using multiple read() call."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        if res:
            return bytes(res)
        else:
            # b'' or None -- propagate the distinction between EOF and
            # "would block" from the final read().
            return data

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Returns an int representing the number of bytes read (0 for EOF), or
        None if the object is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b):
        """Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than the
        length of b in bytes.
        """
        self._unsupported("write")
# Cross-register with the C implementation, and make the C FileIO count as
# a RawIOBase so isinstance checks against this ABC accept it.
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first). But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, size=-1):
        """Read up to size bytes with at most one read() system call,
        where size is an int.
        """
        self._unsupported("read1")

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """

        return self._readinto(b, read1=False)

    def readinto1(self, b):
        """Read bytes into buffer *b*, using at most one system call

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """

        return self._readinto(b, read1=True)

    def _readinto(self, b, read1):
        # Shared implementation for readinto()/readinto1(): read into a
        # temporary bytes object and copy into the caller's buffer.
        if not isinstance(b, memoryview):
            b = memoryview(b)
        # Cast to unsigned bytes so slice assignment accepts a bytes object.
        b = b.cast('B')

        if read1:
            data = self.read1(len(b))
        else:
            data = self.read(len(b))
        n = len(data)

        b[:n] = data

        return n

    def write(self, b):
        """Write the given bytes buffer to the IO stream.

        Return the number of bytes written, which is always the length of b
        in bytes.

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self):
        """
        Separate the underlying raw stream from the buffer and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        self._unsupported("detach")
class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream.  It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        self._raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        new_position = self.raw.seek(pos, whence)
        if new_position < 0:
            raise OSError("seek() returned an invalid position")
        return new_position

    def tell(self):
        pos = self.raw.tell()
        if pos < 0:
            raise OSError("tell() returned an invalid position")
        return pos

    def truncate(self, pos=None):
        self._checkClosed()
        self._checkWritable()

        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()

        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        if self.closed:
            raise ValueError("flush on closed file")
        self.raw.flush()

    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                # Close the raw stream even if the flush failed.
                self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        raw = self._raw
        self._raw = None
        return raw

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    @property
    def raw(self):
        # Read-only access to the wrapped raw stream (None after detach()).
        return self._raw

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __getstate__(self):
        # Buffered objects wrap live OS resources and cannot be pickled.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")

    def __repr__(self):
        modname = self.__class__.__module__
        clsname = self.__class__.__qualname__
        try:
            name = self.name
        except AttributeError:
            return "<{}.{}>".format(modname, clsname)
        else:
            return "<{}.{} name={!r}>".format(modname, clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()
class BytesIO(BufferedIOBase):

    """Buffered I/O implementation using an in-memory bytes buffer."""

    # Initialize _buffer as soon as possible since it's used by __del__()
    # which calls close()
    _buffer = None

    def __init__(self, initial_bytes=None):
        buf = bytearray()
        if initial_bytes is not None:
            buf += initial_bytes
        self._buffer = buf
        self._pos = 0

    def __getstate__(self):
        if self.closed:
            raise ValueError("__getstate__ on closed file")
        return self.__dict__.copy()

    def getvalue(self):
        """Return the bytes value (contents) of the buffer
        """
        if self.closed:
            raise ValueError("getvalue on closed file")
        return bytes(self._buffer)

    def getbuffer(self):
        """Return a readable and writable view of the buffer.
        """
        if self.closed:
            raise ValueError("getbuffer on closed file")
        return memoryview(self._buffer)

    def close(self):
        if self._buffer is not None:
            self._buffer.clear()
        super().close()

    def read(self, size=-1):
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        else:
            # Accept anything implementing __index__ (operator.index style).
            try:
                size_index = size.__index__
            except AttributeError:
                raise TypeError(f"{size!r} is not an integer")
            else:
                size = size_index()
        if size < 0:
            size = len(self._buffer)
        if len(self._buffer) <= self._pos:
            return b""
        newpos = min(len(self._buffer), self._pos + size)
        b = self._buffer[self._pos : newpos]
        self._pos = newpos
        return bytes(b)

    def read1(self, size=-1):
        """This is the same as read.
        """
        return self.read(size)

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with memoryview(b) as view:
            n = view.nbytes  # Size of any bytes-like object
        if n == 0:
            return 0
        pos = self._pos
        if pos > len(self._buffer):
            # Inserts null bytes between the current end of the file
            # and the new write position.
            padding = b'\x00' * (pos - len(self._buffer))
            self._buffer += padding
        self._buffer[pos:pos + n] = b
        self._pos += n
        return n

    def seek(self, pos, whence=0):
        if self.closed:
            raise ValueError("seek on closed file")
        try:
            pos_index = pos.__index__
        except AttributeError:
            raise TypeError(f"{pos!r} is not an integer")
        else:
            pos = pos_index()
        if whence == 0:
            if pos < 0:
                raise ValueError("negative seek position %r" % (pos,))
            self._pos = pos
        elif whence == 1:
            # Relative seeks clamp at 0 rather than raising.
            self._pos = max(0, self._pos + pos)
        elif whence == 2:
            self._pos = max(0, len(self._buffer) + pos)
        else:
            raise ValueError("unsupported whence value")
        return self._pos

    def tell(self):
        if self.closed:
            raise ValueError("tell on closed file")
        return self._pos

    def truncate(self, pos=None):
        if self.closed:
            raise ValueError("truncate on closed file")
        if pos is None:
            pos = self._pos
        else:
            try:
                pos_index = pos.__index__
            except AttributeError:
                raise TypeError(f"{pos!r} is not an integer")
            else:
                pos = pos_index()
            if pos < 0:
                raise ValueError("negative truncate position %r" % (pos,))
        del self._buffer[pos:]
        return pos

    def readable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def writable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def seekable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True
class BufferedReader(_BufferedIOMixin):

    """BufferedReader(raw[, buffer_size])

    A buffer for a readable, sequential BaseRawIO object.

    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise OSError('"raw" argument must be readable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        # Serializes access to the read buffer across threads.
        self._read_lock = Lock()

    def readable(self):
        return self.raw.readable()

    def _reset_read_buf(self):
        # _read_buf holds buffered bytes; _read_pos is the index of the
        # first unconsumed byte within it.
        self._read_buf = b""
        self._read_pos = 0

    def read(self, size=None):
        """Read size bytes.

        Returns exactly size bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If size is negative, read until EOF or until read() would
        block.
        """
        if size is not None and size < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(size)

    def _read_unlocked(self, n=None):
        # Caller must hold self._read_lock.
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos

        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            if hasattr(self.raw, 'readall'):
                chunk = self.raw.readall()
                if chunk is None:
                    return buf[pos:] or None
                else:
                    return buf[pos:] + chunk
            chunks = [buf[pos:]]  # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                chunk = self.raw.read()
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val

        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos  # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            chunk = self.raw.read(wanted)
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:]  # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val

    def peek(self, size=0):
        """Returns buffered bytes without advancing the position.

        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it.  We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(size)

    def _peek_unlocked(self, n=0):
        # Caller must hold self._read_lock.
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            to_read = self.buffer_size - have
            current = self.raw.read(to_read)
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]

    def read1(self, size=-1):
        """Reads up to size bytes, with at most one read() system call."""
        # Returns up to size bytes.  If at least one byte is buffered, we
        # only return buffered bytes.  Otherwise, we do one raw read.
        if size < 0:
            size = self.buffer_size
        if size == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(size, len(self._read_buf) - self._read_pos))

    # Implementing readinto() and readinto1() is not strictly necessary (we
    # could rely on the base class that provides an implementation in terms of
    # read() and read1()).  We do it anyway to keep the _pyio implementation
    # similar to the io implementation (which implements the methods for
    # performance reasons).
    def _readinto(self, buf, read1):
        """Read data into *buf* with at most one system call."""

        # Need to create a memoryview object of type 'b', otherwise
        # we may not be able to assign bytes to it, and slicing it
        # would create a new object.
        if not isinstance(buf, memoryview):
            buf = memoryview(buf)
        if buf.nbytes == 0:
            return 0
        buf = buf.cast('B')

        written = 0
        with self._read_lock:
            while written < len(buf):

                # First try to read from internal buffer
                avail = min(len(self._read_buf) - self._read_pos, len(buf))
                if avail:
                    buf[written:written+avail] = \
                        self._read_buf[self._read_pos:self._read_pos+avail]
                    self._read_pos += avail
                    written += avail
                    if written == len(buf):
                        break

                # If remaining space in callers buffer is larger than
                # internal buffer, read directly into callers buffer
                if len(buf) - written > self.buffer_size:
                    n = self.raw.readinto(buf[written:])
                    if not n:
                        break  # eof
                    written += n

                # Otherwise refill internal buffer - unless we're
                # in read1 mode and already got some data
                elif not (read1 and written):
                    if not self._peek_unlocked(1):
                        break  # eof

                # In readinto1 mode, return as soon as we have some data
                if read1 and written:
                    break

        return written

    def tell(self):
        # Logical position = raw position minus what is still buffered.
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                # Adjust a relative seek for the unread buffered bytes.
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
        return pos
class BufferedWriter(_BufferedIOMixin):

    """A buffer for a writeable sequential RawIO object.

    The constructor creates a BufferedWriter for the given writeable raw
    stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        if not raw.writable():
            raise OSError('"raw" argument must be writable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._write_buf = bytearray()
        # Serializes access to the write buffer across threads.
        self._write_lock = Lock()

    def writable(self):
        return self.raw.writable()

    def write(self, b):
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with self._write_lock:
            if self.closed:
                raise ValueError("write to closed file")
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer.  (This may
                # raise BlockingIOError with characters_written == 0.)
                self._flush_unlocked()
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a partial
                        # write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                    raise BlockingIOError(e.errno, e.strerror, written)
            return written

    def truncate(self, pos=None):
        with self._write_lock:
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)

    def flush(self):
        with self._write_lock:
            self._flush_unlocked()

    def _flush_unlocked(self):
        # Caller must hold self._write_lock.
        if self.closed:
            raise ValueError("flush on closed file")
        while self._write_buf:
            try:
                n = self.raw.write(self._write_buf)
            except BlockingIOError:
                raise RuntimeError("self.raw should implement RawIOBase: it "
                                   "should not raise BlockingIOError")
            if n is None:
                raise BlockingIOError(
                    errno.EAGAIN,
                    "write could not complete without blocking", 0)
            if n > len(self._write_buf) or n < 0:
                raise OSError("write() returned incorrect number of bytes")
            del self._write_buf[:n]

    def tell(self):
        # Logical position = raw position plus bytes not yet flushed.
        return _BufferedIOMixin.tell(self) + len(self._write_buf)

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._write_lock:
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)

    def close(self):
        with self._write_lock:
            if self.raw is None or self.closed:
                return
        # We have to release the lock and call self.flush() (which will
        # probably just re-take the lock) in case flush has been overridden in
        # a subclass or the user set self.flush to something. This is the same
        # behavior as the C implementation.
        try:
            # may raise BlockingIOError or BrokenPipeError etc
            self.flush()
        finally:
            with self._write_lock:
                self.raw.close()
class BufferedRWPair(BufferedIOBase):

    """A buffered reader and writer object together.

    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.

    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.

    def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
        """Constructor.

        The arguments are two RawIO instances.
        """
        if not reader.readable():
            raise OSError('"reader" argument must be readable.')

        if not writer.writable():
            raise OSError('"writer" argument must be writable.')

        # Each side gets its own independent buffer.
        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)

    def read(self, size=-1):
        if size is None:
            size = -1
        return self.reader.read(size)

    def readinto(self, b):
        return self.reader.readinto(b)

    def write(self, b):
        return self.writer.write(b)

    def peek(self, size=0):
        return self.reader.peek(size)

    def read1(self, size=-1):
        return self.reader.read1(size)

    def readinto1(self, b):
        return self.reader.readinto1(b)

    def readable(self):
        return self.reader.readable()

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    def close(self):
        # Close the writer first so buffered output is flushed even if
        # closing the reader fails.
        try:
            self.writer.close()
        finally:
            self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        # NOTE: mirrors the C implementation -- only the writer side is
        # consulted here.
        return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
|
| 1426 |
+
|
| 1427 |
+
"""A buffered interface to random access streams.
|
| 1428 |
+
|
| 1429 |
+
The constructor creates a reader and writer for a seekable stream,
|
| 1430 |
+
raw, given in the first argument. If the buffer_size is omitted it
|
| 1431 |
+
defaults to DEFAULT_BUFFER_SIZE.
|
| 1432 |
+
"""
|
| 1433 |
+
|
| 1434 |
+
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
|
| 1435 |
+
raw._checkSeekable()
|
| 1436 |
+
BufferedReader.__init__(self, raw, buffer_size)
|
| 1437 |
+
BufferedWriter.__init__(self, raw, buffer_size)
|
| 1438 |
+
|
| 1439 |
+
def seek(self, pos, whence=0):
|
| 1440 |
+
if whence not in valid_seek_flags:
|
| 1441 |
+
raise ValueError("invalid whence value")
|
| 1442 |
+
self.flush()
|
| 1443 |
+
if self._read_buf:
|
| 1444 |
+
# Undo read ahead.
|
| 1445 |
+
with self._read_lock:
|
| 1446 |
+
self.raw.seek(self._read_pos - len(self._read_buf), 1)
|
| 1447 |
+
# First do the raw seek, then empty the read buffer, so that
|
| 1448 |
+
# if the raw seek fails, we don't lose buffered data forever.
|
| 1449 |
+
pos = self.raw.seek(pos, whence)
|
| 1450 |
+
with self._read_lock:
|
| 1451 |
+
self._reset_read_buf()
|
| 1452 |
+
if pos < 0:
|
| 1453 |
+
raise OSError("seek() returned invalid position")
|
| 1454 |
+
return pos
|
| 1455 |
+
|
| 1456 |
+
def tell(self):
|
| 1457 |
+
if self._write_buf:
|
| 1458 |
+
return BufferedWriter.tell(self)
|
| 1459 |
+
else:
|
| 1460 |
+
return BufferedReader.tell(self)
|
| 1461 |
+
|
| 1462 |
+
def truncate(self, pos=None):
|
| 1463 |
+
if pos is None:
|
| 1464 |
+
pos = self.tell()
|
| 1465 |
+
# Use seek to flush the read buffer.
|
| 1466 |
+
return BufferedWriter.truncate(self, pos)
|
| 1467 |
+
|
| 1468 |
+
def read(self, size=None):
|
| 1469 |
+
if size is None:
|
| 1470 |
+
size = -1
|
| 1471 |
+
self.flush()
|
| 1472 |
+
return BufferedReader.read(self, size)
|
| 1473 |
+
|
| 1474 |
+
def readinto(self, b):
|
| 1475 |
+
self.flush()
|
| 1476 |
+
return BufferedReader.readinto(self, b)
|
| 1477 |
+
|
| 1478 |
+
def peek(self, size=0):
|
| 1479 |
+
self.flush()
|
| 1480 |
+
return BufferedReader.peek(self, size)
|
| 1481 |
+
|
| 1482 |
+
def read1(self, size=-1):
|
| 1483 |
+
self.flush()
|
| 1484 |
+
return BufferedReader.read1(self, size)
|
| 1485 |
+
|
| 1486 |
+
def readinto1(self, b):
|
| 1487 |
+
self.flush()
|
| 1488 |
+
return BufferedReader.readinto1(self, b)
|
| 1489 |
+
|
| 1490 |
+
    def write(self, b):
        """Write bytes *b* through the write buffer, discarding any read-ahead.

        Returns the number of bytes written (from BufferedWriter.write).
        """
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                # Rewind the raw stream so the write lands where the user
                # last observed the position, then drop the stale buffer.
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)
|
| 1497 |
+
|
| 1498 |
+
|
| 1499 |
+
class FileIO(RawIOBase):
    """Raw, unbuffered binary I/O on an OS-level file descriptor.

    Pure-Python counterpart of the C-implemented io.FileIO.
    """

    # Class-level defaults so __del__/__repr__ work even if __init__ failed.
    _fd = -1          # OS file descriptor; -1 means "not open"
    _created = False  # opened with 'x' (exclusive creation)
    _readable = False
    _writable = False
    _appending = False
    _seekable = None  # tri-state: None = not yet probed (see seekable())
    _closefd = True   # whether close() should close the descriptor

    def __init__(self, file, mode='r', closefd=True, opener=None):
        """Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
        writing, exclusive creation or appending. The file will be created if it
        doesn't exist when opened for writing or appending; it will be truncated
        when opened for writing. A FileExistsError will be raised if it already
        exists when opened for creating. Opening a file for creating implies
        writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
        to allow simultaneous reading and writing. A custom opener can be used by
        passing a callable as *opener*. The underlying file descriptor for the file
        object is then obtained by calling opener with (*name*, *flags*).
        *opener* must return an open file descriptor (passing os.open as *opener*
        results in functionality similar to passing None).
        """
        if self._fd >= 0:
            # Have to close the existing file first.
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                self._fd = -1

        # *file* is either a path-like passed to os.open/opener, or an
        # already-open descriptor (int).  Floats are rejected explicitly to
        # match the C implementation's error message.
        if isinstance(file, float):
            raise TypeError('integer argument expected, got float')
        if isinstance(file, int):
            fd = file
            if fd < 0:
                raise ValueError('negative file descriptor')
        else:
            fd = -1

        if not isinstance(mode, str):
            raise TypeError('invalid mode: %s' % (mode,))
        if not set(mode) <= set('xrwab+'):
            raise ValueError('invalid mode: %s' % (mode,))
        if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
            raise ValueError('Must have exactly one of create/read/write/append '
                             'mode and at most one plus')

        # Translate the mode letter into os.open() flags.
        if 'x' in mode:
            self._created = True
            self._writable = True
            flags = os.O_EXCL | os.O_CREAT
        elif 'r' in mode:
            self._readable = True
            flags = 0
        elif 'w' in mode:
            self._writable = True
            flags = os.O_CREAT | os.O_TRUNC
        elif 'a' in mode:
            self._writable = True
            self._appending = True
            flags = os.O_APPEND | os.O_CREAT

        if '+' in mode:
            self._readable = True
            self._writable = True

        if self._readable and self._writable:
            flags |= os.O_RDWR
        elif self._readable:
            flags |= os.O_RDONLY
        else:
            flags |= os.O_WRONLY

        # O_BINARY only exists on Windows; elsewhere getattr yields 0.
        flags |= getattr(os, 'O_BINARY', 0)

        # Make the descriptor non-inheritable atomically where the OS
        # supports it (Windows O_NOINHERIT / POSIX O_CLOEXEC).
        noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
                          getattr(os, 'O_CLOEXEC', 0))
        flags |= noinherit_flag

        # owned_fd tracks a descriptor *we* opened, so the except clause
        # below can close it on failure without touching a caller-supplied fd.
        owned_fd = None
        try:
            if fd < 0:
                if not closefd:
                    raise ValueError('Cannot use closefd=False with file name')
                if opener is None:
                    fd = os.open(file, flags, 0o666)
                else:
                    fd = opener(file, flags)
                    if not isinstance(fd, int):
                        raise TypeError('expected integer from opener')
                    if fd < 0:
                        raise OSError('Negative file descriptor')
                owned_fd = fd
                if not noinherit_flag:
                    os.set_inheritable(fd, False)

            self._closefd = closefd
            fdfstat = os.fstat(fd)
            try:
                if stat.S_ISDIR(fdfstat.st_mode):
                    raise IsADirectoryError(errno.EISDIR,
                                            os.strerror(errno.EISDIR), file)
            except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
                # don't exist.
                pass
            self._blksize = getattr(fdfstat, 'st_blksize', 0)
            if self._blksize <= 1:
                self._blksize = DEFAULT_BUFFER_SIZE

            if _setmode:
                # don't translate newlines (\r\n <=> \n)
                _setmode(fd, os.O_BINARY)

            self.name = file
            if self._appending:
                # For consistent behaviour, we explicitly seek to the
                # end of file (otherwise, it might be done only on the
                # first write()).
                try:
                    os.lseek(fd, 0, SEEK_END)
                except OSError as e:
                    if e.errno != errno.ESPIPE:
                        raise
        except:
            # Don't leak a descriptor we opened ourselves.
            if owned_fd is not None:
                os.close(owned_fd)
            raise
        # Assign last: self._fd >= 0 signals a fully-initialized object.
        self._fd = fd

    def __del__(self):
        # Close at finalization, warning about the leak like the C version.
        if self._fd >= 0 and self._closefd and not self.closed:
            import warnings
            warnings.warn('unclosed file %r' % (self,), ResourceWarning,
                          stacklevel=2, source=self)
            self.close()

    def __getstate__(self):
        # File objects cannot be meaningfully pickled.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")

    def __repr__(self):
        class_name = '%s.%s' % (self.__class__.__module__,
                                self.__class__.__qualname__)
        if self.closed:
            return '<%s [closed]>' % class_name
        try:
            name = self.name
        except AttributeError:
            # Opened from a bare fd: no name attribute was set.
            return ('<%s fd=%d mode=%r closefd=%r>' %
                    (class_name, self._fd, self.mode, self._closefd))
        else:
            return ('<%s name=%r mode=%r closefd=%r>' %
                    (class_name, name, self.mode, self._closefd))

    def _checkReadable(self):
        # Raise if the file was not opened in a readable mode.
        if not self._readable:
            raise UnsupportedOperation('File not open for reading')

    def _checkWritable(self, msg=None):
        # Raise if the file was not opened in a writable mode.
        if not self._writable:
            raise UnsupportedOperation('File not open for writing')

    def read(self, size=None):
        """Read at most size bytes, returned as bytes.

        Only makes one system call, so less data may be returned than requested
        In non-blocking mode, returns None if no data is available.
        Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        if size is None or size < 0:
            return self.readall()
        try:
            return os.read(self._fd, size)
        except BlockingIOError:
            return None

    def readall(self):
        """Read all data from the file, returned as bytes.

        In non-blocking mode, returns as much as is immediately available,
        or None if no data is available. Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        bufsize = DEFAULT_BUFFER_SIZE
        try:
            # Size the first read from the remaining file length (+1 so a
            # single read can hit EOF); fall back for unseekable files.
            pos = os.lseek(self._fd, 0, SEEK_CUR)
            end = os.fstat(self._fd).st_size
            if end >= pos:
                bufsize = end - pos + 1
        except OSError:
            pass

        result = bytearray()
        while True:
            if len(result) >= bufsize:
                # Grow geometrically once the estimate is exhausted.
                bufsize = len(result)
                bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
            n = bufsize - len(result)
            try:
                chunk = os.read(self._fd, n)
            except BlockingIOError:
                # Partial data beats None; only None when nothing was read.
                if result:
                    break
                return None
            if not chunk: # reached the end of the file
                break
            result += chunk

        return bytes(result)

    def readinto(self, b):
        """Same as RawIOBase.readinto()."""
        # Cast to unsigned bytes so any buffer shape is writable linearly.
        m = memoryview(b).cast('B')
        data = self.read(len(m))
        n = len(data)
        m[:n] = data
        return n

    def write(self, b):
        """Write bytes b to file, return number written.

        Only makes one system call, so not all of the data may be written.
        The number of bytes actually written is returned. In non-blocking mode,
        returns None if the write would block.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return os.write(self._fd, b)
        except BlockingIOError:
            return None

    def seek(self, pos, whence=SEEK_SET):
        """Move to new file position.

        Argument offset is a byte count. Optional argument whence defaults to
        SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
        are SEEK_CUR or 1 (move relative to current position, positive or negative),
        and SEEK_END or 2 (move relative to end of file, usually negative, although
        many platforms allow seeking beyond the end of a file).

        Note that not all file objects are seekable.
        """
        if isinstance(pos, float):
            raise TypeError('an integer is required')
        self._checkClosed()
        return os.lseek(self._fd, pos, whence)

    def tell(self):
        """tell() -> int.  Current file position.

        Can raise OSError for non seekable files."""
        self._checkClosed()
        return os.lseek(self._fd, 0, SEEK_CUR)

    def truncate(self, size=None):
        """Truncate the file to at most size bytes.

        Size defaults to the current file position, as returned by tell().
        The current file position is changed to the value of size.
        """
        self._checkClosed()
        self._checkWritable()
        if size is None:
            size = self.tell()
        os.ftruncate(self._fd, size)
        return size

    def close(self):
        """Close the file.

        A closed file cannot be used for further I/O operations.  close() may be
        called more than once without error.
        """
        if not self.closed:
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                # Mark closed even if os.close() raised.
                super().close()

    def seekable(self):
        """True if file supports random-access."""
        self._checkClosed()
        if self._seekable is None:
            # Probe once and cache: a failing tell() means not seekable.
            try:
                self.tell()
            except OSError:
                self._seekable = False
            else:
                self._seekable = True
        return self._seekable

    def readable(self):
        """True if file was opened in a read mode."""
        self._checkClosed()
        return self._readable

    def writable(self):
        """True if file was opened in a write mode."""
        self._checkClosed()
        return self._writable

    def fileno(self):
        """Return the underlying file descriptor (an integer)."""
        self._checkClosed()
        return self._fd

    def isatty(self):
        """True if the file is connected to a TTY device."""
        self._checkClosed()
        return os.isatty(self._fd)

    @property
    def closefd(self):
        """True if the file descriptor will be closed by close()."""
        return self._closefd

    @property
    def mode(self):
        """String giving the file mode"""
        # Reconstruct the binary mode string from the stored flags.
        if self._created:
            if self._readable:
                return 'xb+'
            else:
                return 'xb'
        elif self._appending:
            if self._readable:
                return 'ab+'
            else:
                return 'ab'
        elif self._readable:
            if self._writable:
                return 'rb+'
            else:
                return 'rb'
        else:
            return 'wb'
|
| 1840 |
+
|
| 1841 |
+
|
| 1842 |
+
class TextIOBase(IOBase):

    """Base class for text I/O.

    This class provides a character and line based interface to stream
    I/O.

    All methods below are stubs that raise UnsupportedOperation (via
    _unsupported); concrete subclasses override them.
    """

    def read(self, size=-1):
        """Read at most size characters from stream, where size is an int.

        Read from underlying buffer until we have size characters or we hit EOF.
        If size is negative or omitted, read until EOF.

        Returns a string.
        """
        self._unsupported("read")

    def write(self, s):
        """Write string s to the stream and return the number of characters written."""
        self._unsupported("write")

    def truncate(self, pos=None):
        """Truncate the size to pos, where pos is an int."""
        self._unsupported("truncate")

    def readline(self):
        """Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    def detach(self):
        """
        Separate the underlying buffer from the TextIOBase and return it.

        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")

    @property
    def encoding(self):
        """Name of the text encoding in use.  Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None

    @property
    def errors(self):
        """Error setting of the decoder or encoder.

        Subclasses should override."""
        return None
|
| 1905 |
+
|
| 1906 |
+
# Register this pure-Python class as a virtual subclass of the ABC in the
# (possibly C-accelerated) io module, so isinstance checks work either way.
io.TextIOBase.register(TextIOBase)
|
| 1907 |
+
|
| 1908 |
+
|
| 1909 |
+
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode.  It wraps
    another incremental decoder, translating \r\n and \r into \n.  It also
    records the types of newlines encountered.  When used with
    translate=False, it ensures that the newline sequence is returned in
    one piece.
    """
    def __init__(self, decoder, translate, errors='strict'):
        """Wrap *decoder* (or None for already-decoded str input).

        translate: if true, \r and \r\n are rewritten to \n on output.
        """
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        # Bitmask of newline kinds seen so far (_LF | _CR | _CRLF).
        self.seennl = 0
        # True when the previous chunk ended with '\r' that we held back.
        self.pendingcr = False

    def decode(self, input, final=False):
        """Decode *input*, translating/recording newlines incrementally."""
        # decode input (with the eventual \r from a previous pass)
        if self.decoder is None:
            output = input
        else:
            output = self.decoder.decode(input, final=final)
        if self.pendingcr and (output or final):
            output = "\r" + output
            self.pendingcr = False

        # retain last \r even when not translating data:
        # then readline() is sure to get \r\n in one pass
        if output.endswith("\r") and not final:
            output = output[:-1]
            self.pendingcr = True

        # Record which newlines are read
        crlf = output.count('\r\n')
        cr = output.count('\r') - crlf
        lf = output.count('\n') - crlf
        self.seennl |= (lf and self._LF) | (cr and self._CR) \
                    | (crlf and self._CRLF)

        if self.translate:
            # Replace \r\n first so lone \r replacement can't double-convert.
            if crlf:
                output = output.replace("\r\n", "\n")
            if cr:
                output = output.replace("\r", "\n")

        return output

    def getstate(self):
        """Return (buffered_bytes, flags) with pendingcr in the low bit."""
        if self.decoder is None:
            buf = b""
            flag = 0
        else:
            buf, flag = self.decoder.getstate()
        # Shift the inner decoder's flags up to make room for pendingcr.
        flag <<= 1
        if self.pendingcr:
            flag |= 1
        return buf, flag

    def setstate(self, state):
        """Restore state previously produced by getstate()."""
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        if self.decoder is not None:
            self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        """Reset newline tracking and the wrapped decoder."""
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()

    # Bit values for seennl.
    _LF = 1
    _CR = 2
    _CRLF = 4

    @property
    def newlines(self):
        """Newline kinds seen so far: None, a str, or a tuple of strs.

        seennl indexes this table; each entry lists the newline strings
        whose bits are set.
        """
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n")
               )[self.seennl]
|
| 1992 |
+
|
| 1993 |
+
|
| 1994 |
+
class TextIOWrapper(TextIOBase):
|
| 1995 |
+
|
| 1996 |
+
r"""Character and line based layer over a BufferedIOBase object, buffer.
|
| 1997 |
+
|
| 1998 |
+
encoding gives the name of the encoding that the stream will be
|
| 1999 |
+
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
|
| 2000 |
+
|
| 2001 |
+
errors determines the strictness of encoding and decoding (see the
|
| 2002 |
+
codecs.register) and defaults to "strict".
|
| 2003 |
+
|
| 2004 |
+
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
|
| 2005 |
+
handling of line endings. If it is None, universal newlines is
|
| 2006 |
+
enabled. With this enabled, on input, the lines endings '\n', '\r',
|
| 2007 |
+
or '\r\n' are translated to '\n' before being returned to the
|
| 2008 |
+
caller. Conversely, on output, '\n' is translated to the system
|
| 2009 |
+
default line separator, os.linesep. If newline is any other of its
|
| 2010 |
+
legal values, that newline becomes the newline when the file is read
|
| 2011 |
+
and it is returned untranslated. On output, '\n' is converted to the
|
| 2012 |
+
newline.
|
| 2013 |
+
|
| 2014 |
+
If line_buffering is True, a call to flush is implied when a call to
|
| 2015 |
+
write contains a newline character.
|
| 2016 |
+
"""
|
| 2017 |
+
|
| 2018 |
+
_CHUNK_SIZE = 2048
|
| 2019 |
+
|
| 2020 |
+
# Initialize _buffer as soon as possible since it's used by __del__()
|
| 2021 |
+
# which calls close()
|
| 2022 |
+
_buffer = None
|
| 2023 |
+
|
| 2024 |
+
# The write_through argument has no effect here since this
|
| 2025 |
+
# implementation always writes through. The argument is present only
|
| 2026 |
+
# so that the signature can match the signature of the C version.
|
| 2027 |
+
    def __init__(self, buffer, encoding=None, errors=None, newline=None,
                 line_buffering=False, write_through=False):
        """Wrap the binary *buffer* in a text layer.

        encoding/errors/newline/line_buffering/write_through are documented
        on the class.  Raises ValueError/TypeError/LookupError for invalid
        arguments.
        """
        self._check_newline(newline)
        # text_encoding() applies the default-encoding policy (may return
        # the sentinel string "locale").
        encoding = text_encoding(encoding)

        if encoding == "locale":
            # Prefer the device encoding of the underlying fd, if any.
            try:
                encoding = os.device_encoding(buffer.fileno()) or "locale"
            except (AttributeError, UnsupportedOperation):
                pass

        if encoding == "locale":
            try:
                import locale
            except ImportError:
                # Importing locale may fail if Python is being built
                encoding = "utf-8"
            else:
                encoding = locale.getpreferredencoding(False)

        if not isinstance(encoding, str):
            raise ValueError("invalid encoding: %r" % encoding)

        if not codecs.lookup(encoding)._is_text_encoding:
            msg = ("%r is not a text encoding; "
                   "use codecs.open() to handle arbitrary codecs")
            raise LookupError(msg % encoding)

        if errors is None:
            errors = "strict"
        else:
            if not isinstance(errors, str):
                raise ValueError("invalid errors: %r" % errors)
            if _CHECK_ERRORS:
                codecs.lookup_error(errors)

        self._buffer = buffer
        self._decoded_chars = ''  # buffer for text returned from decoder
        self._decoded_chars_used = 0  # offset into _decoded_chars for read()
        self._snapshot = None  # info for reconstructing decoder state
        self._seekable = self._telling = self.buffer.seekable()
        self._has_read1 = hasattr(self.buffer, 'read1')
        self._configure(encoding, errors, newline,
                        line_buffering, write_through)
|
| 2071 |
+
|
| 2072 |
+
def _check_newline(self, newline):
|
| 2073 |
+
if newline is not None and not isinstance(newline, str):
|
| 2074 |
+
raise TypeError("illegal newline type: %r" % (type(newline),))
|
| 2075 |
+
if newline not in (None, "", "\n", "\r", "\r\n"):
|
| 2076 |
+
raise ValueError("illegal newline value: %r" % (newline,))
|
| 2077 |
+
|
| 2078 |
+
    def _configure(self, encoding=None, errors=None, newline=None,
                   line_buffering=False, write_through=False):
        """Install (or reinstall) the codec and newline settings.

        Shared by __init__ and reconfigure(); resets the cached encoder
        and decoder so they are rebuilt lazily with the new settings.
        """
        self._encoding = encoding
        self._errors = errors
        self._encoder = None
        self._decoder = None
        # bytes-consumed / chars-produced ratio of the last chunk (for tell()).
        self._b2cratio = 0.0

        self._readuniversal = not newline
        self._readtranslate = newline is None
        self._readnl = newline
        self._writetranslate = newline != ''
        self._writenl = newline or os.linesep

        self._line_buffering = line_buffering
        self._write_through = write_through

        # don't write a BOM in the middle of a file
        if self._seekable and self.writable():
            position = self.buffer.tell()
            if position != 0:
                try:
                    # setstate(0) tells the encoder "not at stream start".
                    self._get_encoder().setstate(0)
                except LookupError:
                    # Sometimes the encoder doesn't exist
                    pass
|
| 2104 |
+
|
| 2105 |
+
# self._snapshot is either None, or a tuple (dec_flags, next_input)
|
| 2106 |
+
# where dec_flags is the second (integer) item of the decoder state
|
| 2107 |
+
# and next_input is the chunk of input bytes that comes next after the
|
| 2108 |
+
# snapshot point. We use this to reconstruct decoder states in tell().
|
| 2109 |
+
|
| 2110 |
+
# Naming convention:
|
| 2111 |
+
# - "bytes_..." for integer variables that count input bytes
|
| 2112 |
+
# - "chars_..." for integer variables that count decoded characters
|
| 2113 |
+
|
| 2114 |
+
def __repr__(self):
|
| 2115 |
+
result = "<{}.{}".format(self.__class__.__module__,
|
| 2116 |
+
self.__class__.__qualname__)
|
| 2117 |
+
try:
|
| 2118 |
+
name = self.name
|
| 2119 |
+
except AttributeError:
|
| 2120 |
+
pass
|
| 2121 |
+
else:
|
| 2122 |
+
result += " name={0!r}".format(name)
|
| 2123 |
+
try:
|
| 2124 |
+
mode = self.mode
|
| 2125 |
+
except AttributeError:
|
| 2126 |
+
pass
|
| 2127 |
+
else:
|
| 2128 |
+
result += " mode={0!r}".format(mode)
|
| 2129 |
+
return result + " encoding={0!r}>".format(self.encoding)
|
| 2130 |
+
|
| 2131 |
+
    @property
    def encoding(self):
        """Name of the text encoding in use."""
        return self._encoding
|
| 2134 |
+
|
| 2135 |
+
    @property
    def errors(self):
        """Error-handling scheme of the encoder/decoder (e.g. 'strict')."""
        return self._errors
|
| 2138 |
+
|
| 2139 |
+
    @property
    def line_buffering(self):
        """Whether writes containing a newline trigger an implicit flush."""
        return self._line_buffering
|
| 2142 |
+
|
| 2143 |
+
    @property
    def write_through(self):
        """Whether writes are passed straight through (kept for API parity)."""
        return self._write_through
|
| 2146 |
+
|
| 2147 |
+
    @property
    def buffer(self):
        """The underlying binary buffer object."""
        return self._buffer
|
| 2150 |
+
|
| 2151 |
+
    def reconfigure(self, *,
                    encoding=None, errors=None, newline=Ellipsis,
                    line_buffering=None, write_through=None):
        """Reconfigure the text stream with new parameters.

        This also flushes the stream.

        Unspecified parameters keep their current values; Ellipsis is the
        "not given" sentinel for *newline* because None is a legal value.
        Raises UnsupportedOperation if encoding/errors/newline are changed
        after reading has started.
        """
        if (self._decoder is not None
                and (encoding is not None or errors is not None
                     or newline is not Ellipsis)):
            raise UnsupportedOperation(
                "It is not possible to set the encoding or newline of stream "
                "after the first read")

        if errors is None:
            if encoding is None:
                errors = self._errors
            else:
                # New encoding without explicit errors resets to 'strict'.
                errors = 'strict'
        elif not isinstance(errors, str):
            raise TypeError("invalid errors: %r" % errors)

        if encoding is None:
            encoding = self._encoding
        else:
            if not isinstance(encoding, str):
                raise TypeError("invalid encoding: %r" % encoding)

        if newline is Ellipsis:
            newline = self._readnl
        self._check_newline(newline)

        if line_buffering is None:
            line_buffering = self.line_buffering
        if write_through is None:
            write_through = self.write_through

        self.flush()
        self._configure(encoding, errors, newline,
                        line_buffering, write_through)
|
| 2191 |
+
|
| 2192 |
+
    def seekable(self):
        """Return whether the stream supports random access."""
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        # Cached from the buffer at construction time.
        return self._seekable
|
| 2196 |
+
|
| 2197 |
+
    def readable(self):
        """Return whether the underlying buffer is readable."""
        return self.buffer.readable()
|
| 2199 |
+
|
| 2200 |
+
    def writable(self):
        """Return whether the underlying buffer is writable."""
        return self.buffer.writable()
|
| 2202 |
+
|
| 2203 |
+
    def flush(self):
        """Flush the underlying buffer and re-enable tell()."""
        self.buffer.flush()
        # tell() is disabled by readline-style iteration; flushing restores it.
        self._telling = self._seekable
|
| 2206 |
+
|
| 2207 |
+
def close(self):
|
| 2208 |
+
if self.buffer is not None and not self.closed:
|
| 2209 |
+
try:
|
| 2210 |
+
self.flush()
|
| 2211 |
+
finally:
|
| 2212 |
+
self.buffer.close()
|
| 2213 |
+
|
| 2214 |
+
    @property
    def closed(self):
        """True once the underlying buffer has been closed."""
        return self.buffer.closed
|
| 2217 |
+
|
| 2218 |
+
    @property
    def name(self):
        """The name of the underlying buffer (raises if it has none)."""
        return self.buffer.name
|
| 2221 |
+
|
| 2222 |
+
    def fileno(self):
        """Return the underlying buffer's file descriptor."""
        return self.buffer.fileno()
|
| 2224 |
+
|
| 2225 |
+
    def isatty(self):
        """Return whether the underlying buffer is attached to a TTY."""
        return self.buffer.isatty()
|
| 2227 |
+
|
| 2228 |
+
    def write(self, s):
        """Write str *s* to the stream and return the number of characters written.

        Raises ValueError if the stream is closed and TypeError for non-str.
        """
        if self.closed:
            raise ValueError("write to closed file")
        if not isinstance(s, str):
            raise TypeError("can't write %s to text stream" %
                            s.__class__.__name__)
        # Return value is the pre-translation character count.
        length = len(s)
        haslf = (self._writetranslate or self._line_buffering) and "\n" in s
        if haslf and self._writetranslate and self._writenl != "\n":
            s = s.replace("\n", self._writenl)
        encoder = self._encoder or self._get_encoder()
        # XXX What if we were just reading?
        b = encoder.encode(s)
        self.buffer.write(b)
        if self._line_buffering and (haslf or "\r" in s):
            self.flush()
        # Writing invalidates any buffered decoded text and tell() snapshot.
        self._set_decoded_chars('')
        self._snapshot = None
        if self._decoder:
            self._decoder.reset()
        return length
|
| 2250 |
+
|
| 2251 |
+
def _get_encoder(self):
|
| 2252 |
+
make_encoder = codecs.getincrementalencoder(self._encoding)
|
| 2253 |
+
self._encoder = make_encoder(self._errors)
|
| 2254 |
+
return self._encoder
|
| 2255 |
+
|
| 2256 |
+
def _get_decoder(self):
|
| 2257 |
+
make_decoder = codecs.getincrementaldecoder(self._encoding)
|
| 2258 |
+
decoder = make_decoder(self._errors)
|
| 2259 |
+
if self._readuniversal:
|
| 2260 |
+
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
|
| 2261 |
+
self._decoder = decoder
|
| 2262 |
+
return decoder
|
| 2263 |
+
|
| 2264 |
+
# The following three methods implement an ADT for _decoded_chars.
|
| 2265 |
+
# Text returned from the decoder is buffered here until the client
|
| 2266 |
+
# requests it by calling our read() or readline() method.
|
| 2267 |
+
    def _set_decoded_chars(self, chars):
        """Set the _decoded_chars buffer and reset the consumption offset."""
        self._decoded_chars = chars
        self._decoded_chars_used = 0
|
| 2271 |
+
|
| 2272 |
+
def _get_decoded_chars(self, n=None):
|
| 2273 |
+
"""Advance into the _decoded_chars buffer."""
|
| 2274 |
+
offset = self._decoded_chars_used
|
| 2275 |
+
if n is None:
|
| 2276 |
+
chars = self._decoded_chars[offset:]
|
| 2277 |
+
else:
|
| 2278 |
+
chars = self._decoded_chars[offset:offset + n]
|
| 2279 |
+
self._decoded_chars_used += len(chars)
|
| 2280 |
+
return chars
|
| 2281 |
+
|
| 2282 |
+
    def _rewind_decoded_chars(self, n):
        """Push *n* consumed characters back into the decoded buffer."""
        if self._decoded_chars_used < n:
            # Internal-invariant violation, not a user error.
            raise AssertionError("rewind decoded_chars out of bounds")
        self._decoded_chars_used -= n
|
| 2287 |
+
|
| 2288 |
+
    def _read_chunk(self):
        """
        Read and decode the next chunk of data from the BufferedReader.
        """

        # The return value is True unless EOF was reached.  The decoded
        # string is placed in self._decoded_chars (replacing its previous
        # value).  The entire input chunk is sent to the decoder, though
        # some of it may remain buffered in the decoder, yet to be
        # converted.

        if self._decoder is None:
            raise ValueError("no decoder")

        if self._telling:
            # To prepare for tell(), we need to snapshot a point in the
            # file where the decoder's input buffer is empty.

            dec_buffer, dec_flags = self._decoder.getstate()
            # Given this, we know there was a valid snapshot point
            # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).

        # Read a chunk, decode it, and put the result in self._decoded_chars.
        # read1 avoids blocking for a full chunk when data is available.
        if self._has_read1:
            input_chunk = self.buffer.read1(self._CHUNK_SIZE)
        else:
            input_chunk = self.buffer.read(self._CHUNK_SIZE)
        eof = not input_chunk
        decoded_chars = self._decoder.decode(input_chunk, eof)
        self._set_decoded_chars(decoded_chars)
        # Track bytes-per-character to let tell() estimate positions.
        if decoded_chars:
            self._b2cratio = len(input_chunk) / len(self._decoded_chars)
        else:
            self._b2cratio = 0.0

        if self._telling:
            # At the snapshot point, len(dec_buffer) bytes before the read,
            # the next input to be decoded is dec_buffer + input_chunk.
            self._snapshot = (dec_flags, dec_buffer + input_chunk)

        return not eof
|
| 2329 |
+
|
| 2330 |
+
def _pack_cookie(self, position, dec_flags=0,
|
| 2331 |
+
bytes_to_feed=0, need_eof=False, chars_to_skip=0):
|
| 2332 |
+
# The meaning of a tell() cookie is: seek to position, set the
|
| 2333 |
+
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
|
| 2334 |
+
# into the decoder with need_eof as the EOF flag, then skip
|
| 2335 |
+
# chars_to_skip characters of the decoded result. For most simple
|
| 2336 |
+
# decoders, tell() will often just give a byte offset in the file.
|
| 2337 |
+
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
|
| 2338 |
+
(chars_to_skip<<192) | bool(need_eof)<<256)
|
| 2339 |
+
|
| 2340 |
+
def _unpack_cookie(self, bigint):
|
| 2341 |
+
rest, position = divmod(bigint, 1<<64)
|
| 2342 |
+
rest, dec_flags = divmod(rest, 1<<64)
|
| 2343 |
+
rest, bytes_to_feed = divmod(rest, 1<<64)
|
| 2344 |
+
need_eof, chars_to_skip = divmod(rest, 1<<64)
|
| 2345 |
+
return position, dec_flags, bytes_to_feed, bool(need_eof), chars_to_skip
|
| 2346 |
+
|
| 2347 |
+
def tell(self):
|
| 2348 |
+
if not self._seekable:
|
| 2349 |
+
raise UnsupportedOperation("underlying stream is not seekable")
|
| 2350 |
+
if not self._telling:
|
| 2351 |
+
raise OSError("telling position disabled by next() call")
|
| 2352 |
+
self.flush()
|
| 2353 |
+
position = self.buffer.tell()
|
| 2354 |
+
decoder = self._decoder
|
| 2355 |
+
if decoder is None or self._snapshot is None:
|
| 2356 |
+
if self._decoded_chars:
|
| 2357 |
+
# This should never happen.
|
| 2358 |
+
raise AssertionError("pending decoded text")
|
| 2359 |
+
return position
|
| 2360 |
+
|
| 2361 |
+
# Skip backward to the snapshot point (see _read_chunk).
|
| 2362 |
+
dec_flags, next_input = self._snapshot
|
| 2363 |
+
position -= len(next_input)
|
| 2364 |
+
|
| 2365 |
+
# How many decoded characters have been used up since the snapshot?
|
| 2366 |
+
chars_to_skip = self._decoded_chars_used
|
| 2367 |
+
if chars_to_skip == 0:
|
| 2368 |
+
# We haven't moved from the snapshot point.
|
| 2369 |
+
return self._pack_cookie(position, dec_flags)
|
| 2370 |
+
|
| 2371 |
+
# Starting from the snapshot position, we will walk the decoder
|
| 2372 |
+
# forward until it gives us enough decoded characters.
|
| 2373 |
+
saved_state = decoder.getstate()
|
| 2374 |
+
try:
|
| 2375 |
+
# Fast search for an acceptable start point, close to our
|
| 2376 |
+
# current pos.
|
| 2377 |
+
# Rationale: calling decoder.decode() has a large overhead
|
| 2378 |
+
# regardless of chunk size; we want the number of such calls to
|
| 2379 |
+
# be O(1) in most situations (common decoders, sensible input).
|
| 2380 |
+
# Actually, it will be exactly 1 for fixed-size codecs (all
|
| 2381 |
+
# 8-bit codecs, also UTF-16 and UTF-32).
|
| 2382 |
+
skip_bytes = int(self._b2cratio * chars_to_skip)
|
| 2383 |
+
skip_back = 1
|
| 2384 |
+
assert skip_bytes <= len(next_input)
|
| 2385 |
+
while skip_bytes > 0:
|
| 2386 |
+
decoder.setstate((b'', dec_flags))
|
| 2387 |
+
# Decode up to temptative start point
|
| 2388 |
+
n = len(decoder.decode(next_input[:skip_bytes]))
|
| 2389 |
+
if n <= chars_to_skip:
|
| 2390 |
+
b, d = decoder.getstate()
|
| 2391 |
+
if not b:
|
| 2392 |
+
# Before pos and no bytes buffered in decoder => OK
|
| 2393 |
+
dec_flags = d
|
| 2394 |
+
chars_to_skip -= n
|
| 2395 |
+
break
|
| 2396 |
+
# Skip back by buffered amount and reset heuristic
|
| 2397 |
+
skip_bytes -= len(b)
|
| 2398 |
+
skip_back = 1
|
| 2399 |
+
else:
|
| 2400 |
+
# We're too far ahead, skip back a bit
|
| 2401 |
+
skip_bytes -= skip_back
|
| 2402 |
+
skip_back = skip_back * 2
|
| 2403 |
+
else:
|
| 2404 |
+
skip_bytes = 0
|
| 2405 |
+
decoder.setstate((b'', dec_flags))
|
| 2406 |
+
|
| 2407 |
+
# Note our initial start point.
|
| 2408 |
+
start_pos = position + skip_bytes
|
| 2409 |
+
start_flags = dec_flags
|
| 2410 |
+
if chars_to_skip == 0:
|
| 2411 |
+
# We haven't moved from the start point.
|
| 2412 |
+
return self._pack_cookie(start_pos, start_flags)
|
| 2413 |
+
|
| 2414 |
+
# Feed the decoder one byte at a time. As we go, note the
|
| 2415 |
+
# nearest "safe start point" before the current location
|
| 2416 |
+
# (a point where the decoder has nothing buffered, so seek()
|
| 2417 |
+
# can safely start from there and advance to this location).
|
| 2418 |
+
bytes_fed = 0
|
| 2419 |
+
need_eof = False
|
| 2420 |
+
# Chars decoded since `start_pos`
|
| 2421 |
+
chars_decoded = 0
|
| 2422 |
+
for i in range(skip_bytes, len(next_input)):
|
| 2423 |
+
bytes_fed += 1
|
| 2424 |
+
chars_decoded += len(decoder.decode(next_input[i:i+1]))
|
| 2425 |
+
dec_buffer, dec_flags = decoder.getstate()
|
| 2426 |
+
if not dec_buffer and chars_decoded <= chars_to_skip:
|
| 2427 |
+
# Decoder buffer is empty, so this is a safe start point.
|
| 2428 |
+
start_pos += bytes_fed
|
| 2429 |
+
chars_to_skip -= chars_decoded
|
| 2430 |
+
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
|
| 2431 |
+
if chars_decoded >= chars_to_skip:
|
| 2432 |
+
break
|
| 2433 |
+
else:
|
| 2434 |
+
# We didn't get enough decoded data; signal EOF to get more.
|
| 2435 |
+
chars_decoded += len(decoder.decode(b'', final=True))
|
| 2436 |
+
need_eof = True
|
| 2437 |
+
if chars_decoded < chars_to_skip:
|
| 2438 |
+
raise OSError("can't reconstruct logical file position")
|
| 2439 |
+
|
| 2440 |
+
# The returned cookie corresponds to the last safe start point.
|
| 2441 |
+
return self._pack_cookie(
|
| 2442 |
+
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
|
| 2443 |
+
finally:
|
| 2444 |
+
decoder.setstate(saved_state)
|
| 2445 |
+
|
| 2446 |
+
def truncate(self, pos=None):
|
| 2447 |
+
self.flush()
|
| 2448 |
+
if pos is None:
|
| 2449 |
+
pos = self.tell()
|
| 2450 |
+
return self.buffer.truncate(pos)
|
| 2451 |
+
|
| 2452 |
+
def detach(self):
|
| 2453 |
+
if self.buffer is None:
|
| 2454 |
+
raise ValueError("buffer is already detached")
|
| 2455 |
+
self.flush()
|
| 2456 |
+
buffer = self._buffer
|
| 2457 |
+
self._buffer = None
|
| 2458 |
+
return buffer
|
| 2459 |
+
|
| 2460 |
+
def seek(self, cookie, whence=0):
|
| 2461 |
+
def _reset_encoder(position):
|
| 2462 |
+
"""Reset the encoder (merely useful for proper BOM handling)"""
|
| 2463 |
+
try:
|
| 2464 |
+
encoder = self._encoder or self._get_encoder()
|
| 2465 |
+
except LookupError:
|
| 2466 |
+
# Sometimes the encoder doesn't exist
|
| 2467 |
+
pass
|
| 2468 |
+
else:
|
| 2469 |
+
if position != 0:
|
| 2470 |
+
encoder.setstate(0)
|
| 2471 |
+
else:
|
| 2472 |
+
encoder.reset()
|
| 2473 |
+
|
| 2474 |
+
if self.closed:
|
| 2475 |
+
raise ValueError("tell on closed file")
|
| 2476 |
+
if not self._seekable:
|
| 2477 |
+
raise UnsupportedOperation("underlying stream is not seekable")
|
| 2478 |
+
if whence == SEEK_CUR:
|
| 2479 |
+
if cookie != 0:
|
| 2480 |
+
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
|
| 2481 |
+
# Seeking to the current position should attempt to
|
| 2482 |
+
# sync the underlying buffer with the current position.
|
| 2483 |
+
whence = 0
|
| 2484 |
+
cookie = self.tell()
|
| 2485 |
+
elif whence == SEEK_END:
|
| 2486 |
+
if cookie != 0:
|
| 2487 |
+
raise UnsupportedOperation("can't do nonzero end-relative seeks")
|
| 2488 |
+
self.flush()
|
| 2489 |
+
position = self.buffer.seek(0, whence)
|
| 2490 |
+
self._set_decoded_chars('')
|
| 2491 |
+
self._snapshot = None
|
| 2492 |
+
if self._decoder:
|
| 2493 |
+
self._decoder.reset()
|
| 2494 |
+
_reset_encoder(position)
|
| 2495 |
+
return position
|
| 2496 |
+
if whence != 0:
|
| 2497 |
+
raise ValueError("unsupported whence (%r)" % (whence,))
|
| 2498 |
+
if cookie < 0:
|
| 2499 |
+
raise ValueError("negative seek position %r" % (cookie,))
|
| 2500 |
+
self.flush()
|
| 2501 |
+
|
| 2502 |
+
# The strategy of seek() is to go back to the safe start point
|
| 2503 |
+
# and replay the effect of read(chars_to_skip) from there.
|
| 2504 |
+
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
|
| 2505 |
+
self._unpack_cookie(cookie)
|
| 2506 |
+
|
| 2507 |
+
# Seek back to the safe start point.
|
| 2508 |
+
self.buffer.seek(start_pos)
|
| 2509 |
+
self._set_decoded_chars('')
|
| 2510 |
+
self._snapshot = None
|
| 2511 |
+
|
| 2512 |
+
# Restore the decoder to its state from the safe start point.
|
| 2513 |
+
if cookie == 0 and self._decoder:
|
| 2514 |
+
self._decoder.reset()
|
| 2515 |
+
elif self._decoder or dec_flags or chars_to_skip:
|
| 2516 |
+
self._decoder = self._decoder or self._get_decoder()
|
| 2517 |
+
self._decoder.setstate((b'', dec_flags))
|
| 2518 |
+
self._snapshot = (dec_flags, b'')
|
| 2519 |
+
|
| 2520 |
+
if chars_to_skip:
|
| 2521 |
+
# Just like _read_chunk, feed the decoder and save a snapshot.
|
| 2522 |
+
input_chunk = self.buffer.read(bytes_to_feed)
|
| 2523 |
+
self._set_decoded_chars(
|
| 2524 |
+
self._decoder.decode(input_chunk, need_eof))
|
| 2525 |
+
self._snapshot = (dec_flags, input_chunk)
|
| 2526 |
+
|
| 2527 |
+
# Skip chars_to_skip of the decoded characters.
|
| 2528 |
+
if len(self._decoded_chars) < chars_to_skip:
|
| 2529 |
+
raise OSError("can't restore logical file position")
|
| 2530 |
+
self._decoded_chars_used = chars_to_skip
|
| 2531 |
+
|
| 2532 |
+
_reset_encoder(cookie)
|
| 2533 |
+
return cookie
|
| 2534 |
+
|
| 2535 |
+
def read(self, size=None):
|
| 2536 |
+
self._checkReadable()
|
| 2537 |
+
if size is None:
|
| 2538 |
+
size = -1
|
| 2539 |
+
else:
|
| 2540 |
+
try:
|
| 2541 |
+
size_index = size.__index__
|
| 2542 |
+
except AttributeError:
|
| 2543 |
+
raise TypeError(f"{size!r} is not an integer")
|
| 2544 |
+
else:
|
| 2545 |
+
size = size_index()
|
| 2546 |
+
decoder = self._decoder or self._get_decoder()
|
| 2547 |
+
if size < 0:
|
| 2548 |
+
# Read everything.
|
| 2549 |
+
result = (self._get_decoded_chars() +
|
| 2550 |
+
decoder.decode(self.buffer.read(), final=True))
|
| 2551 |
+
self._set_decoded_chars('')
|
| 2552 |
+
self._snapshot = None
|
| 2553 |
+
return result
|
| 2554 |
+
else:
|
| 2555 |
+
# Keep reading chunks until we have size characters to return.
|
| 2556 |
+
eof = False
|
| 2557 |
+
result = self._get_decoded_chars(size)
|
| 2558 |
+
while len(result) < size and not eof:
|
| 2559 |
+
eof = not self._read_chunk()
|
| 2560 |
+
result += self._get_decoded_chars(size - len(result))
|
| 2561 |
+
return result
|
| 2562 |
+
|
| 2563 |
+
def __next__(self):
|
| 2564 |
+
self._telling = False
|
| 2565 |
+
line = self.readline()
|
| 2566 |
+
if not line:
|
| 2567 |
+
self._snapshot = None
|
| 2568 |
+
self._telling = self._seekable
|
| 2569 |
+
raise StopIteration
|
| 2570 |
+
return line
|
| 2571 |
+
|
| 2572 |
+
def readline(self, size=None):
|
| 2573 |
+
if self.closed:
|
| 2574 |
+
raise ValueError("read from closed file")
|
| 2575 |
+
if size is None:
|
| 2576 |
+
size = -1
|
| 2577 |
+
else:
|
| 2578 |
+
try:
|
| 2579 |
+
size_index = size.__index__
|
| 2580 |
+
except AttributeError:
|
| 2581 |
+
raise TypeError(f"{size!r} is not an integer")
|
| 2582 |
+
else:
|
| 2583 |
+
size = size_index()
|
| 2584 |
+
|
| 2585 |
+
# Grab all the decoded text (we will rewind any extra bits later).
|
| 2586 |
+
line = self._get_decoded_chars()
|
| 2587 |
+
|
| 2588 |
+
start = 0
|
| 2589 |
+
# Make the decoder if it doesn't already exist.
|
| 2590 |
+
if not self._decoder:
|
| 2591 |
+
self._get_decoder()
|
| 2592 |
+
|
| 2593 |
+
pos = endpos = None
|
| 2594 |
+
while True:
|
| 2595 |
+
if self._readtranslate:
|
| 2596 |
+
# Newlines are already translated, only search for \n
|
| 2597 |
+
pos = line.find('\n', start)
|
| 2598 |
+
if pos >= 0:
|
| 2599 |
+
endpos = pos + 1
|
| 2600 |
+
break
|
| 2601 |
+
else:
|
| 2602 |
+
start = len(line)
|
| 2603 |
+
|
| 2604 |
+
elif self._readuniversal:
|
| 2605 |
+
# Universal newline search. Find any of \r, \r\n, \n
|
| 2606 |
+
# The decoder ensures that \r\n are not split in two pieces
|
| 2607 |
+
|
| 2608 |
+
# In C we'd look for these in parallel of course.
|
| 2609 |
+
nlpos = line.find("\n", start)
|
| 2610 |
+
crpos = line.find("\r", start)
|
| 2611 |
+
if crpos == -1:
|
| 2612 |
+
if nlpos == -1:
|
| 2613 |
+
# Nothing found
|
| 2614 |
+
start = len(line)
|
| 2615 |
+
else:
|
| 2616 |
+
# Found \n
|
| 2617 |
+
endpos = nlpos + 1
|
| 2618 |
+
break
|
| 2619 |
+
elif nlpos == -1:
|
| 2620 |
+
# Found lone \r
|
| 2621 |
+
endpos = crpos + 1
|
| 2622 |
+
break
|
| 2623 |
+
elif nlpos < crpos:
|
| 2624 |
+
# Found \n
|
| 2625 |
+
endpos = nlpos + 1
|
| 2626 |
+
break
|
| 2627 |
+
elif nlpos == crpos + 1:
|
| 2628 |
+
# Found \r\n
|
| 2629 |
+
endpos = crpos + 2
|
| 2630 |
+
break
|
| 2631 |
+
else:
|
| 2632 |
+
# Found \r
|
| 2633 |
+
endpos = crpos + 1
|
| 2634 |
+
break
|
| 2635 |
+
else:
|
| 2636 |
+
# non-universal
|
| 2637 |
+
pos = line.find(self._readnl)
|
| 2638 |
+
if pos >= 0:
|
| 2639 |
+
endpos = pos + len(self._readnl)
|
| 2640 |
+
break
|
| 2641 |
+
|
| 2642 |
+
if size >= 0 and len(line) >= size:
|
| 2643 |
+
endpos = size # reached length size
|
| 2644 |
+
break
|
| 2645 |
+
|
| 2646 |
+
# No line ending seen yet - get more data'
|
| 2647 |
+
while self._read_chunk():
|
| 2648 |
+
if self._decoded_chars:
|
| 2649 |
+
break
|
| 2650 |
+
if self._decoded_chars:
|
| 2651 |
+
line += self._get_decoded_chars()
|
| 2652 |
+
else:
|
| 2653 |
+
# end of file
|
| 2654 |
+
self._set_decoded_chars('')
|
| 2655 |
+
self._snapshot = None
|
| 2656 |
+
return line
|
| 2657 |
+
|
| 2658 |
+
if size >= 0 and endpos > size:
|
| 2659 |
+
endpos = size # don't exceed size
|
| 2660 |
+
|
| 2661 |
+
# Rewind _decoded_chars to just after the line ending we found.
|
| 2662 |
+
self._rewind_decoded_chars(len(line) - endpos)
|
| 2663 |
+
return line[:endpos]
|
| 2664 |
+
|
| 2665 |
+
@property
|
| 2666 |
+
def newlines(self):
|
| 2667 |
+
return self._decoder.newlines if self._decoder else None
|
| 2668 |
+
|
| 2669 |
+
|
| 2670 |
+
class StringIO(TextIOWrapper):
|
| 2671 |
+
"""Text I/O implementation using an in-memory buffer.
|
| 2672 |
+
|
| 2673 |
+
The initial_value argument sets the value of object. The newline
|
| 2674 |
+
argument is like the one of TextIOWrapper's constructor.
|
| 2675 |
+
"""
|
| 2676 |
+
|
| 2677 |
+
def __init__(self, initial_value="", newline="\n"):
|
| 2678 |
+
super(StringIO, self).__init__(BytesIO(),
|
| 2679 |
+
encoding="utf-8",
|
| 2680 |
+
errors="surrogatepass",
|
| 2681 |
+
newline=newline)
|
| 2682 |
+
# Issue #5645: make universal newlines semantics the same as in the
|
| 2683 |
+
# C version, even under Windows.
|
| 2684 |
+
if newline is None:
|
| 2685 |
+
self._writetranslate = False
|
| 2686 |
+
if initial_value is not None:
|
| 2687 |
+
if not isinstance(initial_value, str):
|
| 2688 |
+
raise TypeError("initial_value must be str or None, not {0}"
|
| 2689 |
+
.format(type(initial_value).__name__))
|
| 2690 |
+
self.write(initial_value)
|
| 2691 |
+
self.seek(0)
|
| 2692 |
+
|
| 2693 |
+
def getvalue(self):
|
| 2694 |
+
self.flush()
|
| 2695 |
+
decoder = self._decoder or self._get_decoder()
|
| 2696 |
+
old_state = decoder.getstate()
|
| 2697 |
+
decoder.reset()
|
| 2698 |
+
try:
|
| 2699 |
+
return decoder.decode(self.buffer.getvalue(), final=True)
|
| 2700 |
+
finally:
|
| 2701 |
+
decoder.setstate(old_state)
|
| 2702 |
+
|
| 2703 |
+
def __repr__(self):
|
| 2704 |
+
# TextIOWrapper tells the encoding in its repr. In StringIO,
|
| 2705 |
+
# that's an implementation detail.
|
| 2706 |
+
return object.__repr__(self)
|
| 2707 |
+
|
| 2708 |
+
@property
|
| 2709 |
+
def errors(self):
|
| 2710 |
+
return None
|
| 2711 |
+
|
| 2712 |
+
@property
|
| 2713 |
+
def encoding(self):
|
| 2714 |
+
return None
|
| 2715 |
+
|
| 2716 |
+
def detach(self):
|
| 2717 |
+
# This doesn't make sense on StringIO.
|
| 2718 |
+
self._unsupported("detach")
|
parrot/lib/python3.10/_weakrefset.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Access WeakSet through the weakref module.
|
| 2 |
+
# This code is separated-out because it is needed
|
| 3 |
+
# by abc.py to load everything else at startup.
|
| 4 |
+
|
| 5 |
+
from _weakref import ref
|
| 6 |
+
from types import GenericAlias
|
| 7 |
+
|
| 8 |
+
__all__ = ['WeakSet']
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class _IterationGuard:
|
| 12 |
+
# This context manager registers itself in the current iterators of the
|
| 13 |
+
# weak container, such as to delay all removals until the context manager
|
| 14 |
+
# exits.
|
| 15 |
+
# This technique should be relatively thread-safe (since sets are).
|
| 16 |
+
|
| 17 |
+
def __init__(self, weakcontainer):
|
| 18 |
+
# Don't create cycles
|
| 19 |
+
self.weakcontainer = ref(weakcontainer)
|
| 20 |
+
|
| 21 |
+
def __enter__(self):
|
| 22 |
+
w = self.weakcontainer()
|
| 23 |
+
if w is not None:
|
| 24 |
+
w._iterating.add(self)
|
| 25 |
+
return self
|
| 26 |
+
|
| 27 |
+
def __exit__(self, e, t, b):
|
| 28 |
+
w = self.weakcontainer()
|
| 29 |
+
if w is not None:
|
| 30 |
+
s = w._iterating
|
| 31 |
+
s.remove(self)
|
| 32 |
+
if not s:
|
| 33 |
+
w._commit_removals()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class WeakSet:
|
| 37 |
+
def __init__(self, data=None):
|
| 38 |
+
self.data = set()
|
| 39 |
+
def _remove(item, selfref=ref(self)):
|
| 40 |
+
self = selfref()
|
| 41 |
+
if self is not None:
|
| 42 |
+
if self._iterating:
|
| 43 |
+
self._pending_removals.append(item)
|
| 44 |
+
else:
|
| 45 |
+
self.data.discard(item)
|
| 46 |
+
self._remove = _remove
|
| 47 |
+
# A list of keys to be removed
|
| 48 |
+
self._pending_removals = []
|
| 49 |
+
self._iterating = set()
|
| 50 |
+
if data is not None:
|
| 51 |
+
self.update(data)
|
| 52 |
+
|
| 53 |
+
def _commit_removals(self):
|
| 54 |
+
pop = self._pending_removals.pop
|
| 55 |
+
discard = self.data.discard
|
| 56 |
+
while True:
|
| 57 |
+
try:
|
| 58 |
+
item = pop()
|
| 59 |
+
except IndexError:
|
| 60 |
+
return
|
| 61 |
+
discard(item)
|
| 62 |
+
|
| 63 |
+
def __iter__(self):
|
| 64 |
+
with _IterationGuard(self):
|
| 65 |
+
for itemref in self.data:
|
| 66 |
+
item = itemref()
|
| 67 |
+
if item is not None:
|
| 68 |
+
# Caveat: the iterator will keep a strong reference to
|
| 69 |
+
# `item` until it is resumed or closed.
|
| 70 |
+
yield item
|
| 71 |
+
|
| 72 |
+
def __len__(self):
|
| 73 |
+
return len(self.data) - len(self._pending_removals)
|
| 74 |
+
|
| 75 |
+
def __contains__(self, item):
|
| 76 |
+
try:
|
| 77 |
+
wr = ref(item)
|
| 78 |
+
except TypeError:
|
| 79 |
+
return False
|
| 80 |
+
return wr in self.data
|
| 81 |
+
|
| 82 |
+
def __reduce__(self):
|
| 83 |
+
return (self.__class__, (list(self),),
|
| 84 |
+
getattr(self, '__dict__', None))
|
| 85 |
+
|
| 86 |
+
def add(self, item):
|
| 87 |
+
if self._pending_removals:
|
| 88 |
+
self._commit_removals()
|
| 89 |
+
self.data.add(ref(item, self._remove))
|
| 90 |
+
|
| 91 |
+
def clear(self):
|
| 92 |
+
if self._pending_removals:
|
| 93 |
+
self._commit_removals()
|
| 94 |
+
self.data.clear()
|
| 95 |
+
|
| 96 |
+
def copy(self):
|
| 97 |
+
return self.__class__(self)
|
| 98 |
+
|
| 99 |
+
def pop(self):
|
| 100 |
+
if self._pending_removals:
|
| 101 |
+
self._commit_removals()
|
| 102 |
+
while True:
|
| 103 |
+
try:
|
| 104 |
+
itemref = self.data.pop()
|
| 105 |
+
except KeyError:
|
| 106 |
+
raise KeyError('pop from empty WeakSet') from None
|
| 107 |
+
item = itemref()
|
| 108 |
+
if item is not None:
|
| 109 |
+
return item
|
| 110 |
+
|
| 111 |
+
def remove(self, item):
|
| 112 |
+
if self._pending_removals:
|
| 113 |
+
self._commit_removals()
|
| 114 |
+
self.data.remove(ref(item))
|
| 115 |
+
|
| 116 |
+
def discard(self, item):
|
| 117 |
+
if self._pending_removals:
|
| 118 |
+
self._commit_removals()
|
| 119 |
+
self.data.discard(ref(item))
|
| 120 |
+
|
| 121 |
+
def update(self, other):
|
| 122 |
+
if self._pending_removals:
|
| 123 |
+
self._commit_removals()
|
| 124 |
+
for element in other:
|
| 125 |
+
self.add(element)
|
| 126 |
+
|
| 127 |
+
def __ior__(self, other):
|
| 128 |
+
self.update(other)
|
| 129 |
+
return self
|
| 130 |
+
|
| 131 |
+
def difference(self, other):
|
| 132 |
+
newset = self.copy()
|
| 133 |
+
newset.difference_update(other)
|
| 134 |
+
return newset
|
| 135 |
+
__sub__ = difference
|
| 136 |
+
|
| 137 |
+
def difference_update(self, other):
|
| 138 |
+
self.__isub__(other)
|
| 139 |
+
def __isub__(self, other):
|
| 140 |
+
if self._pending_removals:
|
| 141 |
+
self._commit_removals()
|
| 142 |
+
if self is other:
|
| 143 |
+
self.data.clear()
|
| 144 |
+
else:
|
| 145 |
+
self.data.difference_update(ref(item) for item in other)
|
| 146 |
+
return self
|
| 147 |
+
|
| 148 |
+
def intersection(self, other):
|
| 149 |
+
return self.__class__(item for item in other if item in self)
|
| 150 |
+
__and__ = intersection
|
| 151 |
+
|
| 152 |
+
def intersection_update(self, other):
|
| 153 |
+
self.__iand__(other)
|
| 154 |
+
def __iand__(self, other):
|
| 155 |
+
if self._pending_removals:
|
| 156 |
+
self._commit_removals()
|
| 157 |
+
self.data.intersection_update(ref(item) for item in other)
|
| 158 |
+
return self
|
| 159 |
+
|
| 160 |
+
def issubset(self, other):
|
| 161 |
+
return self.data.issubset(ref(item) for item in other)
|
| 162 |
+
__le__ = issubset
|
| 163 |
+
|
| 164 |
+
def __lt__(self, other):
|
| 165 |
+
return self.data < set(map(ref, other))
|
| 166 |
+
|
| 167 |
+
def issuperset(self, other):
|
| 168 |
+
return self.data.issuperset(ref(item) for item in other)
|
| 169 |
+
__ge__ = issuperset
|
| 170 |
+
|
| 171 |
+
def __gt__(self, other):
|
| 172 |
+
return self.data > set(map(ref, other))
|
| 173 |
+
|
| 174 |
+
def __eq__(self, other):
|
| 175 |
+
if not isinstance(other, self.__class__):
|
| 176 |
+
return NotImplemented
|
| 177 |
+
return self.data == set(map(ref, other))
|
| 178 |
+
|
| 179 |
+
def symmetric_difference(self, other):
|
| 180 |
+
newset = self.copy()
|
| 181 |
+
newset.symmetric_difference_update(other)
|
| 182 |
+
return newset
|
| 183 |
+
__xor__ = symmetric_difference
|
| 184 |
+
|
| 185 |
+
def symmetric_difference_update(self, other):
|
| 186 |
+
self.__ixor__(other)
|
| 187 |
+
def __ixor__(self, other):
|
| 188 |
+
if self._pending_removals:
|
| 189 |
+
self._commit_removals()
|
| 190 |
+
if self is other:
|
| 191 |
+
self.data.clear()
|
| 192 |
+
else:
|
| 193 |
+
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
|
| 194 |
+
return self
|
| 195 |
+
|
| 196 |
+
def union(self, other):
|
| 197 |
+
return self.__class__(e for s in (self, other) for e in s)
|
| 198 |
+
__or__ = union
|
| 199 |
+
|
| 200 |
+
def isdisjoint(self, other):
|
| 201 |
+
return len(self.intersection(other)) == 0
|
| 202 |
+
|
| 203 |
+
def __repr__(self):
|
| 204 |
+
return repr(self.data)
|
| 205 |
+
|
| 206 |
+
__class_getitem__ = classmethod(GenericAlias)
|
parrot/lib/python3.10/asyncore.py
ADDED
|
@@ -0,0 +1,649 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- Mode: Python -*-
|
| 2 |
+
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
|
| 3 |
+
# Author: Sam Rushing <rushing@nightmare.com>
|
| 4 |
+
|
| 5 |
+
# ======================================================================
|
| 6 |
+
# Copyright 1996 by Sam Rushing
|
| 7 |
+
#
|
| 8 |
+
# All Rights Reserved
|
| 9 |
+
#
|
| 10 |
+
# Permission to use, copy, modify, and distribute this software and
|
| 11 |
+
# its documentation for any purpose and without fee is hereby
|
| 12 |
+
# granted, provided that the above copyright notice appear in all
|
| 13 |
+
# copies and that both that copyright notice and this permission
|
| 14 |
+
# notice appear in supporting documentation, and that the name of Sam
|
| 15 |
+
# Rushing not be used in advertising or publicity pertaining to
|
| 16 |
+
# distribution of the software without specific, written prior
|
| 17 |
+
# permission.
|
| 18 |
+
#
|
| 19 |
+
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
| 20 |
+
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
| 21 |
+
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
| 22 |
+
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 23 |
+
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
| 24 |
+
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
| 25 |
+
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 26 |
+
# ======================================================================
|
| 27 |
+
|
| 28 |
+
"""Basic infrastructure for asynchronous socket service clients and servers.
|
| 29 |
+
|
| 30 |
+
There are only two ways to have a program on a single processor do "more
|
| 31 |
+
than one thing at a time". Multi-threaded programming is the simplest and
|
| 32 |
+
most popular way to do it, but there is another very different technique,
|
| 33 |
+
that lets you have nearly all the advantages of multi-threading, without
|
| 34 |
+
actually using multiple threads. it's really only practical if your program
|
| 35 |
+
is largely I/O bound. If your program is CPU bound, then pre-emptive
|
| 36 |
+
scheduled threads are probably what you really need. Network servers are
|
| 37 |
+
rarely CPU-bound, however.
|
| 38 |
+
|
| 39 |
+
If your operating system supports the select() system call in its I/O
|
| 40 |
+
library (and nearly all do), then you can use it to juggle multiple
|
| 41 |
+
communication channels at once; doing other work while your I/O is taking
|
| 42 |
+
place in the "background." Although this strategy can seem strange and
|
| 43 |
+
complex, especially at first, it is in many ways easier to understand and
|
| 44 |
+
control than multi-threaded programming. The module documented here solves
|
| 45 |
+
many of the difficult problems for you, making the task of building
|
| 46 |
+
sophisticated high-performance network servers and clients a snap.
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
import select
|
| 50 |
+
import socket
|
| 51 |
+
import sys
|
| 52 |
+
import time
|
| 53 |
+
import warnings
|
| 54 |
+
|
| 55 |
+
import os
|
| 56 |
+
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
|
| 57 |
+
ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
|
| 58 |
+
errorcode
|
| 59 |
+
|
| 60 |
+
warnings.warn(
|
| 61 |
+
'The asyncore module is deprecated and will be removed in Python 3.12. '
|
| 62 |
+
'The recommended replacement is asyncio',
|
| 63 |
+
DeprecationWarning,
|
| 64 |
+
stacklevel=2)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
|
| 68 |
+
EBADF})
|
| 69 |
+
|
| 70 |
+
try:
|
| 71 |
+
socket_map
|
| 72 |
+
except NameError:
|
| 73 |
+
socket_map = {}
|
| 74 |
+
|
| 75 |
+
def _strerror(err):
|
| 76 |
+
try:
|
| 77 |
+
return os.strerror(err)
|
| 78 |
+
except (ValueError, OverflowError, NameError):
|
| 79 |
+
if err in errorcode:
|
| 80 |
+
return errorcode[err]
|
| 81 |
+
return "Unknown error %s" %err
|
| 82 |
+
|
| 83 |
+
class ExitNow(Exception):
|
| 84 |
+
pass
|
| 85 |
+
|
| 86 |
+
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
|
| 87 |
+
|
| 88 |
+
def read(obj):
    """Dispatch a read event to *obj*.

    Control-flow exceptions (ExitNow, KeyboardInterrupt, SystemExit)
    propagate; every other failure is reported via obj.handle_error().
    """
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()
+
def write(obj):
    """Dispatch a write event to *obj*.

    Control-flow exceptions (ExitNow, KeyboardInterrupt, SystemExit)
    propagate; every other failure is reported via obj.handle_error().
    """
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()
+
def _exception(obj):
    """Dispatch an exceptional-condition event to *obj*.

    Control-flow exceptions (ExitNow, KeyboardInterrupt, SystemExit)
    propagate; every other failure is reported via obj.handle_error().
    """
    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()
+
def readwrite(obj, flags):
    """Dispatch poll()-style event *flags* for *obj* to its handlers.

    POLLIN -> handle_read_event, POLLOUT -> handle_write_event,
    POLLPRI -> handle_expt_event; hangup/error/invalid-fd flags close
    the channel.  An OSError with a disconnect errno is turned into
    handle_close(); any other failure goes to handle_error().
    """
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except OSError as err:
        if err.errno in _DISCONNECTED:
            obj.handle_close()
        else:
            obj.handle_error()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()
+
def poll(timeout=0.0, map=None):
    """Run one select()-based polling pass over the channels in *map*.

    Builds the read/write/exception fd lists from each dispatcher's
    readable()/writable() predicates, waits up to *timeout* seconds in
    select.select(), and dispatches every ready fd.  Defaults to the
    module-global socket_map.
    """
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        # Iterate over a snapshot: handlers may add/remove channels.
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        # Nothing to wait on: sleep for the timeout so a caller looping
        # on poll() does not spin at 100% CPU.
        if [] == r == w == e:
            time.sleep(timeout)
            return

        r, w, e = select.select(r, w, e, timeout)

        for fd in r:
            obj = map.get(fd)
            if obj is None:
                # Channel was removed by an earlier handler in this pass.
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)
+
def poll2(timeout=0.0, map=None):
    """Run one polling pass using select.poll() instead of select().

    Same contract as poll(); *timeout* is in seconds and is converted to
    the milliseconds that select.poll() expects.
    """
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        # Iterate over a snapshot: handlers may add/remove channels.
        for fd, obj in list(map.items()):
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                pollster.register(fd, flags)

        r = pollster.poll(timeout)
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                # Channel was removed by an earlier handler in this pass.
                continue
            readwrite(obj, flags)

poll3 = poll2                           # Alias for backward compatibility
+
def loop(timeout=30.0, use_poll=False, map=None, count=None):
    """Enter a polling loop that ends when *map* runs out of channels.

    Performs at most *count* polling passes (forever when count is
    None).  Uses the poll2() backend when *use_poll* is true and the
    platform provides select.poll(); otherwise falls back to the
    select()-based poll().
    """
    if map is None:
        map = socket_map

    poll_fun = poll2 if use_poll and hasattr(select, 'poll') else poll

    if count is None:
        while map:
            poll_fun(timeout, map)
    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1
+
class dispatcher:
    """Core channel type: wraps a socket and dispatches readiness events
    (read/write/accept/connect/close) to handle_*() methods, which
    subclasses override.  Instances register themselves in a channel map
    (fd -> dispatcher) that the module-level poll functions drive.
    """

    debug = False
    connected = False                   # a connection is established
    accepting = False                   # socket is listening for connections
    connecting = False                  # an async connect() is in flight
    closing = False
    addr = None                         # peer/bound address, when known
    ignore_log_types = frozenset({'warning'})

    def __init__(self, sock=None, map=None):
        # *map* defaults to the module-global socket_map.
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(False)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except OSError as err:
                if err.errno in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__+"."+self.__class__.__qualname__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) pair (e.g. a Unix path).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    def add_channel(self, map=None):
        """Register this channel's fd in the polling map."""
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        """Remove this channel's fd from the polling map, if present."""
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
        """Create a new nonblocking socket and adopt it as this channel's."""
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(False)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Adopt *sock* as this channel's socket and register its fd."""
        self.socket = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except OSError:
            # Best-effort only; some socket types reject the option.
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        # Historical Windows limitation: cap the listen backlog at 5.
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        """Start a nonblocking connect to *address*."""
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        # In-progress errnos mean completion will be reported later via
        # handle_connect_event(); Windows signals this with EINVAL.
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name == 'nt':
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise OSError(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except OSError as why:
            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                # No connection actually pending; not an error.
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        """Send *data*; return the byte count sent (0 on would-block)."""
        try:
            result = self.socket.send(data)
            return result
        except OSError as why:
            if why.errno == EWOULDBLOCK:
                return 0
            elif why.errno in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Receive up to *buffer_size* bytes; b'' means peer closed."""
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except OSError as why:
            # winsock sometimes raises ENOTCONN
            if why.errno in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        """Close the socket and deregister the channel."""
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        if self.socket is not None:
            try:
                self.socket.close()
            except OSError as why:
                if why.errno not in (ENOTCONN, EBADF):
                    raise

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if type not in self.ignore_log_types:
            print('%s: %s' % (type, message))

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        # Check for an asynchronous connect() failure before declaring
        # the channel connected.
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise OSError(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        """Last-resort handler: log the traceback and close the channel."""
        nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            self.handle_accepted(*pair)

    def handle_accepted(self, sock, addr):
        # Default: refuse the connection; subclasses override to serve it.
        sock.close()
        self.log_info('unhandled accepted event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()
+
# ---------------------------------------------------------------------------
|
| 509 |
+
# adds simple buffered output capability, useful for simple clients.
|
| 510 |
+
# [for more sophisticated usage use asynchat.async_chat]
|
| 511 |
+
# ---------------------------------------------------------------------------
|
| 512 |
+
|
| 513 |
+
class dispatcher_with_send(dispatcher):
    """dispatcher subclass that adds simple buffered output.

    send() appends to an internal buffer which is drained in 64 KiB
    slices as the socket becomes writable.
    """

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''           # bytes queued but not yet written

    def initiate_send(self):
        """Push at most 64 KiB of the buffer to the socket."""
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:65536])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        # Remain writable while connecting or while output is pending.
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        """Queue *data* and attempt an immediate partial send."""
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()
+
# ---------------------------------------------------------------------------
|
| 537 |
+
# used for debugging.
|
| 538 |
+
# ---------------------------------------------------------------------------
|
| 539 |
+
|
| 540 |
+
def compact_traceback():
    """Summarize the exception currently being handled.

    Returns ((filename, function, line), type, value, info) where
    *info* is a compact '[file|func|line]' chain covering every frame.
    Must be called from inside an except block.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if not tb:  # Must have a traceback
        raise AssertionError("traceback does not exist")
    tbinfo = []
    while tb is not None:
        code = tb.tb_frame.f_code
        tbinfo.append((code.co_filename, code.co_name, str(tb.tb_lineno)))
        tb = tb.tb_next

    # just to be safe: drop the local traceback reference
    del tb

    file, function, line = tbinfo[-1]
    info = ' '.join('[%s|%s|%s]' % entry for entry in tbinfo)
    return (file, function, line), exc_type, exc_value, info
+
|
| 560 |
+
def close_all(map=None, ignore_all=False):
    """Close every channel in *map* (default: the global socket_map).

    EBADF from close() is always ignored; any other error propagates
    unless *ignore_all* is true.  The map is cleared afterwards.
    """
    if map is None:
        map = socket_map
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as exc:
            if exc.errno != EBADF and not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except BaseException:
            if not ignore_all:
                raise
    map.clear()
+
|
| 578 |
+
# Asynchronous File I/O:
|
| 579 |
+
#
|
| 580 |
+
# After a little research (reading man pages on various unixen, and
|
| 581 |
+
# digging through the linux kernel), I've determined that select()
|
| 582 |
+
# isn't meant for doing asynchronous file i/o.
|
| 583 |
+
# Heartening, though - reading linux/mm/filemap.c shows that linux
|
| 584 |
+
# supports asynchronous read-ahead. So _MOST_ of the time, the data
|
| 585 |
+
# will be sitting in memory for us already when we go to read it.
|
| 586 |
+
#
|
| 587 |
+
# What other OS's (besides NT) support async file i/o? [VMS?]
|
| 588 |
+
#
|
| 589 |
+
# Regardless, this is useful for pipes, and stdin/stdout...
|
| 590 |
+
|
| 591 |
+
if os.name == 'posix':
    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            # Duplicate the fd so closing this wrapper never closes the
            # caller's descriptor.
            self.fd = os.dup(fd)

        def __del__(self):
            if self.fd >= 0:
                warnings.warn("unclosed file %r" % self, ResourceWarning,
                              source=self)
            self.close()

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            # Only the SO_ERROR query used by dispatcher is supported.
            if (level == socket.SOL_SOCKET and
                optname == socket.SO_ERROR and
                not buflen):
                return 0
            raise NotImplementedError("Only asyncore specific behaviour "
                                      "implemented.")

        read = recv
        write = send

        def close(self):
            if self.fd < 0:
                # Already closed; close() must be idempotent (__del__
                # calls it unconditionally).
                return
            fd = self.fd
            self.fd = -1
            os.close(fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):
        # Dispatcher driven by a plain file descriptor (pipe, tty, ...)
        # instead of a socket.

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                # Accept file-like objects as well as raw integer fds.
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            os.set_blocking(fd, False)

        def set_file(self, fd):
            # Wrap the fd so it quacks like a socket, then register it.
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
parrot/lib/python3.10/calendar.py
ADDED
|
@@ -0,0 +1,759 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Calendar printing functions
|
| 2 |
+
|
| 3 |
+
Note when comparing these calendars to the ones printed by cal(1): By
|
| 4 |
+
default, these calendars have Monday as the first day of the week, and
|
| 5 |
+
Sunday as the last (the European convention). Use setfirstweekday() to
|
| 6 |
+
set the first day of the week (0=Monday, 6=Sunday)."""
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import datetime
|
| 10 |
+
import locale as _locale
|
| 11 |
+
from itertools import repeat
|
| 12 |
+
|
| 13 |
+
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
|
| 14 |
+
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
|
| 15 |
+
"monthcalendar", "prmonth", "month", "prcal", "calendar",
|
| 16 |
+
"timegm", "month_name", "month_abbr", "day_name", "day_abbr",
|
| 17 |
+
"Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
|
| 18 |
+
"LocaleHTMLCalendar", "weekheader",
|
| 19 |
+
"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY",
|
| 20 |
+
"SATURDAY", "SUNDAY"]
|
| 21 |
+
|
| 22 |
+
# Exception raised for bad input (with string parameter for details)
|
| 23 |
+
error = ValueError
|
| 24 |
+
|
| 25 |
+
# Exceptions raised for bad input
|
| 26 |
+
class IllegalMonthError(ValueError):
    """Raised when a month number falls outside the range 1..12."""

    def __init__(self, month):
        self.month = month

    def __str__(self):
        return "bad month number %r; must be 1-12" % self.month
|
| 32 |
+
|
| 33 |
+
class IllegalWeekdayError(ValueError):
    """Raised when a weekday number falls outside the range 0..6."""

    def __init__(self, weekday):
        self.weekday = weekday

    def __str__(self):
        return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
|
| 39 |
+
|
| 40 |
+
# Constants for months referenced later
|
| 41 |
+
January = 1
|
| 42 |
+
February = 2
|
| 43 |
+
|
| 44 |
+
# Number of days per month (except for February in leap years)
|
| 45 |
+
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
|
| 46 |
+
|
| 47 |
+
# This module used to have hard-coded lists of day and month names, as
|
| 48 |
+
# English strings. The classes following emulate a read-only version of
|
| 49 |
+
# that, but supply localized names. Note that the values are computed
|
| 50 |
+
# fresh on each call, in case the user changes locale between calls.
|
| 51 |
+
|
| 52 |
+
class _localized_month:
    # Read-only, 13-element sequence of month names (index 0 is the
    # empty string so month numbers 1-12 index directly).  Names are
    # formatted freshly on each access so locale changes are picked up.

    _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
    _months.insert(0, lambda x: "")

    def __init__(self, format):
        # strftime format, e.g. '%B' (full name) or '%b' (abbreviation).
        self.format = format

    def __getitem__(self, i):
        funcs = self._months[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        else:
            return funcs(self.format)

    def __len__(self):
        return 13
+
|
| 70 |
+
|
| 71 |
+
class _localized_day:
    # Read-only, 7-element sequence of weekday names (Monday first).
    # Names are formatted freshly on each access so locale changes are
    # picked up.

    # January 1, 2001, was a Monday.
    _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]

    def __init__(self, format):
        # strftime format, e.g. '%A' (full name) or '%a' (abbreviation).
        self.format = format

    def __getitem__(self, i):
        funcs = self._days[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        else:
            return funcs(self.format)

    def __len__(self):
        return 7
+
|
| 89 |
+
|
| 90 |
+
# Full and abbreviated names of weekdays (Monday first; lazily localized)
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')

# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')

# Constants for weekdays (0 = Monday ... 6 = Sunday)
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def isleap(year):
    """Return True for leap years, False for non-leap years."""
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    if year % 4:
        return False
    if year % 100:
        return True
    return year % 400 == 0
|
| 106 |
+
|
| 107 |
+
def leapdays(y1, y2):
    """Return number of leap years in range [y1, y2).

    Assume y1 <= y2.
    """
    def leaps_through(year):
        # Count of leap years with value <= year (Gregorian rule).
        return year // 4 - year // 100 + year // 400
    return leaps_through(y2 - 1) - leaps_through(y1 - 1)
+
|
| 114 |
+
|
| 115 |
+
def weekday(year, month, day):
    """Return weekday (0-6 ~ Mon-Sun) for year, month (1-12), day (1-31)."""
    in_range = datetime.MINYEAR <= year <= datetime.MAXYEAR
    if not in_range:
        # Weekdays repeat on a 400-year Gregorian cycle, so substitute
        # an equivalent year that datetime can represent.
        year = 2000 + year % 400
    return datetime.date(year, month, day).weekday()
+
|
| 121 |
+
|
| 122 |
+
def monthrange(year, month):
    """Return weekday (0-6 ~ Mon-Sun) of the first day of the month and
    the number of days (28-31) for year, month."""
    if month < 1 or month > 12:
        raise IllegalMonthError(month)
    first = weekday(year, month, 1)
    ndays = mdays[month] + (month == February and isleap(year))
    return first, ndays
+
|
| 131 |
+
|
| 132 |
+
def _monthlen(year, month):
    # Number of days in the month, with the February leap-year correction.
    return mdays[month] + (month == February and isleap(year))
+
|
| 135 |
+
|
| 136 |
+
def _prevmonth(year, month):
    """Return (year, month) of the month preceding the given one."""
    return (year - 1, 12) if month == 1 else (year, month - 1)
+
|
| 142 |
+
|
| 143 |
+
def _nextmonth(year, month):
    """Return (year, month) of the month following the given one."""
    return (year + 1, 1) if month == 12 else (year, month + 1)
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class Calendar(object):
|
| 151 |
+
"""
|
| 152 |
+
Base calendar class. This class doesn't do any formatting. It simply
|
| 153 |
+
provides data to subclasses.
|
| 154 |
+
"""
|
| 155 |
+
|
| 156 |
+
def __init__(self, firstweekday=0):
|
| 157 |
+
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
|
| 158 |
+
|
| 159 |
+
def getfirstweekday(self):
|
| 160 |
+
return self._firstweekday % 7
|
| 161 |
+
|
| 162 |
+
def setfirstweekday(self, firstweekday):
|
| 163 |
+
self._firstweekday = firstweekday
|
| 164 |
+
|
| 165 |
+
firstweekday = property(getfirstweekday, setfirstweekday)
|
| 166 |
+
|
| 167 |
+
def iterweekdays(self):
|
| 168 |
+
"""
|
| 169 |
+
Return an iterator for one week of weekday numbers starting with the
|
| 170 |
+
configured first one.
|
| 171 |
+
"""
|
| 172 |
+
for i in range(self.firstweekday, self.firstweekday + 7):
|
| 173 |
+
yield i%7
|
| 174 |
+
|
| 175 |
+
def itermonthdates(self, year, month):
|
| 176 |
+
"""
|
| 177 |
+
Return an iterator for one month. The iterator will yield datetime.date
|
| 178 |
+
values and will always iterate through complete weeks, so it will yield
|
| 179 |
+
dates outside the specified month.
|
| 180 |
+
"""
|
| 181 |
+
for y, m, d in self.itermonthdays3(year, month):
|
| 182 |
+
yield datetime.date(y, m, d)
|
| 183 |
+
|
| 184 |
+
def itermonthdays(self, year, month):
|
| 185 |
+
"""
|
| 186 |
+
Like itermonthdates(), but will yield day numbers. For days outside
|
| 187 |
+
the specified month the day number is 0.
|
| 188 |
+
"""
|
| 189 |
+
day1, ndays = monthrange(year, month)
|
| 190 |
+
days_before = (day1 - self.firstweekday) % 7
|
| 191 |
+
yield from repeat(0, days_before)
|
| 192 |
+
yield from range(1, ndays + 1)
|
| 193 |
+
days_after = (self.firstweekday - day1 - ndays) % 7
|
| 194 |
+
yield from repeat(0, days_after)
|
| 195 |
+
|
| 196 |
+
def itermonthdays2(self, year, month):
|
| 197 |
+
"""
|
| 198 |
+
Like itermonthdates(), but will yield (day number, weekday number)
|
| 199 |
+
tuples. For days outside the specified month the day number is 0.
|
| 200 |
+
"""
|
| 201 |
+
for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):
|
| 202 |
+
yield d, i % 7
|
| 203 |
+
|
| 204 |
+
def itermonthdays3(self, year, month):
|
| 205 |
+
"""
|
| 206 |
+
Like itermonthdates(), but will yield (year, month, day) tuples. Can be
|
| 207 |
+
used for dates outside of datetime.date range.
|
| 208 |
+
"""
|
| 209 |
+
day1, ndays = monthrange(year, month)
|
| 210 |
+
days_before = (day1 - self.firstweekday) % 7
|
| 211 |
+
days_after = (self.firstweekday - day1 - ndays) % 7
|
| 212 |
+
y, m = _prevmonth(year, month)
|
| 213 |
+
end = _monthlen(y, m) + 1
|
| 214 |
+
for d in range(end-days_before, end):
|
| 215 |
+
yield y, m, d
|
| 216 |
+
for d in range(1, ndays + 1):
|
| 217 |
+
yield year, month, d
|
| 218 |
+
y, m = _nextmonth(year, month)
|
| 219 |
+
for d in range(1, days_after + 1):
|
| 220 |
+
yield y, m, d
|
| 221 |
+
|
| 222 |
+
def itermonthdays4(self, year, month):
|
| 223 |
+
"""
|
| 224 |
+
Like itermonthdates(), but will yield (year, month, day, day_of_week) tuples.
|
| 225 |
+
Can be used for dates outside of datetime.date range.
|
| 226 |
+
"""
|
| 227 |
+
for i, (y, m, d) in enumerate(self.itermonthdays3(year, month)):
|
| 228 |
+
yield y, m, d, (self.firstweekday + i) % 7
|
| 229 |
+
|
| 230 |
+
def monthdatescalendar(self, year, month):
|
| 231 |
+
"""
|
| 232 |
+
Return a matrix (list of lists) representing a month's calendar.
|
| 233 |
+
Each row represents a week; week entries are datetime.date values.
|
| 234 |
+
"""
|
| 235 |
+
dates = list(self.itermonthdates(year, month))
|
| 236 |
+
return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
|
| 237 |
+
|
| 238 |
+
def monthdays2calendar(self, year, month):
|
| 239 |
+
"""
|
| 240 |
+
Return a matrix representing a month's calendar.
|
| 241 |
+
Each row represents a week; week entries are
|
| 242 |
+
(day number, weekday number) tuples. Day numbers outside this month
|
| 243 |
+
are zero.
|
| 244 |
+
"""
|
| 245 |
+
days = list(self.itermonthdays2(year, month))
|
| 246 |
+
return [ days[i:i+7] for i in range(0, len(days), 7) ]
|
| 247 |
+
|
| 248 |
+
def monthdayscalendar(self, year, month):
|
| 249 |
+
"""
|
| 250 |
+
Return a matrix representing a month's calendar.
|
| 251 |
+
Each row represents a week; days outside this month are zero.
|
| 252 |
+
"""
|
| 253 |
+
days = list(self.itermonthdays(year, month))
|
| 254 |
+
return [ days[i:i+7] for i in range(0, len(days), 7) ]
|
| 255 |
+
|
| 256 |
+
def yeardatescalendar(self, year, width=3):
|
| 257 |
+
"""
|
| 258 |
+
Return the data for the specified year ready for formatting. The return
|
| 259 |
+
value is a list of month rows. Each month row contains up to width months.
|
| 260 |
+
Each month contains between 4 and 6 weeks and each week contains 1-7
|
| 261 |
+
days. Days are datetime.date objects.
|
| 262 |
+
"""
|
| 263 |
+
months = [
|
| 264 |
+
self.monthdatescalendar(year, i)
|
| 265 |
+
for i in range(January, January+12)
|
| 266 |
+
]
|
| 267 |
+
return [months[i:i+width] for i in range(0, len(months), width) ]
|
| 268 |
+
|
| 269 |
+
def yeardays2calendar(self, year, width=3):
|
| 270 |
+
"""
|
| 271 |
+
Return the data for the specified year ready for formatting (similar to
|
| 272 |
+
yeardatescalendar()). Entries in the week lists are
|
| 273 |
+
(day number, weekday number) tuples. Day numbers outside this month are
|
| 274 |
+
zero.
|
| 275 |
+
"""
|
| 276 |
+
months = [
|
| 277 |
+
self.monthdays2calendar(year, i)
|
| 278 |
+
for i in range(January, January+12)
|
| 279 |
+
]
|
| 280 |
+
return [months[i:i+width] for i in range(0, len(months), width) ]
|
| 281 |
+
|
| 282 |
+
def yeardayscalendar(self, year, width=3):
|
| 283 |
+
"""
|
| 284 |
+
Return the data for the specified year ready for formatting (similar to
|
| 285 |
+
yeardatescalendar()). Entries in the week lists are day numbers.
|
| 286 |
+
Day numbers outside this month are zero.
|
| 287 |
+
"""
|
| 288 |
+
months = [
|
| 289 |
+
self.monthdayscalendar(year, i)
|
| 290 |
+
for i in range(January, January+12)
|
| 291 |
+
]
|
| 292 |
+
return [months[i:i+width] for i in range(0, len(months), width) ]
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class TextCalendar(Calendar):
    """
    Subclass of Calendar that outputs a calendar as a simple plain text
    similar to the UNIX program cal.
    """

    def prweek(self, theweek, width):
        """
        Print a single week (no newline).

        theweek is a list of (day number, weekday number) tuples as
        produced by monthdays2calendar(); width is the printed width of
        one day column.
        """
        print(self.formatweek(theweek, width), end='')

    def formatday(self, day, weekday, width):
        """
        Returns a formatted day.

        A day number of 0 (a day outside the month) renders as blanks.
        """
        if day == 0:
            s = ''
        else:
            s = '%2i' % day             # right-align single-digit days
        return s.center(width)

    def formatweek(self, theweek, width):
        """
        Returns a single week in a string (no newline).
        """
        return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)

    def formatweekday(self, day, width):
        """
        Returns a formatted week day name.

        Full day names are used only when at least 9 columns are
        available; otherwise the abbreviated names are used.
        """
        if width >= 9:
            names = day_name
        else:
            names = day_abbr
        return names[day][:width].center(width)

    def formatweekheader(self, width):
        """
        Return a header for a week.
        """
        return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        """
        Return a formatted month name, centered in *width* columns.
        """
        s = month_name[themonth]
        if withyear:
            s = "%s %r" % (s, theyear)
        return s.center(width)

    def prmonth(self, theyear, themonth, w=0, l=0):
        """
        Print a month's calendar.
        """
        print(self.formatmonth(theyear, themonth, w, l), end='')

    def formatmonth(self, theyear, themonth, w=0, l=0):
        """
        Return a month's calendar string (multi-line).

        w is the day-column width (minimum 2); l is the number of
        newlines between rows (minimum 1).
        """
        w = max(2, w)
        l = max(1, l)
        s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
        s = s.rstrip()
        s += '\n' * l
        s += self.formatweekheader(w).rstrip()
        s += '\n' * l
        for week in self.monthdays2calendar(theyear, themonth):
            s += self.formatweek(week, w).rstrip()
            s += '\n' * l
        return s

    def formatyear(self, theyear, w=2, l=1, c=6, m=3):
        """
        Returns a year's calendar as a multi-line string.

        w: day-column width; l: newlines between rows; c: spacing
        between month columns; m: months per row.
        """
        w = max(2, w)
        l = max(1, l)
        c = max(2, c)
        # Printed width of one month column.
        colwidth = (w + 1) * 7 - 1
        v = []
        a = v.append
        a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
        a('\n'*l)
        header = self.formatweekheader(w)
        for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
            # months in this row
            months = range(m*i+1, min(m*(i+1)+1, 13))
            a('\n'*l)
            names = (self.formatmonthname(theyear, k, colwidth, False)
                     for k in months)
            a(formatstring(names, colwidth, c).rstrip())
            a('\n'*l)
            headers = (header for k in months)
            a(formatstring(headers, colwidth, c).rstrip())
            a('\n'*l)
            # max number of weeks for this row
            height = max(len(cal) for cal in row)
            for j in range(height):
                weeks = []
                for cal in row:
                    if j >= len(cal):
                        # This month has fewer weeks than its neighbours;
                        # pad the row with a blank column.
                        weeks.append('')
                    else:
                        weeks.append(self.formatweek(cal[j], w))
                a(formatstring(weeks, colwidth, c).rstrip())
                a('\n' * l)
        return ''.join(v)

    def pryear(self, theyear, w=0, l=0, c=6, m=3):
        """Print a year's calendar."""
        print(self.formatyear(theyear, w, l, c, m), end='')
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class HTMLCalendar(Calendar):
    """
    This calendar returns complete HTML pages.
    """

    # CSS classes for the day <td>s
    cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]

    # CSS classes for the day <th>s
    cssclasses_weekday_head = cssclasses

    # CSS class for the days before and after current month
    cssclass_noday = "noday"

    # CSS class for the month's head
    cssclass_month_head = "month"

    # CSS class for the month
    cssclass_month = "month"

    # CSS class for the year's table head
    cssclass_year_head = "year"

    # CSS class for the whole year table
    cssclass_year = "year"

    def formatday(self, day, weekday):
        """
        Return a day as a table cell.
        """
        if day == 0:
            # A day number of 0 marks a cell outside the current month.
            return '<td class="%s">&nbsp;</td>' % self.cssclass_noday
        return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)

    def formatweek(self, theweek):
        """
        Return a complete week as a table row.
        """
        cells = [self.formatday(d, wd) for (d, wd) in theweek]
        return '<tr>%s</tr>' % ''.join(cells)

    def formatweekday(self, day):
        """
        Return a weekday name as a table header.
        """
        cssclass = self.cssclasses_weekday_head[day]
        return '<th class="%s">%s</th>' % (cssclass, day_abbr[day])

    def formatweekheader(self):
        """
        Return a header for a week as a table row.
        """
        cells = [self.formatweekday(wd) for wd in self.iterweekdays()]
        return '<tr>%s</tr>' % ''.join(cells)

    def formatmonthname(self, theyear, themonth, withyear=True):
        """
        Return a month name as a table row.
        """
        if withyear:
            label = '%s %s' % (month_name[themonth], theyear)
        else:
            label = '%s' % month_name[themonth]
        return '<tr><th colspan="7" class="%s">%s</th></tr>' % (
            self.cssclass_month_head, label)

    def formatmonth(self, theyear, themonth, withyear=True):
        """
        Return a formatted month as a table.
        """
        parts = [
            '<table border="0" cellpadding="0" cellspacing="0" class="%s">'
            % self.cssclass_month,
            '\n',
            self.formatmonthname(theyear, themonth, withyear=withyear),
            '\n',
            self.formatweekheader(),
            '\n',
        ]
        for week in self.monthdays2calendar(theyear, themonth):
            parts.append(self.formatweek(week))
            parts.append('\n')
        parts.append('</table>')
        parts.append('\n')
        return ''.join(parts)

    def formatyear(self, theyear, width=3):
        """
        Return a formatted year as a table of tables.
        """
        width = max(width, 1)
        parts = [
            '<table border="0" cellpadding="0" cellspacing="0" class="%s">'
            % self.cssclass_year,
            '\n',
            '<tr><th colspan="%d" class="%s">%s</th></tr>'
            % (width, self.cssclass_year_head, theyear),
        ]
        for start in range(January, January + 12, width):
            # Months in this table row.
            parts.append('<tr>')
            for m in range(start, min(start + width, 13)):
                parts.append('<td>')
                parts.append(self.formatmonth(theyear, m, withyear=False))
                parts.append('</td>')
            parts.append('</tr>')
        parts.append('</table>')
        return ''.join(parts)

    def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
        """
        Return a formatted year as a complete HTML page.
        """
        if encoding is None:
            encoding = sys.getdefaultencoding()
        parts = [
            '<?xml version="1.0" encoding="%s"?>\n' % encoding,
            '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
            '<html>\n',
            '<head>\n',
            '<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding,
        ]
        if css is not None:
            parts.append('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
        parts.append('<title>Calendar for %d</title>\n' % theyear)
        parts.append('</head>\n')
        parts.append('<body>\n')
        parts.append(self.formatyear(theyear, width))
        parts.append('</body>\n')
        parts.append('</html>\n')
        return ''.join(parts).encode(encoding, "xmlcharrefreplace")
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
class different_locale:
    """Context manager that temporarily switches LC_TIME to *locale*.

    setlocale() changes locale state for the whole process, so this is
    not safe to use concurrently from multiple threads.
    """

    def __init__(self, locale):
        self.locale = locale

    def __enter__(self):
        # Save the current LC_TIME setting so __exit__ can restore it.
        self.oldlocale = _locale.getlocale(_locale.LC_TIME)
        _locale.setlocale(_locale.LC_TIME, self.locale)

    def __exit__(self, *args):
        _locale.setlocale(_locale.LC_TIME, self.oldlocale)
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
class LocaleTextCalendar(TextCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        TextCalendar.__init__(self, firstweekday)
        # Fall back to the interpreter's default locale when none is given.
        self.locale = _locale.getdefaultlocale() if locale is None else locale

    def formatweekday(self, day, width):
        with different_locale(self.locale):
            return super().formatweekday(day, width)

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        with different_locale(self.locale):
            return super().formatmonthname(theyear, themonth, width, withyear)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
class LocaleHTMLCalendar(HTMLCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        HTMLCalendar.__init__(self, firstweekday)
        # Fall back to the interpreter's default locale when none is given.
        self.locale = _locale.getdefaultlocale() if locale is None else locale

    def formatweekday(self, day):
        with different_locale(self.locale):
            return super().formatweekday(day)

    def formatmonthname(self, theyear, themonth, withyear=True):
        with different_locale(self.locale):
            return super().formatmonthname(theyear, themonth, withyear)
|
| 603 |
+
|
| 604 |
+
# Support for old module level interface
# A single shared TextCalendar instance backs the legacy module-level
# functions below; setfirstweekday() mutates its state globally.
c = TextCalendar()

firstweekday = c.getfirstweekday

def setfirstweekday(firstweekday):
    # Validate before mutating the shared calendar instance.
    if not MONDAY <= firstweekday <= SUNDAY:
        raise IllegalWeekdayError(firstweekday)
    c.firstweekday = firstweekday

# Module-level aliases delegating to the shared TextCalendar instance.
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1         # Amount printed by prweek()
_spacing = 6                # Number of spaces between columns


def format(cols, colwidth=_colwidth, spacing=_spacing):
    """Prints multi-column formatting for year calendars"""
    print(formatstring(cols, colwidth, spacing))


def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
    """Returns a string formatted from n strings, centered within n columns."""
    separator = ' ' * spacing
    return separator.join(col.center(colwidth) for col in cols)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()


def timegm(tuple):
    """Unrelated but handy function to calculate Unix timestamp from GMT."""
    year, month, day, hour, minute, second = tuple[:6]
    # Whole days since the Unix epoch, then fold in the time of day.
    day_count = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    return ((day_count * 24 + hour) * 60 + minute) * 60 + second
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def main(args):
    """Command-line entry point: print a text or HTML calendar.

    *args* is an argv-style list; args[0] (the program name) is ignored.
    Text output goes to sys.stdout (optionally re-encoded); HTML output
    is always written as bytes to sys.stdout.buffer.
    """
    import argparse
    parser = argparse.ArgumentParser()
    textgroup = parser.add_argument_group('text only arguments')
    htmlgroup = parser.add_argument_group('html only arguments')
    textgroup.add_argument(
        "-w", "--width",
        type=int, default=2,
        help="width of date column (default 2)"
    )
    textgroup.add_argument(
        "-l", "--lines",
        type=int, default=1,
        help="number of lines for each week (default 1)"
    )
    textgroup.add_argument(
        "-s", "--spacing",
        type=int, default=6,
        help="spacing between months (default 6)"
    )
    textgroup.add_argument(
        "-m", "--months",
        type=int, default=3,
        help="months per row (default 3)"
    )
    htmlgroup.add_argument(
        "-c", "--css",
        default="calendar.css",
        help="CSS to use for page"
    )
    parser.add_argument(
        "-L", "--locale",
        default=None,
        help="locale to be used for month and weekday names"
    )
    parser.add_argument(
        "-e", "--encoding",
        default=None,
        help="encoding to use for output"
    )
    parser.add_argument(
        "-t", "--type",
        default="text",
        choices=("text", "html"),
        help="output type (text or html)"
    )
    parser.add_argument(
        "year",
        nargs='?', type=int,
        help="year number (1-9999)"
    )
    parser.add_argument(
        "month",
        nargs='?', type=int,
        help="month number (1-12, text only)"
    )

    options = parser.parse_args(args[1:])

    if options.locale and not options.encoding:
        # parser.error() raises SystemExit itself, so no explicit
        # sys.exit() is needed after it.
        parser.error("if --locale is specified --encoding is required")

    locale = options.locale, options.encoding

    if options.type == "html":
        if options.locale:
            cal = LocaleHTMLCalendar(locale=locale)
        else:
            cal = HTMLCalendar()
        encoding = options.encoding
        if encoding is None:
            encoding = sys.getdefaultencoding()
        optdict = dict(encoding=encoding, css=options.css)
        # HTML output is encoded bytes; write through the binary buffer.
        write = sys.stdout.buffer.write
        if options.year is None:
            write(cal.formatyearpage(datetime.date.today().year, **optdict))
        elif options.month is None:
            write(cal.formatyearpage(options.year, **optdict))
        else:
            # A month argument is only meaningful for text output.
            parser.error("incorrect number of arguments")
    else:
        if options.locale:
            cal = LocaleTextCalendar(locale=locale)
        else:
            cal = TextCalendar()
        optdict = dict(w=options.width, l=options.lines)
        if options.month is None:
            # Year view takes the extra column-spacing options.
            optdict["c"] = options.spacing
            optdict["m"] = options.months
        if options.year is None:
            result = cal.formatyear(datetime.date.today().year, **optdict)
        elif options.month is None:
            result = cal.formatyear(options.year, **optdict)
        else:
            result = cal.formatmonth(options.year, options.month, **optdict)
        write = sys.stdout.write
        if options.encoding:
            result = result.encode(options.encoding)
            write = sys.stdout.buffer.write
        write(result)


if __name__ == "__main__":
    main(sys.argv)
|
parrot/lib/python3.10/colorsys.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Conversion functions between RGB and other color systems.
|
| 2 |
+
|
| 3 |
+
This modules provides two functions for each color system ABC:
|
| 4 |
+
|
| 5 |
+
rgb_to_abc(r, g, b) --> a, b, c
|
| 6 |
+
abc_to_rgb(a, b, c) --> r, g, b
|
| 7 |
+
|
| 8 |
+
All inputs and outputs are triples of floats in the range [0.0...1.0]
|
| 9 |
+
(with the exception of I and Q, which covers a slightly larger range).
|
| 10 |
+
Inputs outside the valid range may cause exceptions or invalid outputs.
|
| 11 |
+
|
| 12 |
+
Supported color systems:
|
| 13 |
+
RGB: Red, Green, Blue components
|
| 14 |
+
YIQ: Luminance, Chrominance (used by composite video signals)
|
| 15 |
+
HLS: Hue, Luminance, Saturation
|
| 16 |
+
HSV: Hue, Saturation, Value
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
# References:
|
| 20 |
+
# http://en.wikipedia.org/wiki/YIQ
|
| 21 |
+
# http://en.wikipedia.org/wiki/HLS_color_space
|
| 22 |
+
# http://en.wikipedia.org/wiki/HSV_color_space
|
| 23 |
+
|
| 24 |
+
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
|
| 25 |
+
"rgb_to_hsv","hsv_to_rgb"]
|
| 26 |
+
|
| 27 |
+
# Some floating point constants
|
| 28 |
+
|
| 29 |
+
ONE_THIRD = 1.0/3.0
|
| 30 |
+
ONE_SIXTH = 1.0/6.0
|
| 31 |
+
TWO_THIRD = 2.0/3.0
|
| 32 |
+
|
| 33 |
+
# YIQ: used by composite video signals (linear combinations of RGB)
|
| 34 |
+
# Y: perceived grey level (0.0 == black, 1.0 == white)
|
| 35 |
+
# I, Q: color components
|
| 36 |
+
#
|
| 37 |
+
# There are a great many versions of the constants used in these formulae.
|
| 38 |
+
# The ones in this library uses constants from the FCC version of NTSC.
|
| 39 |
+
|
| 40 |
+
def rgb_to_yiq(r, g, b):
    """Convert an RGB triple to YIQ (FCC NTSC version of the constants)."""
    y = 0.30*r + 0.59*g + 0.11*b
    # Chrominance is built from the red and blue differences from luma.
    rd = r - y
    bd = b - y
    return (y, 0.74*rd - 0.27*bd, 0.48*rd + 0.41*bd)
|
| 45 |
+
|
| 46 |
+
def yiq_to_rgb(y, i, q):
    """Convert YIQ back to RGB; each channel is clamped to [0.0, 1.0].

    The coefficients below are the exact inverse of the 3x3 matrix used
    by rgb_to_yiq():
      r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48)
      b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48)
      g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59
    """
    r = y + 0.9468822170900693*i + 0.6235565819861433*q
    g = y - 0.27478764629897834*i - 0.6356910791873801*q
    b = y - 1.1085450346420322*i + 1.7090069284064666*q

    def _clamp(x):
        # Restrict to the representable RGB range.
        if x < 0.0:
            return 0.0
        if x > 1.0:
            return 1.0
        return x

    return (_clamp(r), _clamp(g), _clamp(b))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# HLS: Hue, Luminance, Saturation
|
| 71 |
+
# H: position in the spectrum
|
| 72 |
+
# L: color lightness
|
| 73 |
+
# S: color saturation
|
| 74 |
+
|
| 75 |
+
def rgb_to_hls(r, g, b):
    """Convert RGB to HLS (hue, lightness/luminance, saturation)."""
    maxc = max(r, g, b)
    minc = min(r, g, b)
    sumc = maxc + minc
    rangec = maxc - minc
    l = sumc / 2.0
    if minc == maxc:
        # Achromatic: hue and saturation are undefined; report 0.0.
        return 0.0, l, 0.0
    s = rangec / sumc if l <= 0.5 else rangec / (2.0 - sumc)
    rc = (maxc - r) / rangec
    gc = (maxc - g) / rangec
    bc = (maxc - b) / rangec
    if r == maxc:
        h = bc - gc
    elif g == maxc:
        h = 2.0 + rc - bc
    else:
        h = 4.0 + gc - rc
    return (h / 6.0) % 1.0, l, s
|
| 98 |
+
|
| 99 |
+
def hls_to_rgb(h, l, s):
    """Convert HLS back to RGB."""
    if s == 0.0:
        # Zero saturation: pure grey at the given lightness.
        return l, l, l
    m2 = l * (1.0 + s) if l <= 0.5 else l + s - (l * s)
    m1 = 2.0 * l - m2
    red = _v(m1, m2, h + ONE_THIRD)
    green = _v(m1, m2, h)
    blue = _v(m1, m2, h - ONE_THIRD)
    return (red, green, blue)
|
| 108 |
+
|
| 109 |
+
def _v(m1, m2, hue):
    """Helper for hls_to_rgb(): hue-dependent interpolation between m1 and m2."""
    hue = hue % 1.0
    if hue < ONE_SIXTH:
        # Rising edge.
        return m1 + (m2 - m1) * hue * 6.0
    elif hue < 0.5:
        # Plateau at the upper value.
        return m2
    elif hue < TWO_THIRD:
        # Falling edge.
        return m1 + (m2 - m1) * (TWO_THIRD - hue) * 6.0
    else:
        return m1
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# HSV: Hue, Saturation, Value
|
| 121 |
+
# H: position in the spectrum
|
| 122 |
+
# S: color saturation ("purity")
|
| 123 |
+
# V: color brightness
|
| 124 |
+
|
| 125 |
+
def rgb_to_hsv(r, g, b):
    """Convert RGB to HSV (hue, saturation, value)."""
    maxc = max(r, g, b)
    minc = min(r, g, b)
    v = maxc
    if minc == maxc:
        # Achromatic: hue and saturation are undefined; report 0.0.
        return 0.0, 0.0, v
    rangec = maxc - minc
    s = rangec / maxc
    rc = (maxc - r) / rangec
    gc = (maxc - g) / rangec
    bc = (maxc - b) / rangec
    if r == maxc:
        h = bc - gc
    elif g == maxc:
        h = 2.0 + rc - bc
    else:
        h = 4.0 + gc - rc
    return (h / 6.0) % 1.0, s, v
|
| 143 |
+
|
| 144 |
+
def hsv_to_rgb(h, s, v):
    """Convert HSV back to RGB."""
    if s == 0.0:
        # Zero saturation: grey, hue is irrelevant.
        return v, v, v
    i = int(h*6.0)  # XXX assume int() truncates!
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    i = i % 6
    # One entry per sextant of the hue circle (i is always 0..5 here).
    return (
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    )[i]
|
parrot/lib/python3.10/compileall.py
ADDED
|
@@ -0,0 +1,463 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Module/script to byte-compile all .py files to .pyc files.
|
| 2 |
+
|
| 3 |
+
When called as a script with arguments, this compiles the directories
|
| 4 |
+
given as arguments recursively; the -l option prevents it from
|
| 5 |
+
recursing into directories.
|
| 6 |
+
|
| 7 |
+
Without arguments, if compiles all modules on sys.path, without
|
| 8 |
+
recursing into subdirectories. (Even though it should do so for
|
| 9 |
+
packages -- for now, you'll have to deal with packages separately.)
|
| 10 |
+
|
| 11 |
+
See module py_compile for details of the actual byte-compilation.
|
| 12 |
+
"""
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import importlib.util
|
| 16 |
+
import py_compile
|
| 17 |
+
import struct
|
| 18 |
+
import filecmp
|
| 19 |
+
|
| 20 |
+
from functools import partial
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
|
| 23 |
+
__all__ = ["compile_dir","compile_file","compile_path"]
|
| 24 |
+
|
| 25 |
+
def _walk_dir(dir, maxlevels, quiet=0):
|
| 26 |
+
if quiet < 2 and isinstance(dir, os.PathLike):
|
| 27 |
+
dir = os.fspath(dir)
|
| 28 |
+
if not quiet:
|
| 29 |
+
print('Listing {!r}...'.format(dir))
|
| 30 |
+
try:
|
| 31 |
+
names = os.listdir(dir)
|
| 32 |
+
except OSError:
|
| 33 |
+
if quiet < 2:
|
| 34 |
+
print("Can't list {!r}".format(dir))
|
| 35 |
+
names = []
|
| 36 |
+
names.sort()
|
| 37 |
+
for name in names:
|
| 38 |
+
if name == '__pycache__':
|
| 39 |
+
continue
|
| 40 |
+
fullname = os.path.join(dir, name)
|
| 41 |
+
if not os.path.isdir(fullname):
|
| 42 |
+
yield fullname
|
| 43 |
+
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
|
| 44 |
+
os.path.isdir(fullname) and not os.path.islink(fullname)):
|
| 45 |
+
yield from _walk_dir(fullname, maxlevels=maxlevels - 1,
|
| 46 |
+
quiet=quiet)
|
| 47 |
+
|
| 48 |
+
def compile_dir(dir, maxlevels=None, ddir=None, force=False,
                rx=None, quiet=0, legacy=False, optimize=-1, workers=1,
                invalidation_mode=None, *, stripdir=None,
                prependdir=None, limit_sl_dest=None, hardlink_dupes=False):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir: the directory to byte-compile
    maxlevels: maximum recursion level (default `sys.getrecursionlimit()`)
    ddir: the directory that will be prepended to the path to the
       file as it is compiled into each byte-code file.
    force: if True, force compilation, even if timestamps are up-to-date
    quiet: full output with False or 0, errors only with 1,
           no output with 2
    legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize: int or list of optimization levels or -1 for level of
              the interpreter. Multiple levels leads to multiple compiled
              files each with one optimization level.
    workers: maximum number of parallel workers
    invalidation_mode: how the up-to-dateness of the pyc will be checked
    stripdir: part of path to left-strip from source file path
    prependdir: path to prepend to beginning of original file path, applied
       after stripdir
    limit_sl_dest: ignore symlinks if they are pointing outside of
                   the defined path
    hardlink_dupes: hardlink duplicated pyc files
    """
    ProcessPoolExecutor = None
    # ddir is the legacy spelling of stripdir+prependdir; the two styles
    # cannot be mixed.  A given ddir is translated into the equivalent
    # stripdir/prependdir pair below.
    if ddir is not None and (stripdir is not None or prependdir is not None):
        raise ValueError(("Destination dir (ddir) cannot be used "
                          "in combination with stripdir or prependdir"))
    if ddir is not None:
        stripdir = dir
        prependdir = ddir
        ddir = None
    if workers < 0:
        raise ValueError('workers must be greater or equal to 0')
    if workers != 1:
        # Check if this is a system where ProcessPoolExecutor can function.
        from concurrent.futures.process import _check_system_limits
        try:
            _check_system_limits()
        except NotImplementedError:
            # Fall back to serial compilation.
            workers = 1
        else:
            from concurrent.futures import ProcessPoolExecutor
    if maxlevels is None:
        maxlevels = sys.getrecursionlimit()
    files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels)
    success = True
    if workers != 1 and ProcessPoolExecutor is not None:
        # If workers == 0, let ProcessPoolExecutor choose
        workers = workers or None
        with ProcessPoolExecutor(max_workers=workers) as executor:
            results = executor.map(partial(compile_file,
                                           ddir=ddir, force=force,
                                           rx=rx, quiet=quiet,
                                           legacy=legacy,
                                           optimize=optimize,
                                           invalidation_mode=invalidation_mode,
                                           stripdir=stripdir,
                                           prependdir=prependdir,
                                           limit_sl_dest=limit_sl_dest,
                                           hardlink_dupes=hardlink_dupes),
                                   files)
            # min() over booleans: False as soon as any file failed.
            success = min(results, default=True)
    else:
        for file in files:
            if not compile_file(file, ddir, force, rx, quiet,
                                legacy, optimize, invalidation_mode,
                                stripdir=stripdir, prependdir=prependdir,
                                limit_sl_dest=limit_sl_dest,
                                hardlink_dupes=hardlink_dupes):
                success = False
    return success
| 125 |
+
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
                 legacy=False, optimize=-1,
                 invalidation_mode=None, *, stripdir=None, prependdir=None,
                 limit_sl_dest=None, hardlink_dupes=False):
    """Byte-compile one file.

    Arguments (only fullname is required):

    fullname: the file to byte-compile
    ddir: if given, the directory name compiled in to the
       byte-code file.
    force: if True, force compilation, even if timestamps are up-to-date
    quiet: full output with False or 0, errors only with 1,
           no output with 2
    legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize: int or list of optimization levels or -1 for level of
              the interpreter. Multiple levels leads to multiple compiled
              files each with one optimization level.
    invalidation_mode: how the up-to-dateness of the pyc will be checked
    stripdir: part of path to left-strip from source file path
    prependdir: path to prepend to beginning of original file path, applied
       after stripdir
    limit_sl_dest: ignore symlinks if they are pointing outside of
                   the defined path.
    hardlink_dupes: hardlink duplicated pyc files
    """

    if ddir is not None and (stripdir is not None or prependdir is not None):
        raise ValueError(("Destination dir (ddir) cannot be used "
                          "in combination with stripdir or prependdir"))

    success = True
    fullname = os.fspath(fullname)
    stripdir = os.fspath(stripdir) if stripdir is not None else None
    name = os.path.basename(fullname)

    # dfile is the source path that gets recorded inside the pyc (used
    # for tracebacks); None means record the real path.
    dfile = None

    if ddir is not None:
        dfile = os.path.join(ddir, name)

    if stripdir is not None:
        # Drop each leading path component shared with stripdir.
        fullname_parts = fullname.split(os.path.sep)
        stripdir_parts = stripdir.split(os.path.sep)
        ddir_parts = list(fullname_parts)

        for spart, opart in zip(stripdir_parts, fullname_parts):
            if spart == opart:
                ddir_parts.remove(spart)

        dfile = os.path.join(*ddir_parts)

    if prependdir is not None:
        if dfile is None:
            dfile = os.path.join(prependdir, fullname)
        else:
            dfile = os.path.join(prependdir, dfile)

    if isinstance(optimize, int):
        optimize = [optimize]

    # Use set() to remove duplicates.
    # Use sorted() to create pyc files in a deterministic order.
    optimize = sorted(set(optimize))

    if hardlink_dupes and len(optimize) < 2:
        raise ValueError("Hardlinking of duplicated bytecode makes sense "
                          "only for more than one optimization level")

    if rx is not None:
        # Skip files matching the user-supplied regular expression.
        mo = rx.search(fullname)
        if mo:
            return success

    if limit_sl_dest is not None and os.path.islink(fullname):
        if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents:
            return success

    # Map each optimization level to its target pyc path.
    opt_cfiles = {}

    if os.path.isfile(fullname):
        for opt_level in optimize:
            if legacy:
                opt_cfiles[opt_level] = fullname + 'c'
            else:
                if opt_level >= 0:
                    opt = opt_level if opt_level >= 1 else ''
                    cfile = (importlib.util.cache_from_source(
                             fullname, optimization=opt))
                    opt_cfiles[opt_level] = cfile
                else:
                    cfile = importlib.util.cache_from_source(fullname)
                    opt_cfiles[opt_level] = cfile

        head, tail = name[:-3], name[-3:]
        if tail == '.py':
            if not force:
                # Compare the 12-byte pyc header (magic, flags, mtime)
                # against the source mtime; skip if all targets are
                # already up to date.
                try:
                    mtime = int(os.stat(fullname).st_mtime)
                    expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER,
                                         0, mtime & 0xFFFF_FFFF)
                    for cfile in opt_cfiles.values():
                        with open(cfile, 'rb') as chandle:
                            actual = chandle.read(12)
                        if expect != actual:
                            break
                    else:
                        return success
                except OSError:
                    pass
            if not quiet:
                print('Compiling {!r}...'.format(fullname))
            try:
                for index, opt_level in enumerate(optimize):
                    cfile = opt_cfiles[opt_level]
                    ok = py_compile.compile(fullname, cfile, dfile, True,
                                            optimize=opt_level,
                                            invalidation_mode=invalidation_mode)
                    if index > 0 and hardlink_dupes:
                        # Identical output for consecutive levels: keep
                        # one copy and hardlink the duplicate to it.
                        previous_cfile = opt_cfiles[optimize[index - 1]]
                        if filecmp.cmp(cfile, previous_cfile, shallow=False):
                            os.unlink(cfile)
                            os.link(previous_cfile, cfile)
            except py_compile.PyCompileError as err:
                success = False
                if quiet >= 2:
                    return success
                elif quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                # escape non-printable characters in msg
                encoding = sys.stdout.encoding or sys.getdefaultencoding()
                msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding)
                print(msg)
            except (SyntaxError, UnicodeError, OSError) as e:
                success = False
                if quiet >= 2:
                    return success
                elif quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                print(e.__class__.__name__ + ':', e)
            else:
                if ok == 0:
                    success = False
    return success
| 274 |
+
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
                 legacy=False, optimize=-1,
                 invalidation_mode=None):
    """Byte-compile every module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default True)
    maxlevels: max recursion level (default 0)
    force: as for compile_dir() (default False)
    quiet: as for compile_dir() (default 0)
    legacy: as for compile_dir() (default False)
    optimize: as for compile_dir() (default -1)
    invalidation_mode: as for compile_dir()
    """
    success = True
    for dir in sys.path:
        if skip_curdir and (not dir or dir == os.curdir):
            if quiet < 2:
                print('Skipping current directory')
            continue
        # NOTE: `and` short-circuits -- once one entry fails, the
        # remaining sys.path entries are not compiled.
        success = success and compile_dir(
            dir,
            maxlevels,
            None,
            force,
            quiet=quiet,
            legacy=legacy,
            optimize=optimize,
            invalidation_mode=invalidation_mode,
        )
    return success
|
| 307 |
+
|
| 308 |
+
def main():
    """Script main program.

    Parses command-line options and dispatches to compile_file(),
    compile_dir() or compile_path().  Returns True on success.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='Utilities to support installing Python libraries.')
    parser.add_argument('-l', action='store_const', const=0,
                        default=None, dest='maxlevels',
                        help="don't recurse into subdirectories")
    parser.add_argument('-r', type=int, dest='recursion',
                        help=('control the maximum recursion level. '
                              'if `-l` and `-r` options are specified, '
                              'then `-r` takes precedence.'))
    parser.add_argument('-f', action='store_true', dest='force',
                        help='force rebuild even if timestamps are up to date')
    parser.add_argument('-q', action='count', dest='quiet', default=0,
                        help='output only error messages; -qq will suppress '
                             'the error messages as well.')
    parser.add_argument('-b', action='store_true', dest='legacy',
                        help='use legacy (pre-PEP3147) compiled file locations')
    parser.add_argument('-d', metavar='DESTDIR',  dest='ddir', default=None,
                        help=('directory to prepend to file paths for use in '
                              'compile-time tracebacks and in runtime '
                              'tracebacks in cases where the source file is '
                              'unavailable'))
    parser.add_argument('-s', metavar='STRIPDIR',  dest='stripdir',
                        default=None,
                        help=('part of path to left-strip from path '
                              'to source file - for example buildroot. '
                              '`-d` and `-s` options cannot be '
                              'specified together.'))
    parser.add_argument('-p', metavar='PREPENDDIR',  dest='prependdir',
                        default=None,
                        help=('path to add as prefix to path '
                              'to source file - for example / to make '
                              'it absolute when some part is removed '
                              'by `-s` option. '
                              '`-d` and `-p` options cannot be '
                              'specified together.'))
    parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
                        help=('skip files matching the regular expression; '
                              'the regexp is searched for in the full path '
                              'of each file considered for compilation'))
    parser.add_argument('-i', metavar='FILE', dest='flist',
                        help=('add all the files and directories listed in '
                              'FILE to the list considered for compilation; '
                              'if "-", names are read from stdin'))
    parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
                        help=('zero or more file and directory names '
                              'to compile; if no arguments given, defaults '
                              'to the equivalent of -l sys.path'))
    parser.add_argument('-j', '--workers', default=1,
                        type=int, help='Run compileall concurrently')
    invalidation_modes = [mode.name.lower().replace('_', '-')
                          for mode in py_compile.PycInvalidationMode]
    parser.add_argument('--invalidation-mode',
                        choices=sorted(invalidation_modes),
                        help=('set .pyc invalidation mode; defaults to '
                              '"checked-hash" if the SOURCE_DATE_EPOCH '
                              'environment variable is set, and '
                              '"timestamp" otherwise.'))
    parser.add_argument('-o', action='append', type=int, dest='opt_levels',
                        help=('Optimization levels to run compilation with. '
                              'Default is -1 which uses the optimization level '
                              'of the Python interpreter itself (see -O).'))
    # Fixed typo in help text: "outsite" -> "outside".
    parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
                        help='Ignore symlinks pointing outside of the DIR')
    parser.add_argument('--hardlink-dupes', action='store_true',
                        dest='hardlink_dupes',
                        help='Hardlink duplicated pyc files')

    args = parser.parse_args()
    compile_dests = args.compile_dest

    if args.rx:
        import re
        args.rx = re.compile(args.rx)

    if args.limit_sl_dest == "":
        args.limit_sl_dest = None

    # `-r` takes precedence over `-l` when both are given.
    if args.recursion is not None:
        maxlevels = args.recursion
    else:
        maxlevels = args.maxlevels

    if args.opt_levels is None:
        args.opt_levels = [-1]

    if len(args.opt_levels) == 1 and args.hardlink_dupes:
        parser.error(("Hardlinking of duplicated bytecode makes sense "
                      "only for more than one optimization level."))

    if args.ddir is not None and (
        args.stripdir is not None or args.prependdir is not None
    ):
        parser.error("-d cannot be used in combination with -s or -p")

    # if flist is provided then load it
    if args.flist:
        try:
            with (sys.stdin if args.flist=='-' else
                    open(args.flist, encoding="utf-8")) as f:
                for line in f:
                    compile_dests.append(line.strip())
        except OSError:
            if args.quiet < 2:
                print("Error reading file list {}".format(args.flist))
            return False

    if args.invalidation_mode:
        ivl_mode = args.invalidation_mode.replace('-', '_').upper()
        invalidation_mode = py_compile.PycInvalidationMode[ivl_mode]
    else:
        invalidation_mode = None

    success = True
    try:
        if compile_dests:
            for dest in compile_dests:
                if os.path.isfile(dest):
                    if not compile_file(dest, args.ddir, args.force, args.rx,
                                        args.quiet, args.legacy,
                                        invalidation_mode=invalidation_mode,
                                        stripdir=args.stripdir,
                                        prependdir=args.prependdir,
                                        optimize=args.opt_levels,
                                        limit_sl_dest=args.limit_sl_dest,
                                        hardlink_dupes=args.hardlink_dupes):
                        success = False
                else:
                    if not compile_dir(dest, maxlevels, args.ddir,
                                       args.force, args.rx, args.quiet,
                                       args.legacy, workers=args.workers,
                                       invalidation_mode=invalidation_mode,
                                       stripdir=args.stripdir,
                                       prependdir=args.prependdir,
                                       optimize=args.opt_levels,
                                       limit_sl_dest=args.limit_sl_dest,
                                       hardlink_dupes=args.hardlink_dupes):
                        success = False
            return success
        else:
            return compile_path(legacy=args.legacy, force=args.force,
                                quiet=args.quiet,
                                invalidation_mode=invalidation_mode)
    except KeyboardInterrupt:
        if args.quiet < 2:
            print("\n[interrupted]")
        return False
    return True
| 460 |
+
|
| 461 |
+
if __name__ == '__main__':
    # Exit with status 0 when everything compiled, 1 otherwise.
    exit_status = int(not main())
    sys.exit(exit_status)
|
parrot/lib/python3.10/crypt.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Wrapper to the POSIX crypt library call and associated functionality."""
|
| 2 |
+
|
| 3 |
+
import sys as _sys
|
| 4 |
+
|
| 5 |
+
try:
|
| 6 |
+
import _crypt
|
| 7 |
+
except ModuleNotFoundError:
|
| 8 |
+
if _sys.platform == 'win32':
|
| 9 |
+
raise ImportError("The crypt module is not supported on Windows")
|
| 10 |
+
else:
|
| 11 |
+
raise ImportError("The required _crypt module was not built as part of CPython")
|
| 12 |
+
|
| 13 |
+
import errno
|
| 14 |
+
import string as _string
|
| 15 |
+
from random import SystemRandom as _SystemRandom
|
| 16 |
+
from collections import namedtuple as _namedtuple
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
_saltchars = _string.ascii_letters + _string.digits + './'
|
| 20 |
+
_sr = _SystemRandom()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')):
|
| 24 |
+
|
| 25 |
+
"""Class representing a salt method per the Modular Crypt Format or the
|
| 26 |
+
legacy 2-character crypt method."""
|
| 27 |
+
|
| 28 |
+
def __repr__(self):
|
| 29 |
+
return '<crypt.METHOD_{}>'.format(self.name)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def mksalt(method=None, *, rounds=None):
    """Build a salt string for *method*.

    When *method* is None the strongest method available on this
    platform (``methods[0]``) is used.  *rounds*, where supported,
    selects the hash's cost parameter.
    """
    if method is None:
        method = methods[0]
    if rounds is not None and not isinstance(rounds, int):
        raise TypeError(f'{rounds.__class__.__name__} object cannot be '
                        f'interpreted as an integer')
    # Traditional crypt has no '$ident$' prefix; MCF methods do.
    salt = f'${method.ident}$' if method.ident else ''

    ident = method.ident or ''
    if ident.startswith('2'):  # Blowfish variants
        if rounds is None:
            log_rounds = 12
        else:
            log_rounds = int.bit_length(rounds - 1)
            if rounds != 1 << log_rounds:
                raise ValueError('rounds must be a power of 2')
            if not 4 <= log_rounds <= 31:
                raise ValueError('rounds out of the range 2**4 to 2**31')
        salt += f'{log_rounds:02d}$'
    elif ident in ('5', '6'):  # SHA-2 family
        if rounds is not None:
            if not 1000 <= rounds <= 999_999_999:
                raise ValueError('rounds out of the range 1000 to 999_999_999')
            salt += f'rounds={rounds}$'
    elif rounds is not None:
        raise ValueError(f"{method} doesn't support the rounds argument")

    salt += ''.join(_sr.choice(_saltchars) for _ in range(method.salt_chars))
    return salt
|
| 69 |
+
|
| 70 |
+
def crypt(word, salt=None):
    """One-way hash *word*, returning the salt-prefixed result string.

    ``salt`` may be omitted/None (the strongest available method is
    picked and a salt generated), a ``crypt.METHOD_*`` value, or a
    string such as one returned by ``crypt.mksalt()``.
    """
    needs_generated_salt = salt is None or isinstance(salt, _Method)
    if needs_generated_salt:
        salt = mksalt(salt)
    return _crypt.crypt(word, salt)
| 84 |
+
|
| 85 |
+
# available salting/crypto methods
methods = []

def _add_method(name, *args, rounds=None):
    """Probe one hashing method and register it if the libc supports it.

    Always defines the METHOD_<name> module constant; appends the
    method to ``methods`` (and returns True) only when a trial crypt()
    call succeeds and yields a result of the expected total size.
    """
    method = _Method(name, *args)
    globals()['METHOD_' + name] = method
    salt = mksalt(method, rounds=rounds)
    result = None
    try:
        result = crypt('', salt)
    except OSError as e:
        # Not all libc libraries support all encryption methods.
        if e.errno in {errno.EINVAL, errno.EPERM, errno.ENOSYS}:
            return False
        raise
    if result and len(result) == method.total_size:
        methods.append(method)
        return True
    return False

_add_method('SHA512', '6', 16, 106)
_add_method('SHA256', '5', 16, 63)

# Choose the strongest supported version of Blowfish hashing.
# Early versions have flaws.  Version 'a' fixes flaws of
# the initial implementation, 'b' fixes flaws of 'a'.
# 'y' is the same as 'b', for compatibility
# with openwall crypt_blowfish.
for _v in 'b', 'y', 'a', '':
    if _add_method('BLOWFISH', '2' + _v, 22, 59 + len(_v), rounds=1<<4):
        break

_add_method('MD5', '1', 8, 34)
_add_method('CRYPT', None, 2, 13)

# Clean up the probing machinery; only `methods` and the METHOD_*
# constants are part of the module's public surface.
del _v, _add_method
|
parrot/lib/python3.10/csv.py
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
"""
|
| 3 |
+
csv.py - read/write/investigate CSV files
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
from _csv import Error, __version__, writer, reader, register_dialect, \
|
| 8 |
+
unregister_dialect, get_dialect, list_dialects, \
|
| 9 |
+
field_size_limit, \
|
| 10 |
+
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
|
| 11 |
+
__doc__
|
| 12 |
+
from _csv import Dialect as _Dialect
|
| 13 |
+
|
| 14 |
+
from io import StringIO
|
| 15 |
+
|
| 16 |
+
__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
|
| 17 |
+
"Error", "Dialect", "__doc__", "excel", "excel_tab",
|
| 18 |
+
"field_size_limit", "reader", "writer",
|
| 19 |
+
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
|
| 20 |
+
"unregister_dialect", "__version__", "DictReader", "DictWriter",
|
| 21 |
+
"unix_dialect"]
|
| 22 |
+
|
| 23 |
+
class Dialect:
    """Describe a CSV dialect.

    This must be subclassed (see csv.excel).  Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.

    """
    _name = ""
    _valid = False
    # placeholders
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only subclasses count as valid dialects; the base class keeps
        # _valid False and its all-None placeholders will not pass
        # validation below.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate attribute checking to the C extension's Dialect.
        try:
            _Dialect(self)
        except TypeError as e:
            # We do this for compatibility with py2.3
            raise Error(str(e))
|
| 54 |
+
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# "excel" is the default dialect used by reader/writer/DictReader/DictWriter.
register_dialect("excel", excel)
|
| 64 |
+
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    # Identical to the excel dialect except for the delimiter.
    delimiter = '\t'
register_dialect("excel-tab", excel_tab)
|
| 69 |
+
class unix_dialect(Dialect):
    """Describe the usual properties of Unix-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    # LF line endings and quote-all, unlike the excel dialect.
    lineterminator = '\n'
    quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
|
| 79 |
+
|
| 80 |
+
class DictReader:
    """Iterate over CSV rows as dicts keyed by *fieldnames*.

    If *fieldnames* is not given, the first row read supplies the keys.
    Extra values in a long row are collected into a list under *restkey*;
    missing values in a short row are filled with *restval*.
    """
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        # Lazily consume the header row the first time the names are
        # needed; also keeps line_num in sync with the underlying reader.
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            # Used only for its side effect.
            self.fieldnames
        row = next(self.reader)
        self.line_num = self.reader.line_num

        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = next(self.reader)
        d = dict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            # Long row: stash the surplus values under restkey.
            d[self.restkey] = row[lf:]
        elif lf > lr:
            # Short row: pad the missing keys with restval.
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d
| 129 |
+
|
| 130 |
+
class DictWriter:
    """Write mappings to a CSV stream, one dict per row.

    *fieldnames* fixes the column order.  Keys missing from a row are
    written as *restval*; keys not in *fieldnames* either raise a
    ValueError (the default) or are silently dropped, depending on
    *extrasaction*.
    """

    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # column order for every output row
        self.restval = restval          # filler for keys absent from a row
        action = extrasaction.lower()
        if action not in ("raise", "ignore"):
            raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
                             % extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        # The header is simply a row mapping each fieldname to itself.
        header = dict(zip(self.fieldnames, self.fieldnames))
        return self.writerow(header)

    def _dict_to_list(self, rowdict):
        if self.extrasaction == "raise":
            unexpected = rowdict.keys() - self.fieldnames
            if unexpected:
                raise ValueError("dict contains fields not in fieldnames: "
                                 + ", ".join([repr(x) for x in unexpected]))
        return (rowdict.get(field, self.restval) for field in self.fieldnames)

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        return self.writer.writerows(map(self._dict_to_list, rowdicts))
|
| 159 |
+
# Guard Sniffer's type checking against builds that exclude complex()
# Sniffer.has_header() uses complex() as its most permissive numeric
# cast (complex() accepts int and float strings too); on builds without
# complex support, fall back to float so has_header() still works.
try:
    complex
except NameError:
    complex = float
|
| 164 |
+
|
| 165 |
+
class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    def __init__(self):
        # in case there is more than one possible delimiter, this
        # preference order breaks ties in _guess_delimiter()
        self.preferred = [',', '\t', ';', ' ', ':']


    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample
        """

        # First try the quote-based heuristic; if it cannot find a
        # delimiter, fall back to the character-frequency heuristic.
        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        # Build a throwaway Dialect subclass carrying the sniffed values.
        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect


    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.

        Returns a (quotechar, doublequote, delimiter, skipinitialspace)
        tuple; delimiter is None when nothing matched.
        """

        matches = []
        # Patterns are tried most-specific first; the last one has no
        # 'delim'/'space' groups, which the KeyError handling below relies on.
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)
        quotes = {}
        delims = {}
        spaces = 0
        # findall() returns tuples indexed by group number - 1, so map
        # named groups through groupindex to find each value's slot.
        groupindex = regexp.groupindex
        for m in matches:
            n = groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                # The last pattern has no 'delim' group; skip to next match.
                n = groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        # Most frequently seen quote character wins.
        quotechar = max(quotes, key=quotes.get)

        if delims:
            delim = max(delims, key=delims.get)
            # skipinitialspace only if *every* delimiter was space-followed.
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
                               r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)



        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)


    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.

        Returns a (delimiter, skipinitialspace) tuple; delimiter is ''
        when no candidate survives.
        """

        # Drop empty lines so blank rows don't skew the frequency tables.
        data = list(filter(None, data.split('\n')))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, chunkLength
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    # metaFrequency maps per-line count -> number of lines
                    # with that count, for this character.
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = list(charFrequency[char].items())
                # A character that never appears is not a candidate.
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = max(items, key=lambda x: x[1])
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - sum(item[1] for item in items))
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(min(chunkLength * iteration, len(data)))
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            # Relax the consistency requirement in 1% steps until some
            # candidate qualifies or the threshold is reached.
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = list(delims.keys())[0]
                # skipinitialspace if every delimiter on the first line
                # is followed by a space.
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)


    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.
        # Returns True when the votes favour a header row.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            # Iterate over a copy of the keys: columns may be deleted
            # from columnTypes inside the loop.
            for col in list(columnTypes.keys()):
                # complex() is the most permissive numeric constructor
                # (see the module-level fallback guard above).
                thisType = complex
                try:
                    thisType(row[col])
                except (ValueError, OverflowError):
                    # fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
|
parrot/lib/python3.10/dataclasses.py
ADDED
|
@@ -0,0 +1,1453 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import sys
|
| 3 |
+
import copy
|
| 4 |
+
import types
|
| 5 |
+
import inspect
|
| 6 |
+
import keyword
|
| 7 |
+
import builtins
|
| 8 |
+
import functools
|
| 9 |
+
import abc
|
| 10 |
+
import _thread
|
| 11 |
+
from types import FunctionType, GenericAlias
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Public API of this module; everything else is an implementation detail.
__all__ = ['dataclass',
           'field',
           'Field',
           'FrozenInstanceError',
           'InitVar',
           'KW_ONLY',
           'MISSING',

           # Helper functions.
           'fields',
           'asdict',
           'astuple',
           'make_dataclass',
           'replace',
           'is_dataclass',
           ]
|
| 30 |
+
|
| 31 |
+
# Conditions for adding methods. The boxes indicate what action the
|
| 32 |
+
# dataclass decorator takes. For all of these tables, when I talk
|
| 33 |
+
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
|
| 34 |
+
# referring to the arguments to the @dataclass decorator. When
|
| 35 |
+
# checking if a dunder method already exists, I mean check for an
|
| 36 |
+
# entry in the class's __dict__. I never check to see if an attribute
|
| 37 |
+
# is defined in a base class.
|
| 38 |
+
|
| 39 |
+
# Key:
|
| 40 |
+
# +=========+=========================================+
|
| 41 |
+
# + Value | Meaning |
|
| 42 |
+
# +=========+=========================================+
|
| 43 |
+
# | <blank> | No action: no method is added. |
|
| 44 |
+
# +---------+-----------------------------------------+
|
| 45 |
+
# | add | Generated method is added. |
|
| 46 |
+
# +---------+-----------------------------------------+
|
| 47 |
+
# | raise | TypeError is raised. |
|
| 48 |
+
# +---------+-----------------------------------------+
|
| 49 |
+
# | None | Attribute is set to None. |
|
| 50 |
+
# +=========+=========================================+
|
| 51 |
+
|
| 52 |
+
# __init__
|
| 53 |
+
#
|
| 54 |
+
# +--- init= parameter
|
| 55 |
+
# |
|
| 56 |
+
# v | | |
|
| 57 |
+
# | no | yes | <--- class has __init__ in __dict__?
|
| 58 |
+
# +=======+=======+=======+
|
| 59 |
+
# | False | | |
|
| 60 |
+
# +-------+-------+-------+
|
| 61 |
+
# | True | add | | <- the default
|
| 62 |
+
# +=======+=======+=======+
|
| 63 |
+
|
| 64 |
+
# __repr__
|
| 65 |
+
#
|
| 66 |
+
# +--- repr= parameter
|
| 67 |
+
# |
|
| 68 |
+
# v | | |
|
| 69 |
+
# | no | yes | <--- class has __repr__ in __dict__?
|
| 70 |
+
# +=======+=======+=======+
|
| 71 |
+
# | False | | |
|
| 72 |
+
# +-------+-------+-------+
|
| 73 |
+
# | True | add | | <- the default
|
| 74 |
+
# +=======+=======+=======+
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# __setattr__
|
| 78 |
+
# __delattr__
|
| 79 |
+
#
|
| 80 |
+
# +--- frozen= parameter
|
| 81 |
+
# |
|
| 82 |
+
# v | | |
|
| 83 |
+
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
|
| 84 |
+
# +=======+=======+=======+
|
| 85 |
+
# | False | | | <- the default
|
| 86 |
+
# +-------+-------+-------+
|
| 87 |
+
# | True | add | raise |
|
| 88 |
+
# +=======+=======+=======+
|
| 89 |
+
# Raise because not adding these methods would break the "frozen-ness"
|
| 90 |
+
# of the class.
|
| 91 |
+
|
| 92 |
+
# __eq__
|
| 93 |
+
#
|
| 94 |
+
# +--- eq= parameter
|
| 95 |
+
# |
|
| 96 |
+
# v | | |
|
| 97 |
+
# | no | yes | <--- class has __eq__ in __dict__?
|
| 98 |
+
# +=======+=======+=======+
|
| 99 |
+
# | False | | |
|
| 100 |
+
# +-------+-------+-------+
|
| 101 |
+
# | True | add | | <- the default
|
| 102 |
+
# +=======+=======+=======+
|
| 103 |
+
|
| 104 |
+
# __lt__
|
| 105 |
+
# __le__
|
| 106 |
+
# __gt__
|
| 107 |
+
# __ge__
|
| 108 |
+
#
|
| 109 |
+
# +--- order= parameter
|
| 110 |
+
# |
|
| 111 |
+
# v | | |
|
| 112 |
+
# | no | yes | <--- class has any comparison method in __dict__?
|
| 113 |
+
# +=======+=======+=======+
|
| 114 |
+
# | False | | | <- the default
|
| 115 |
+
# +-------+-------+-------+
|
| 116 |
+
# | True | add | raise |
|
| 117 |
+
# +=======+=======+=======+
|
| 118 |
+
# Raise because to allow this case would interfere with using
|
| 119 |
+
# functools.total_ordering.
|
| 120 |
+
|
| 121 |
+
# __hash__
|
| 122 |
+
|
| 123 |
+
# +------------------- unsafe_hash= parameter
|
| 124 |
+
# | +----------- eq= parameter
|
| 125 |
+
# | | +--- frozen= parameter
|
| 126 |
+
# | | |
|
| 127 |
+
# v v v | | |
|
| 128 |
+
# | no | yes | <--- class has explicitly defined __hash__
|
| 129 |
+
# +=======+=======+=======+========+========+
|
| 130 |
+
# | False | False | False | | | No __eq__, use the base class __hash__
|
| 131 |
+
# +-------+-------+-------+--------+--------+
|
| 132 |
+
# | False | False | True | | | No __eq__, use the base class __hash__
|
| 133 |
+
# +-------+-------+-------+--------+--------+
|
| 134 |
+
# | False | True | False | None | | <-- the default, not hashable
|
| 135 |
+
# +-------+-------+-------+--------+--------+
|
| 136 |
+
# | False | True | True | add | | Frozen, so hashable, allows override
|
| 137 |
+
# +-------+-------+-------+--------+--------+
|
| 138 |
+
# | True | False | False | add | raise | Has no __eq__, but hashable
|
| 139 |
+
# +-------+-------+-------+--------+--------+
|
| 140 |
+
# | True | False | True | add | raise | Has no __eq__, but hashable
|
| 141 |
+
# +-------+-------+-------+--------+--------+
|
| 142 |
+
# | True | True | False | add | raise | Not frozen, but hashable
|
| 143 |
+
# +-------+-------+-------+--------+--------+
|
| 144 |
+
# | True | True | True | add | raise | Frozen, so hashable
|
| 145 |
+
# +=======+=======+=======+========+========+
|
| 146 |
+
# For boxes that are blank, __hash__ is untouched and therefore
|
| 147 |
+
# inherited from the base class. If the base is object, then
|
| 148 |
+
# id-based hashing is used.
|
| 149 |
+
#
|
| 150 |
+
# Note that a class may already have __hash__=None if it specified an
|
| 151 |
+
# __eq__ method in the class body (not one that was created by
|
| 152 |
+
# @dataclass).
|
| 153 |
+
#
|
| 154 |
+
# See _hash_action (below) for a coded version of this table.
|
| 155 |
+
|
| 156 |
+
# __match_args__
|
| 157 |
+
#
|
| 158 |
+
# +--- match_args= parameter
|
| 159 |
+
# |
|
| 160 |
+
# v | | |
|
| 161 |
+
# | no | yes | <--- class has __match_args__ in __dict__?
|
| 162 |
+
# +=======+=======+=======+
|
| 163 |
+
# | False | | |
|
| 164 |
+
# +-------+-------+-------+
|
| 165 |
+
# | True | add | | <- the default
|
| 166 |
+
# +=======+=======+=======+
|
| 167 |
+
# __match_args__ is always added unless the class already defines it. It is a
|
| 168 |
+
# tuple of __init__ parameter names; non-init fields must be matched by keyword.
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class FrozenInstanceError(AttributeError):
    """Raised when an attempt is made to modify a frozen dataclass instance."""
|
| 173 |
+
|
| 174 |
+
# A sentinel object for default values to signal that a default
|
| 175 |
+
# factory will be used. This is given a nice repr() which will appear
|
| 176 |
+
# in the function signature of dataclasses' constructors.
|
| 177 |
+
class _HAS_DEFAULT_FACTORY_CLASS:
|
| 178 |
+
def __repr__(self):
|
| 179 |
+
return '<factory>'
|
| 180 |
+
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
|
| 181 |
+
|
| 182 |
+
# A sentinel object to detect if a parameter is supplied or not. Use
|
| 183 |
+
# a class to give it a better repr.
|
| 184 |
+
class _MISSING_TYPE:
|
| 185 |
+
pass
|
| 186 |
+
MISSING = _MISSING_TYPE()
|
| 187 |
+
|
| 188 |
+
# A sentinel object to indicate that following fields are keyword-only by
|
| 189 |
+
# default. Use a class to give it a better repr.
|
| 190 |
+
class _KW_ONLY_TYPE:
|
| 191 |
+
pass
|
| 192 |
+
KW_ONLY = _KW_ONLY_TYPE()
|
| 193 |
+
|
| 194 |
+
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
# MappingProxyType makes the shared mapping immutable to callers.
_EMPTY_METADATA = types.MappingProxyType({})
|
| 197 |
+
|
| 198 |
+
# Markers for the various kinds of fields and pseudo-fields.
|
| 199 |
+
class _FIELD_BASE:
|
| 200 |
+
def __init__(self, name):
|
| 201 |
+
self.name = name
|
| 202 |
+
def __repr__(self):
|
| 203 |
+
return self.name
|
| 204 |
+
_FIELD = _FIELD_BASE('_FIELD')
|
| 205 |
+
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
|
| 206 |
+
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
|
| 207 |
+
|
| 208 |
+
# The name of an attribute on the class where we store the Field
# objects.  Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'

# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'

# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'

# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# Group 1 captures the optional module prefix, group 2 the bare name.
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
|
| 224 |
+
|
| 225 |
+
# This function's logic is copied from "recursive_repr" function in
|
| 226 |
+
# reprlib module to avoid dependency.
|
| 227 |
+
def _recursive_repr(user_function):
|
| 228 |
+
# Decorator to make a repr function return "..." for a recursive
|
| 229 |
+
# call.
|
| 230 |
+
repr_running = set()
|
| 231 |
+
|
| 232 |
+
@functools.wraps(user_function)
|
| 233 |
+
def wrapper(self):
|
| 234 |
+
key = id(self), _thread.get_ident()
|
| 235 |
+
if key in repr_running:
|
| 236 |
+
return '...'
|
| 237 |
+
repr_running.add(key)
|
| 238 |
+
try:
|
| 239 |
+
result = user_function(self)
|
| 240 |
+
finally:
|
| 241 |
+
repr_running.discard(key)
|
| 242 |
+
return result
|
| 243 |
+
return wrapper
|
| 244 |
+
|
| 245 |
+
class InitVar:
    """Marker for init-only pseudo-fields: ``InitVar[T]`` wraps type T."""

    __slots__ = ('type',)

    def __init__(self, type):
        self.type = type

    def __repr__(self):
        wrapped = self.type
        # Plain classes are shown by name; typing constructs (e.g.
        # List[int]) and GenericAlias objects are shown via repr().  The
        # GenericAlias exclusion matters on interpreters where
        # isinstance(list[int], type) is true.
        if isinstance(wrapped, type) and not isinstance(wrapped, GenericAlias):
            shown = wrapped.__name__
        else:
            shown = repr(wrapped)
        return f'dataclasses.InitVar[{shown}]'

    def __class_getitem__(cls, type):
        # Subscripting the class itself builds an instance: InitVar[T].
        return InitVar(type)
|
| 261 |
+
|
| 262 |
+
# Instances of Field are only ever created from within this module,
|
| 263 |
+
# and only from the field() function, although Field instances are
|
| 264 |
+
# exposed externally as (conceptually) read-only objects.
|
| 265 |
+
#
|
| 266 |
+
# name and type are filled in after the fact, not in __init__.
|
| 267 |
+
# They're not known at the time this class is instantiated, but it's
|
| 268 |
+
# convenient if they're available later.
|
| 269 |
+
#
|
| 270 |
+
# When cls._FIELDS is filled in with a list of Field objects, the name
|
| 271 |
+
# and type fields will have been populated.
|
| 272 |
+
class Field:
    """Represents one field of a dataclass.

    Instances are created only from within this module, by the field()
    function, and are exposed externally as (conceptually) read-only
    objects.  ``name`` and ``type`` are not known when the instance is
    created; they are filled in after the fact, once the owning class
    and its annotations are processed.
    """

    __slots__ = ('name',
                 'type',
                 'default',
                 'default_factory',
                 'repr',
                 'hash',
                 'init',
                 'compare',
                 'metadata',
                 'kw_only',
                 '_field_type',  # Private: not to be used by user code.
                 )

    def __init__(self, default, default_factory, init, repr, hash, compare,
                 metadata, kw_only):
        # name and type are populated later, when the owning class is known.
        self.name = None
        self.type = None
        self.default = default
        self.default_factory = default_factory
        self.init = init
        self.repr = repr
        self.hash = hash
        self.compare = compare
        # Share the single empty read-only proxy when no metadata is
        # given; otherwise wrap the caller's mapping in its own proxy.
        self.metadata = (_EMPTY_METADATA
                         if metadata is None else
                         types.MappingProxyType(metadata))
        self.kw_only = kw_only
        self._field_type = None

    # _recursive_repr makes a default value whose repr refers back to
    # this Field print as '...' instead of recursing forever.
    @_recursive_repr
    def __repr__(self):
        return ('Field('
                f'name={self.name!r},'
                f'type={self.type!r},'
                f'default={self.default!r},'
                f'default_factory={self.default_factory!r},'
                f'init={self.init!r},'
                f'repr={self.repr!r},'
                f'hash={self.hash!r},'
                f'compare={self.compare!r},'
                f'metadata={self.metadata!r},'
                f'kw_only={self.kw_only!r},'
                f'_field_type={self._field_type}'
                ')')

    # This is used to support the PEP 487 __set_name__ protocol in the
    # case where we're using a field that contains a descriptor as a
    # default value.  For details on __set_name__, see
    # https://www.python.org/dev/peps/pep-0487/#implementation-details.
    #
    # Note that in _process_class, this Field object is overwritten
    # with the default value, so the end result is a descriptor that
    # had __set_name__ called on it at the right time.
    def __set_name__(self, owner, name):
        func = getattr(type(self.default), '__set_name__', None)
        if func:
            # There is a __set_name__ method on the descriptor, call
            # it.
            func(self.default, owner, name)

    # Allow subscripting Field itself (returns a types.GenericAlias).
    __class_getitem__ = classmethod(GenericAlias)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
class _DataclassParams:
|
| 337 |
+
__slots__ = ('init',
|
| 338 |
+
'repr',
|
| 339 |
+
'eq',
|
| 340 |
+
'order',
|
| 341 |
+
'unsafe_hash',
|
| 342 |
+
'frozen',
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
|
| 346 |
+
self.init = init
|
| 347 |
+
self.repr = repr
|
| 348 |
+
self.eq = eq
|
| 349 |
+
self.order = order
|
| 350 |
+
self.unsafe_hash = unsafe_hash
|
| 351 |
+
self.frozen = frozen
|
| 352 |
+
|
| 353 |
+
def __repr__(self):
|
| 354 |
+
return ('_DataclassParams('
|
| 355 |
+
f'init={self.init!r},'
|
| 356 |
+
f'repr={self.repr!r},'
|
| 357 |
+
f'eq={self.eq!r},'
|
| 358 |
+
f'order={self.order!r},'
|
| 359 |
+
f'unsafe_hash={self.unsafe_hash!r},'
|
| 360 |
+
f'frozen={self.frozen!r}'
|
| 361 |
+
')')
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
# This function is used instead of exposing Field creation directly,
|
| 365 |
+
# so that a type checker can be told (via overloads) that this is a
|
| 366 |
+
# function whose type depends on its parameters.
|
| 367 |
+
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
          hash=None, compare=True, metadata=None, kw_only=MISSING):
    """Return an object to identify dataclass fields.

    default is the default value of the field.  default_factory is a
    0-argument function called to initialize a field's value.  If init
    is true, the field will be a parameter to the class's __init__()
    function.  If repr is true, the field will be included in the
    object's repr().  If hash is true, the field will be included in the
    object's hash().  If compare is true, the field will be used in
    comparison functions.  metadata, if specified, must be a mapping
    which is stored but not otherwise examined by dataclass.  If kw_only
    is true, the field will become a keyword-only parameter to
    __init__().

    It is an error to specify both default and default_factory.
    """

    # A field may have a static default OR a factory, never both; the
    # remaining per-field validation happens later in _get_field(),
    # once the field's name is known.
    if default is not MISSING and default_factory is not MISSING:
        raise ValueError('cannot specify both default and default_factory')
    return Field(default, default_factory, init, repr, hash, compare,
                 metadata, kw_only)
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _fields_in_init_order(fields):
|
| 392 |
+
# Returns the fields as __init__ will output them. It returns 2 tuples:
|
| 393 |
+
# the first for normal args, and the second for keyword args.
|
| 394 |
+
|
| 395 |
+
return (tuple(f for f in fields if f.init and not f.kw_only),
|
| 396 |
+
tuple(f for f in fields if f.init and f.kw_only)
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def _tuple_str(obj_name, fields):
|
| 401 |
+
# Return a string representing each field of obj_name as a tuple
|
| 402 |
+
# member. So, if fields is ['x', 'y'] and obj_name is "self",
|
| 403 |
+
# return "(self.x,self.y)".
|
| 404 |
+
|
| 405 |
+
# Special case for the 0-tuple.
|
| 406 |
+
if not fields:
|
| 407 |
+
return '()'
|
| 408 |
+
# Note the trailing comma, needed if this turns out to be a 1-tuple.
|
| 409 |
+
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def _create_fn(name, args, body, *, globals=None, locals=None,
               return_type=MISSING):
    """Build and return a function named *name* by exec-ing generated source.

    *args* is a sequence of parameter strings and *body* a sequence of
    statement strings.  Names in *locals* are injected as parameters of
    an enclosing factory function (__create_fn__) so the generated code
    can reference them via the closure without polluting *globals*.
    """
    # Note that we may mutate locals. Callers beware!
    # The only callers are internal to this module, so no
    # worries about external callers.
    if locals is None:
        locals = {}
    return_annotation = ''
    if return_type is not MISSING:
        # Annotate the return type by name; the object itself travels
        # through the closure as _return_type.
        locals['_return_type'] = return_type
        return_annotation = '->_return_type'
    args = ','.join(args)
    body = '\n'.join(f'  {b}' for b in body)

    # Compute the text of the entire function.
    txt = f' def {name}({args}){return_annotation}:\n{body}'

    # Wrap in the factory so exec leaves only __create_fn__ in ns.
    local_vars = ', '.join(locals.keys())
    txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
    ns = {}
    exec(txt, globals, ns)
    return ns['__create_fn__'](**locals)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def _field_assign(frozen, name, value, self_name):
|
| 437 |
+
# If we're a frozen class, then assign to our fields in __init__
|
| 438 |
+
# via object.__setattr__. Otherwise, just use a simple
|
| 439 |
+
# assignment.
|
| 440 |
+
#
|
| 441 |
+
# self_name is what "self" is called in this function: don't
|
| 442 |
+
# hard-code "self", since that might be a field name.
|
| 443 |
+
if frozen:
|
| 444 |
+
return f'__dataclass_builtins_object__.__setattr__({self_name},{name!r},{value})'
|
| 445 |
+
return f'{self_name}.{name}={value}'
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def _field_init(f, frozen, globals, self_name, slots):
    """Return the __init__ body line that initializes field *f*.

    May bind the field's default or factory into *globals* (mutated!)
    under the name _dflt_<field>.  Returns None when no assignment is
    needed (pseudo-fields, or defaults served by the class attribute).
    """
    # Return the text of the line in the body of __init__ that will
    # initialize this field.

    default_name = f'_dflt_{f.name}'
    if f.default_factory is not MISSING:
        if f.init:
            # This field has a default factory.  If a parameter is
            # given, use it.  If not, call the factory.
            globals[default_name] = f.default_factory
            value = (f'{default_name}() '
                     f'if {f.name} is _HAS_DEFAULT_FACTORY '
                     f'else {f.name}')
        else:
            # This is a field that's not in the __init__ params, but
            # has a default factory function.  It needs to be
            # initialized here by calling the factory function,
            # because there's no other way to initialize it.

            # For a field initialized with a default=defaultvalue, the
            # class dict just has the default value
            # (cls.fieldname=defaultvalue).  But that won't work for a
            # default factory, the factory must be called in __init__
            # and we must assign that to self.fieldname.  We can't
            # fall back to the class dict's value, both because it's
            # not set, and because it might be different per-class
            # (which, after all, is why we have a factory function!).

            globals[default_name] = f.default_factory
            value = f'{default_name}()'
    else:
        # No default factory.
        if f.init:
            if f.default is MISSING:
                # There's no default, just do an assignment.
                value = f.name
            elif f.default is not MISSING:
                # There is a default; it is looked up by name, so bind it.
                globals[default_name] = f.default
                value = f.name
        else:
            # If the class has slots, then initialize this field.
            if slots and f.default is not MISSING:
                # With slots there's no class attribute to fall back on,
                # so assign the default explicitly.
                globals[default_name] = f.default
                value = default_name
            else:
                # This field does not need initialization: reading from it will
                # just use the class attribute that contains the default.
                # Signify that to the caller by returning None.
                return None

    # Only test this now, so that we can create variables for the
    # default.  However, return None to signify that we're not going
    # to actually do the assignment statement for InitVars.
    if f._field_type is _FIELD_INITVAR:
        return None

    # Now, actually generate the field assignment.
    return _field_assign(frozen, f.name, value, self_name)
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def _init_param(f):
|
| 509 |
+
# Return the __init__ parameter string for this field. For
|
| 510 |
+
# example, the equivalent of 'x:int=3' (except instead of 'int',
|
| 511 |
+
# reference a variable set to int, and instead of '3', reference a
|
| 512 |
+
# variable set to 3).
|
| 513 |
+
if f.default is MISSING and f.default_factory is MISSING:
|
| 514 |
+
# There's no default, and no default_factory, just output the
|
| 515 |
+
# variable name and type.
|
| 516 |
+
default = ''
|
| 517 |
+
elif f.default is not MISSING:
|
| 518 |
+
# There's a default, this will be the name that's used to look
|
| 519 |
+
# it up.
|
| 520 |
+
default = f'=_dflt_{f.name}'
|
| 521 |
+
elif f.default_factory is not MISSING:
|
| 522 |
+
# There's a factory function. Set a marker.
|
| 523 |
+
default = '=_HAS_DEFAULT_FACTORY'
|
| 524 |
+
return f'{f.name}:_type_{f.name}{default}'
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init,
             self_name, globals, slots):
    """Generate and return the class's __init__ function.

    *fields* contains both real fields and InitVar pseudo-fields;
    *std_fields*/*kw_only_fields* are the init fields split into
    positional and keyword-only groups (see _fields_in_init_order).
    """
    # fields contains both real fields and InitVar pseudo-fields.

    # Make sure we don't have fields without defaults following fields
    # with defaults.  This actually would be caught when exec-ing the
    # function source code, but catching it here gives a better error
    # message, and future-proofs us in case we build up the function
    # using ast.

    seen_default = False
    for f in std_fields:
        # Only consider the non-kw-only fields in the __init__ call.
        if f.init:
            if not (f.default is MISSING and f.default_factory is MISSING):
                seen_default = True
            elif seen_default:
                raise TypeError(f'non-default argument {f.name!r} '
                                'follows default argument')

    # Closure bindings for the generated code: per-field annotation
    # objects plus the sentinels and a safe alias for builtins' object.
    locals = {f'_type_{f.name}': f.type for f in fields}
    locals.update({
        'MISSING': MISSING,
        '_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY,
        '__dataclass_builtins_object__': object,
    })

    body_lines = []
    for f in fields:
        line = _field_init(f, frozen, locals, self_name, slots)
        # line is None means that this field doesn't require
        # initialization (it's a pseudo-field).  Just skip it.
        if line:
            body_lines.append(line)

    # Does this class have a post-init function?
    if has_post_init:
        params_str = ','.join(f.name for f in fields
                              if f._field_type is _FIELD_INITVAR)
        body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')

    # If no body lines, use 'pass'.
    if not body_lines:
        body_lines = ['pass']

    _init_params = [_init_param(f) for f in std_fields]
    if kw_only_fields:
        # Add the keyword-only args.  Because the * can only be added if
        # there's at least one keyword-only arg, there needs to be a test here
        # (instead of just concatenating the lists together).
        _init_params += ['*']
        _init_params += [_init_param(f) for f in kw_only_fields]
    return _create_fn('__init__',
                      [self_name] + _init_params,
                      body_lines,
                      locals=locals,
                      globals=globals,
                      return_type=None)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def _repr_fn(fields, globals):
    """Generate __repr__ showing each field as name=value, guarded
    against recursive data structures by _recursive_repr."""
    fn = _create_fn('__repr__',
                    ('self',),
                    ['return self.__class__.__qualname__ + f"(' +
                     ', '.join([f"{f.name}={{self.{f.name}!r}}"
                                for f in fields]) +
                     ')"'],
                    globals=globals)
    return _recursive_repr(fn)
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
def _frozen_get_del_attr(cls, fields, globals):
    """Generate (__setattr__, __delattr__) for a frozen dataclass.

    Both raise FrozenInstanceError for instances of *cls* itself and
    for any of the listed field names; everything else is delegated
    to the superclass so non-field attributes on subclasses still work.
    """
    locals = {'cls': cls,
              'FrozenInstanceError': FrozenInstanceError}
    if fields:
        fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
    else:
        # Special case for the zero-length tuple.
        fields_str = '()'
    return (_create_fn('__setattr__',
                      ('self', 'name', 'value'),
                      (f'if type(self) is cls or name in {fields_str}:',
                        ' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
                       f'super(cls, self).__setattr__(name, value)'),
                       locals=locals,
                       globals=globals),
            _create_fn('__delattr__',
                      ('self', 'name'),
                      (f'if type(self) is cls or name in {fields_str}:',
                        ' raise FrozenInstanceError(f"cannot delete field {name!r}")',
                       f'super(cls, self).__delattr__(name)'),
                       locals=locals,
                       globals=globals),
            )
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
def _cmp_fn(name, op, self_tuple, other_tuple, globals):
    """Generate a rich-comparison method that compares field tuples.

    Returns NotImplemented for a different class, per convention.
    """
    # Create a comparison function.  If the fields in the object are
    # named 'x' and 'y', then self_tuple is the string
    # '(self.x,self.y)' and other_tuple is the string
    # '(other.x,other.y)'.

    return _create_fn(name,
                      ('self', 'other'),
                      [ 'if other.__class__ is self.__class__:',
                       f' return {self_tuple}{op}{other_tuple}',
                        'return NotImplemented'],
                      globals=globals)
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def _hash_fn(fields, globals):
    """Generate __hash__ as the hash of the tuple of the given fields."""
    self_tuple = _tuple_str('self', fields)
    return _create_fn('__hash__',
                      ('self',),
                      [f'return hash({self_tuple})'],
                      globals=globals)
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def _is_classvar(a_type, typing):
|
| 646 |
+
# This test uses a typing internal class, but it's the best way to
|
| 647 |
+
# test if this is a ClassVar.
|
| 648 |
+
return (a_type is typing.ClassVar
|
| 649 |
+
or (type(a_type) is typing._GenericAlias
|
| 650 |
+
and a_type.__origin__ is typing.ClassVar))
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def _is_initvar(a_type, dataclasses):
|
| 654 |
+
# The module we're checking against is the module we're
|
| 655 |
+
# currently in (dataclasses.py).
|
| 656 |
+
return (a_type is dataclasses.InitVar
|
| 657 |
+
or type(a_type) is dataclasses.InitVar)
|
| 658 |
+
|
| 659 |
+
def _is_kw_only(a_type, dataclasses):
    # True only for the KW_ONLY sentinel itself (it's never subscripted).
    return a_type is dataclasses.KW_ONLY
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
    """Best-effort test of whether a *string* annotation denotes a_type."""
    # Given a type annotation string, does it refer to a_type in
    # a_module?  For example, when checking that annotation denotes a
    # ClassVar, then a_module is typing, and a_type is
    # typing.ClassVar.

    # It's possible to look up a_module given a_type, but it involves
    # looking in sys.modules (again!), and seems like a waste since
    # the caller already knows a_module.

    # - annotation is a string type annotation
    # - cls is the class that this annotation was found in
    # - a_module is the module we want to match
    # - a_type is the type in that module we want to match
    # - is_type_predicate is a function called with (obj, a_module)
    #   that determines if obj is of the desired type.

    # Since this test does not do a local namespace lookup (and
    # instead only a module (global) lookup), there are some things it
    # gets wrong.

    # With string annotations, cv0 will be detected as a ClassVar:
    #   CV = ClassVar
    #   @dataclass
    #   class C0:
    #     cv0: CV

    # But in this example cv1 will not be detected as a ClassVar:
    #   @dataclass
    #   class C1:
    #     CV = ClassVar
    #     cv1: CV

    # In C1, the code in this function (_is_type) will look up "CV" in
    # the module and not find it, so it will not consider cv1 as a
    # ClassVar.  This is a fairly obscure corner case, and the best
    # way to fix it would be to eval() the string "CV" with the
    # correct global and local namespaces.  However that would involve
    # an eval() penalty for every single field of every dataclass
    # that's defined.  It was judged not worth it.

    match = _MODULE_IDENTIFIER_RE.match(annotation)
    if match:
        ns = None
        module_name = match.group(1)
        if not module_name:
            # No module name, assume the class's module did
            # "from dataclasses import InitVar".
            ns = sys.modules.get(cls.__module__).__dict__
        else:
            # Look up module_name in the class's module.
            module = sys.modules.get(cls.__module__)
            if module and module.__dict__.get(module_name) is a_module:
                ns = sys.modules.get(a_type.__module__).__dict__
        if ns and is_type_predicate(ns.get(match.group(2)), a_module):
            return True
    return False
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def _get_field(cls, a_name, a_type, default_kw_only):
    """Build the Field object for one annotated class attribute."""
    # Return a Field object for this field name and type.  ClassVars and
    # InitVars are also returned, but marked as such (see f._field_type).
    # default_kw_only is the value of kw_only to use if there isn't a field()
    # that defines it.

    # If the default value isn't derived from Field, then it's only a
    # normal default value.  Convert it to a Field().
    default = getattr(cls, a_name, MISSING)
    if isinstance(default, Field):
        f = default
    else:
        if isinstance(default, types.MemberDescriptorType):
            # This is a field in __slots__, so it has no default value.
            default = MISSING
        f = field(default=default)

    # Only at this point do we know the name and the type.  Set them.
    f.name = a_name
    f.type = a_type

    # Assume it's a normal field until proven otherwise.  We're next
    # going to decide if it's a ClassVar or InitVar, everything else
    # is just a normal field.
    f._field_type = _FIELD

    # In addition to checking for actual types here, also check for
    # string annotations.  get_type_hints() won't always work for us
    # (see https://github.com/python/typing/issues/508 for example),
    # plus it's expensive and would require an eval for every string
    # annotation.  So, make a best effort to see if this is a ClassVar
    # or InitVar using regex's and checking that the thing referenced
    # is actually of the correct type.

    # For the complete discussion, see https://bugs.python.org/issue33453

    # If typing has not been imported, then it's impossible for any
    # annotation to be a ClassVar.  So, only look for ClassVar if
    # typing has been imported by any module (not necessarily cls's
    # module).
    typing = sys.modules.get('typing')
    if typing:
        if (_is_classvar(a_type, typing)
            or (isinstance(f.type, str)
                and _is_type(f.type, cls, typing, typing.ClassVar,
                             _is_classvar))):
            f._field_type = _FIELD_CLASSVAR

    # If the type is InitVar, or if it's a matching string annotation,
    # then it's an InitVar.
    if f._field_type is _FIELD:
        # The module we're checking against is the module we're
        # currently in (dataclasses.py).
        dataclasses = sys.modules[__name__]
        if (_is_initvar(a_type, dataclasses)
            or (isinstance(f.type, str)
                and _is_type(f.type, cls, dataclasses, dataclasses.InitVar,
                             _is_initvar))):
            f._field_type = _FIELD_INITVAR

    # Validations for individual fields.  This is delayed until now,
    # instead of in the Field() constructor, since only here do we
    # know the field name, which allows for better error reporting.

    # Special restrictions for ClassVar and InitVar.
    if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
        if f.default_factory is not MISSING:
            raise TypeError(f'field {f.name} cannot have a '
                            'default factory')
        # Should I check for other field settings? default_factory
        # seems the most serious to check for.  Maybe add others.  For
        # example, how about init=False (or really,
        # init=<not-the-default-init-value>)?  It makes no sense for
        # ClassVar and InitVar to specify init=<anything>.

    # kw_only validation and assignment.
    if f._field_type in (_FIELD, _FIELD_INITVAR):
        # For real and InitVar fields, if kw_only wasn't specified use the
        # default value.
        if f.kw_only is MISSING:
            f.kw_only = default_kw_only
    else:
        # Make sure kw_only isn't set for ClassVars
        assert f._field_type is _FIELD_CLASSVAR
        if f.kw_only is not MISSING:
            raise TypeError(f'field {f.name} is a ClassVar but specifies '
                            'kw_only')

    # For real fields, disallow mutable defaults for known types.
    if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
        raise ValueError(f'mutable default {type(f.default)} for field '
                         f'{f.name} is not allowed: use default_factory')

    return f
|
| 816 |
+
|
| 817 |
+
def _set_qualname(cls, value):
|
| 818 |
+
# Ensure that the functions returned from _create_fn uses the proper
|
| 819 |
+
# __qualname__ (the class they belong to).
|
| 820 |
+
if isinstance(value, FunctionType):
|
| 821 |
+
value.__qualname__ = f"{cls.__qualname__}.{value.__name__}"
|
| 822 |
+
return value
|
| 823 |
+
|
| 824 |
+
def _set_new_attribute(cls, name, value):
    """Install *value* on *cls* under *name* unless it already exists.

    Returns True (and changes nothing) when the class dict already
    defines *name*; returns False after setting the new attribute.
    """
    if name not in cls.__dict__:
        _set_qualname(cls, value)
        setattr(cls, name, value)
        return False
    return True
|
| 832 |
+
|
| 833 |
+
|
| 834 |
+
# Decide if/how we're going to create a hash function. Key is
|
| 835 |
+
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
|
| 836 |
+
# take. The common case is to do nothing, so instead of providing a
|
| 837 |
+
# function that is a no-op, use None to signify that.
|
| 838 |
+
|
| 839 |
+
def _hash_set_none(cls, fields, globals):
    # Hash action: return None so the class gets __hash__ = None
    # (i.e. instances are unhashable).
    return None
|
| 841 |
+
|
| 842 |
+
def _hash_add(cls, fields, globals):
    # Hash action: generate a __hash__ over the fields that participate
    # in hashing (f.hash if set, otherwise fall back to f.compare).
    flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
    return _set_qualname(cls, _hash_fn(flds, globals))
|
| 845 |
+
|
| 846 |
+
def _hash_exception(cls, fields, globals):
|
| 847 |
+
# Raise an exception.
|
| 848 |
+
raise TypeError(f'Cannot overwrite attribute __hash__ '
|
| 849 |
+
f'in class {cls.__name__}')
|
| 850 |
+
|
| 851 |
+
#
|
| 852 |
+
# +-------------------------------------- unsafe_hash?
|
| 853 |
+
# | +------------------------------- eq?
|
| 854 |
+
# | | +------------------------ frozen?
|
| 855 |
+
# | | | +---------------- has-explicit-hash?
|
| 856 |
+
# | | | |
|
| 857 |
+
# | | | | +------- action
|
| 858 |
+
# | | | | |
|
| 859 |
+
# v v v v v
|
| 860 |
+
# Maps (unsafe_hash, eq, frozen, has-explicit-hash) to the action that
# decides the class's __hash__; None means "leave __hash__ alone".
_hash_action = {(False, False, False, False): None,
                (False, False, False, True ): None,
                (False, False, True,  False): None,
                (False, False, True,  True ): None,
                (False, True,  False, False): _hash_set_none,
                (False, True,  False, True ): None,
                (False, True,  True,  False): _hash_add,
                (False, True,  True,  True ): None,
                (True,  False, False, False): _hash_add,
                (True,  False, False, True ): _hash_exception,
                (True,  False, True,  False): _hash_add,
                (True,  False, True,  True ): _hash_exception,
                (True,  True,  False, False): _hash_add,
                (True,  True,  False, True ): _hash_exception,
                (True,  True,  True,  False): _hash_add,
                (True,  True,  True,  True ): _hash_exception,
                }
|
| 877 |
+
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
|
| 878 |
+
# version of this table.
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen,
|
| 882 |
+
match_args, kw_only, slots):
|
| 883 |
+
# Now that dicts retain insertion order, there's no reason to use
|
| 884 |
+
# an ordered dict. I am leveraging that ordering here, because
|
| 885 |
+
# derived class fields overwrite base class fields, but the order
|
| 886 |
+
# is defined by the base class, which is found first.
|
| 887 |
+
fields = {}
|
| 888 |
+
|
| 889 |
+
if cls.__module__ in sys.modules:
|
| 890 |
+
globals = sys.modules[cls.__module__].__dict__
|
| 891 |
+
else:
|
| 892 |
+
# Theoretically this can happen if someone writes
|
| 893 |
+
# a custom string to cls.__module__. In which case
|
| 894 |
+
# such dataclass won't be fully introspectable
|
| 895 |
+
# (w.r.t. typing.get_type_hints) but will still function
|
| 896 |
+
# correctly.
|
| 897 |
+
globals = {}
|
| 898 |
+
|
| 899 |
+
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,
|
| 900 |
+
unsafe_hash, frozen))
|
| 901 |
+
|
| 902 |
+
# Find our base classes in reverse MRO order, and exclude
|
| 903 |
+
# ourselves. In reversed order so that more derived classes
|
| 904 |
+
# override earlier field definitions in base classes. As long as
|
| 905 |
+
# we're iterating over them, see if any are frozen.
|
| 906 |
+
any_frozen_base = False
|
| 907 |
+
has_dataclass_bases = False
|
| 908 |
+
for b in cls.__mro__[-1:0:-1]:
|
| 909 |
+
# Only process classes that have been processed by our
|
| 910 |
+
# decorator. That is, they have a _FIELDS attribute.
|
| 911 |
+
base_fields = getattr(b, _FIELDS, None)
|
| 912 |
+
if base_fields is not None:
|
| 913 |
+
has_dataclass_bases = True
|
| 914 |
+
for f in base_fields.values():
|
| 915 |
+
fields[f.name] = f
|
| 916 |
+
if getattr(b, _PARAMS).frozen:
|
| 917 |
+
any_frozen_base = True
|
| 918 |
+
|
| 919 |
+
# Annotations that are defined in this class (not in base
|
| 920 |
+
# classes). If __annotations__ isn't present, then this class
|
| 921 |
+
# adds no new annotations. We use this to compute fields that are
|
| 922 |
+
# added by this class.
|
| 923 |
+
#
|
| 924 |
+
# Fields are found from cls_annotations, which is guaranteed to be
|
| 925 |
+
# ordered. Default values are from class attributes, if a field
|
| 926 |
+
# has a default. If the default value is a Field(), then it
|
| 927 |
+
# contains additional info beyond (and possibly including) the
|
| 928 |
+
# actual default value. Pseudo-fields ClassVars and InitVars are
|
| 929 |
+
# included, despite the fact that they're not real fields. That's
|
| 930 |
+
# dealt with later.
|
| 931 |
+
cls_annotations = cls.__dict__.get('__annotations__', {})
|
| 932 |
+
|
| 933 |
+
# Now find fields in our class. While doing so, validate some
|
| 934 |
+
# things, and set the default values (as class attributes) where
|
| 935 |
+
# we can.
|
| 936 |
+
cls_fields = []
|
| 937 |
+
# Get a reference to this module for the _is_kw_only() test.
|
| 938 |
+
KW_ONLY_seen = False
|
| 939 |
+
dataclasses = sys.modules[__name__]
|
| 940 |
+
for name, type in cls_annotations.items():
|
| 941 |
+
# See if this is a marker to change the value of kw_only.
|
| 942 |
+
if (_is_kw_only(type, dataclasses)
|
| 943 |
+
or (isinstance(type, str)
|
| 944 |
+
and _is_type(type, cls, dataclasses, dataclasses.KW_ONLY,
|
| 945 |
+
_is_kw_only))):
|
| 946 |
+
# Switch the default to kw_only=True, and ignore this
|
| 947 |
+
# annotation: it's not a real field.
|
| 948 |
+
if KW_ONLY_seen:
|
| 949 |
+
raise TypeError(f'{name!r} is KW_ONLY, but KW_ONLY '
|
| 950 |
+
'has already been specified')
|
| 951 |
+
KW_ONLY_seen = True
|
| 952 |
+
kw_only = True
|
| 953 |
+
else:
|
| 954 |
+
# Otherwise it's a field of some type.
|
| 955 |
+
cls_fields.append(_get_field(cls, name, type, kw_only))
|
| 956 |
+
|
| 957 |
+
for f in cls_fields:
|
| 958 |
+
fields[f.name] = f
|
| 959 |
+
|
| 960 |
+
# If the class attribute (which is the default value for this
|
| 961 |
+
# field) exists and is of type 'Field', replace it with the
|
| 962 |
+
# real default. This is so that normal class introspection
|
| 963 |
+
# sees a real default value, not a Field.
|
| 964 |
+
if isinstance(getattr(cls, f.name, None), Field):
|
| 965 |
+
if f.default is MISSING:
|
| 966 |
+
# If there's no default, delete the class attribute.
|
| 967 |
+
# This happens if we specify field(repr=False), for
|
| 968 |
+
# example (that is, we specified a field object, but
|
| 969 |
+
# no default value). Also if we're using a default
|
| 970 |
+
# factory. The class attribute should not be set at
|
| 971 |
+
# all in the post-processed class.
|
| 972 |
+
delattr(cls, f.name)
|
| 973 |
+
else:
|
| 974 |
+
setattr(cls, f.name, f.default)
|
| 975 |
+
|
| 976 |
+
# Do we have any Field members that don't also have annotations?
|
| 977 |
+
for name, value in cls.__dict__.items():
|
| 978 |
+
if isinstance(value, Field) and not name in cls_annotations:
|
| 979 |
+
raise TypeError(f'{name!r} is a field but has no type annotation')
|
| 980 |
+
|
| 981 |
+
# Check rules that apply if we are derived from any dataclasses.
|
| 982 |
+
if has_dataclass_bases:
|
| 983 |
+
# Raise an exception if any of our bases are frozen, but we're not.
|
| 984 |
+
if any_frozen_base and not frozen:
|
| 985 |
+
raise TypeError('cannot inherit non-frozen dataclass from a '
|
| 986 |
+
'frozen one')
|
| 987 |
+
|
| 988 |
+
# Raise an exception if we're frozen, but none of our bases are.
|
| 989 |
+
if not any_frozen_base and frozen:
|
| 990 |
+
raise TypeError('cannot inherit frozen dataclass from a '
|
| 991 |
+
'non-frozen one')
|
| 992 |
+
|
| 993 |
+
# Remember all of the fields on our class (including bases). This
|
| 994 |
+
# also marks this class as being a dataclass.
|
| 995 |
+
setattr(cls, _FIELDS, fields)
|
| 996 |
+
|
| 997 |
+
# Was this class defined with an explicit __hash__? Note that if
|
| 998 |
+
# __eq__ is defined in this class, then python will automatically
|
| 999 |
+
# set __hash__ to None. This is a heuristic, as it's possible
|
| 1000 |
+
# that such a __hash__ == None was not auto-generated, but it
|
| 1001 |
+
# close enough.
|
| 1002 |
+
class_hash = cls.__dict__.get('__hash__', MISSING)
|
| 1003 |
+
has_explicit_hash = not (class_hash is MISSING or
|
| 1004 |
+
(class_hash is None and '__eq__' in cls.__dict__))
|
| 1005 |
+
|
| 1006 |
+
# If we're generating ordering methods, we must be generating the
|
| 1007 |
+
# eq methods.
|
| 1008 |
+
if order and not eq:
|
| 1009 |
+
raise ValueError('eq must be true if order is true')
|
| 1010 |
+
|
| 1011 |
+
# Include InitVars and regular fields (so, not ClassVars). This is
|
| 1012 |
+
# initialized here, outside of the "if init:" test, because std_init_fields
|
| 1013 |
+
# is used with match_args, below.
|
| 1014 |
+
all_init_fields = [f for f in fields.values()
|
| 1015 |
+
if f._field_type in (_FIELD, _FIELD_INITVAR)]
|
| 1016 |
+
(std_init_fields,
|
| 1017 |
+
kw_only_init_fields) = _fields_in_init_order(all_init_fields)
|
| 1018 |
+
|
| 1019 |
+
if init:
|
| 1020 |
+
# Does this class have a post-init function?
|
| 1021 |
+
has_post_init = hasattr(cls, _POST_INIT_NAME)
|
| 1022 |
+
|
| 1023 |
+
_set_new_attribute(cls, '__init__',
|
| 1024 |
+
_init_fn(all_init_fields,
|
| 1025 |
+
std_init_fields,
|
| 1026 |
+
kw_only_init_fields,
|
| 1027 |
+
frozen,
|
| 1028 |
+
has_post_init,
|
| 1029 |
+
# The name to use for the "self"
|
| 1030 |
+
# param in __init__. Use "self"
|
| 1031 |
+
# if possible.
|
| 1032 |
+
'__dataclass_self__' if 'self' in fields
|
| 1033 |
+
else 'self',
|
| 1034 |
+
globals,
|
| 1035 |
+
slots,
|
| 1036 |
+
))
|
| 1037 |
+
|
| 1038 |
+
# Get the fields as a list, and include only real fields. This is
|
| 1039 |
+
# used in all of the following methods.
|
| 1040 |
+
field_list = [f for f in fields.values() if f._field_type is _FIELD]
|
| 1041 |
+
|
| 1042 |
+
if repr:
|
| 1043 |
+
flds = [f for f in field_list if f.repr]
|
| 1044 |
+
_set_new_attribute(cls, '__repr__', _repr_fn(flds, globals))
|
| 1045 |
+
|
| 1046 |
+
if eq:
|
| 1047 |
+
# Create __eq__ method. There's no need for a __ne__ method,
|
| 1048 |
+
# since python will call __eq__ and negate it.
|
| 1049 |
+
flds = [f for f in field_list if f.compare]
|
| 1050 |
+
self_tuple = _tuple_str('self', flds)
|
| 1051 |
+
other_tuple = _tuple_str('other', flds)
|
| 1052 |
+
_set_new_attribute(cls, '__eq__',
|
| 1053 |
+
_cmp_fn('__eq__', '==',
|
| 1054 |
+
self_tuple, other_tuple,
|
| 1055 |
+
globals=globals))
|
| 1056 |
+
|
| 1057 |
+
if order:
|
| 1058 |
+
# Create and set the ordering methods.
|
| 1059 |
+
flds = [f for f in field_list if f.compare]
|
| 1060 |
+
self_tuple = _tuple_str('self', flds)
|
| 1061 |
+
other_tuple = _tuple_str('other', flds)
|
| 1062 |
+
for name, op in [('__lt__', '<'),
|
| 1063 |
+
('__le__', '<='),
|
| 1064 |
+
('__gt__', '>'),
|
| 1065 |
+
('__ge__', '>='),
|
| 1066 |
+
]:
|
| 1067 |
+
if _set_new_attribute(cls, name,
|
| 1068 |
+
_cmp_fn(name, op, self_tuple, other_tuple,
|
| 1069 |
+
globals=globals)):
|
| 1070 |
+
raise TypeError(f'Cannot overwrite attribute {name} '
|
| 1071 |
+
f'in class {cls.__name__}. Consider using '
|
| 1072 |
+
'functools.total_ordering')
|
| 1073 |
+
|
| 1074 |
+
if frozen:
|
| 1075 |
+
for fn in _frozen_get_del_attr(cls, field_list, globals):
|
| 1076 |
+
if _set_new_attribute(cls, fn.__name__, fn):
|
| 1077 |
+
raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
|
| 1078 |
+
f'in class {cls.__name__}')
|
| 1079 |
+
|
| 1080 |
+
# Decide if/how we're going to create a hash function.
|
| 1081 |
+
hash_action = _hash_action[bool(unsafe_hash),
|
| 1082 |
+
bool(eq),
|
| 1083 |
+
bool(frozen),
|
| 1084 |
+
has_explicit_hash]
|
| 1085 |
+
if hash_action:
|
| 1086 |
+
# No need to call _set_new_attribute here, since by the time
|
| 1087 |
+
# we're here the overwriting is unconditional.
|
| 1088 |
+
cls.__hash__ = hash_action(cls, field_list, globals)
|
| 1089 |
+
|
| 1090 |
+
if not getattr(cls, '__doc__'):
|
| 1091 |
+
# Create a class doc-string.
|
| 1092 |
+
cls.__doc__ = (cls.__name__ +
|
| 1093 |
+
str(inspect.signature(cls)).replace(' -> None', ''))
|
| 1094 |
+
|
| 1095 |
+
if match_args:
|
| 1096 |
+
# I could probably compute this once
|
| 1097 |
+
_set_new_attribute(cls, '__match_args__',
|
| 1098 |
+
tuple(f.name for f in std_init_fields))
|
| 1099 |
+
|
| 1100 |
+
if slots:
|
| 1101 |
+
cls = _add_slots(cls, frozen)
|
| 1102 |
+
|
| 1103 |
+
abc.update_abstractmethods(cls)
|
| 1104 |
+
|
| 1105 |
+
return cls
|
| 1106 |
+
|
| 1107 |
+
|
| 1108 |
+
# _dataclass_getstate and _dataclass_setstate are needed for pickling frozen
|
| 1109 |
+
# classes with slots. These could be slighly more performant if we generated
|
| 1110 |
+
# the code instead of iterating over fields. But that can be a project for
|
| 1111 |
+
# another day, if performance becomes an issue.
|
| 1112 |
+
def _dataclass_getstate(self):
    # Pickle support for frozen+slots dataclasses: capture the current
    # field values, in definition order, as a plain list.
    state = []
    for f in fields(self):
        state.append(getattr(self, f.name))
    return state
|
| 1114 |
+
|
| 1115 |
+
|
| 1116 |
+
def _dataclass_setstate(self, state):
    # Restore field values captured by _dataclass_getstate.
    # object.__setattr__ is used directly because the dataclass may be
    # frozen, in which case plain setattr would raise FrozenInstanceError.
    for f, value in zip(fields(self), state):
        object.__setattr__(self, f.name, value)
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
def _add_slots(cls, is_frozen):
    """Return a fresh copy of *cls* with ``__slots__`` set to its field names.

    __slots__ only has an effect when present at class-creation time, so a
    brand-new class object is built via ``type(cls)(...)`` rather than
    mutating *cls* in place.  If *is_frozen* is true, pickle helpers are
    attached so frozen+slots instances remain picklable.
    """
    # Need to create a new class, since we can't set __slots__
    # after a class has been created.

    # Make sure __slots__ isn't already set.
    if '__slots__' in cls.__dict__:
        raise TypeError(f'{cls.__name__} already specifies __slots__')

    # Create a new dict for our new class.
    cls_dict = dict(cls.__dict__)
    field_names = tuple(f.name for f in fields(cls))
    cls_dict['__slots__'] = field_names
    for field_name in field_names:
        # Remove our attributes, if present. They'll still be
        # available in _MARKER.  (Leaving class-level defaults in place
        # would shadow the slot descriptors.)
        cls_dict.pop(field_name, None)

    # Remove __dict__ itself.
    cls_dict.pop('__dict__', None)

    # And finally create the class.  type(cls) (not plain `type`) is used
    # so a custom metaclass is preserved.
    qualname = getattr(cls, '__qualname__', None)
    cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
    if qualname is not None:
        cls.__qualname__ = qualname

    if is_frozen:
        # Need this for pickling frozen classes with slots.
        cls.__getstate__ = _dataclass_getstate
        cls.__setstate__ = _dataclass_setstate

    return cls
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
def dataclass(cls=None, /, *, init=True, repr=True, eq=True, order=False,
              unsafe_hash=False, frozen=False, match_args=True,
              kw_only=False, slots=False):
    """Return the same class as was passed in, with dunder methods
    added based on the fields defined in the class.

    Fields are determined by examining PEP 526 __annotations__.

    When true, each flag enables a feature: init adds __init__(); repr
    adds __repr__(); order adds the rich comparison methods; unsafe_hash
    adds a __hash__() method; frozen forbids assigning to fields after
    instance creation; match_args adds the __match_args__ tuple; kw_only
    makes all fields keyword-only by default; slots adds a __slots__
    attribute.

    Works both as @dataclass and as @dataclass(...).
    """

    def wrap(klass):
        return _process_class(klass, init, repr, eq, order, unsafe_hash,
                              frozen, match_args, kw_only, slots)

    if cls is not None:
        # Used as @dataclass with no parentheses: decorate immediately.
        return wrap(cls)

    # Used as @dataclass(...): hand back the decorator itself.
    return wrap
|
| 1185 |
+
|
| 1186 |
+
|
| 1187 |
+
def fields(class_or_instance):
    """Return a tuple describing the fields of this dataclass.

    Accepts a dataclass or an instance of one.  Tuple elements are of
    type Field.
    """
    # Might it be worth caching this, per class?
    try:
        field_map = getattr(class_or_instance, _FIELDS)
    except AttributeError:
        raise TypeError('must be called with a dataclass type or instance') from None

    # Exclude pseudo-fields (ClassVar / InitVar).  The mapping preserves
    # insertion order, so the tuple follows field definition order.
    return tuple(f for f in field_map.values() if f._field_type is _FIELD)
|
| 1203 |
+
|
| 1204 |
+
|
| 1205 |
+
def _is_dataclass_instance(obj):
    """Returns True if obj is an instance of a dataclass."""
    # The _FIELDS marker is set on the class by _process_class; checking
    # type(obj) (not obj itself) rejects the class object being passed in.
    klass = type(obj)
    return hasattr(klass, _FIELDS)
|
| 1208 |
+
|
| 1209 |
+
|
| 1210 |
+
def is_dataclass(obj):
    """Returns True if obj is a dataclass or an instance of a
    dataclass."""
    # A class object is inspected directly; anything else (including a
    # GenericAlias such as SomeDataclass[int]) is inspected via its type.
    if isinstance(obj, type) and not isinstance(obj, GenericAlias):
        target = obj
    else:
        target = type(obj)
    return hasattr(target, _FIELDS)
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
def asdict(obj, *, dict_factory=dict):
    """Return the fields of a dataclass instance as a new dictionary mapping
    field names to field values.

    Example usage:

      @dataclass
      class C:
          x: int
          y: int

      c = C(1, 2)
      assert asdict(c) == {'x': 1, 'y': 2}

    If given, 'dict_factory' will be used instead of built-in dict.
    The function applies recursively to field values that are
    dataclass instances. This will also look into built-in containers:
    tuples, lists, and dicts.
    """
    if _is_dataclass_instance(obj):
        return _asdict_inner(obj, dict_factory)
    raise TypeError("asdict() should be called on dataclass instances")
|
| 1239 |
+
|
| 1240 |
+
|
| 1241 |
+
def _asdict_inner(obj, dict_factory):
    # Recursive worker for asdict().
    if _is_dataclass_instance(obj):
        # Dataclass: produce (name, converted value) pairs in field order.
        pairs = []
        for f in fields(obj):
            converted = _asdict_inner(getattr(obj, f.name), dict_factory)
            pairs.append((f.name, converted))
        return dict_factory(pairs)

    if isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # obj is a namedtuple. Recurse into it, but the returned
        # object is another namedtuple of the same type. This is
        # similar to how other list- or tuple-derived classes are
        # treated (see below), but we just need to create them
        # differently because a namedtuple's __init__ needs to be
        # called differently (see bpo-34363).

        # The namedtuple's own _asdict() is deliberately not used:
        # - it does not recurse into the namedtuple fields and
        #   convert them to dicts (using dict_factory).
        # - returning a dict here would break e.g. json.dumps, which
        #   converts namedtuples to lists, and would make namedtuples
        #   unusable as dict keys in the converted structure.
        return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])

    if isinstance(obj, (list, tuple)):
        # Assume we can create an object of this type by passing in a
        # generator (which is not true for namedtuples, handled above).
        return type(obj)(_asdict_inner(v, dict_factory) for v in obj)

    if isinstance(obj, dict):
        # Both keys and values are converted recursively.
        return type(obj)((_asdict_inner(k, dict_factory),
                          _asdict_inner(v, dict_factory))
                         for k, v in obj.items())

    # Anything else is copied wholesale so the result shares no state
    # with the original structure.
    return copy.deepcopy(obj)
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
def astuple(obj, *, tuple_factory=tuple):
    """Return the fields of a dataclass instance as a new tuple of field values.

    Example usage::

      @dataclass
      class C:
          x: int
          y: int

      c = C(1, 2)
      assert astuple(c) == (1, 2)

    If given, 'tuple_factory' will be used instead of built-in tuple.
    The function applies recursively to field values that are
    dataclass instances. This will also look into built-in containers:
    tuples, lists, and dicts.
    """
    if _is_dataclass_instance(obj):
        return _astuple_inner(obj, tuple_factory)
    raise TypeError("astuple() should be called on dataclass instances")
|
| 1304 |
+
|
| 1305 |
+
|
| 1306 |
+
def _astuple_inner(obj, tuple_factory):
    # Recursive worker for astuple().
    if _is_dataclass_instance(obj):
        # Dataclass: convert each field value, preserving field order.
        values = [_astuple_inner(getattr(obj, f.name), tuple_factory)
                  for f in fields(obj)]
        return tuple_factory(values)

    if isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # obj is a namedtuple. Recurse into it, but the returned
        # object is another namedtuple of the same type. This is
        # similar to how other list- or tuple-derived classes are
        # treated (see below), but we just need to create them
        # differently because a namedtuple's __init__ needs to be
        # called differently (see bpo-34363).
        return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])

    if isinstance(obj, (list, tuple)):
        # Assume we can create an object of this type by passing in a
        # generator (which is not true for namedtuples, handled above).
        return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)

    if isinstance(obj, dict):
        # Both keys and values are converted recursively.
        return type(obj)(
            (_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
            for k, v in obj.items())

    # Anything else is copied wholesale.
    return copy.deepcopy(obj)
|
| 1331 |
+
|
| 1332 |
+
|
| 1333 |
+
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
                   repr=True, eq=True, order=False, unsafe_hash=False,
                   frozen=False, match_args=True, kw_only=False, slots=False):
    """Return a new dynamically created dataclass.

    The dataclass name will be 'cls_name'.  'fields' is an iterable
    of either (name), (name, type) or (name, type, Field) objects. If type is
    omitted, use the string 'typing.Any'.  Field objects are created by
    the equivalent of calling 'field(name, type [, Field-info])'.

      C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))

    is equivalent to:

      @dataclass
      class C(Base):
          x: 'typing.Any'
          y: int
          z: int = field(init=False)

    For the bases and namespace parameters, see the builtin type() function.

    The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
    dataclass().
    """

    if namespace is None:
        namespace = {}

    # While we're looking through the field names, validate that they
    # are identifiers, are not keywords, and not duplicates.
    seen = set()
    annotations = {}
    defaults = {}
    for item in fields:
        # Accept three spellings: 'name', (name, type), (name, type, spec).
        if isinstance(item, str):
            name = item
            tp = 'typing.Any'
        elif len(item) == 2:
            name, tp, = item
        elif len(item) == 3:
            name, tp, spec = item
            # spec is typically a field(...) object; it becomes the
            # class-level default for this attribute.
            defaults[name] = spec
        else:
            raise TypeError(f'Invalid field: {item!r}')

        if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f'Field names must be valid identifiers: {name!r}')
        if keyword.iskeyword(name):
            raise TypeError(f'Field names must not be keywords: {name!r}')
        if name in seen:
            raise TypeError(f'Field name duplicated: {name!r}')

        seen.add(name)
        annotations[name] = tp

    # Update 'ns' with the user-supplied namespace plus our calculated values.
    # This callback runs during types.new_class() below; user-supplied names
    # are applied first so our defaults/annotations take precedence.
    def exec_body_callback(ns):
        ns.update(namespace)
        ns.update(defaults)
        ns['__annotations__'] = annotations

    # We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
    cls = types.new_class(cls_name, bases, {}, exec_body_callback)

    # Apply the normal decorator.
    return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
                     unsafe_hash=unsafe_hash, frozen=frozen,
                     match_args=match_args, kw_only=kw_only, slots=slots)
|
| 1403 |
+
|
| 1404 |
+
|
| 1405 |
+
def replace(obj, /, **changes):
    """Return a new object replacing specified fields with new values.

    This is especially useful for frozen classes.  Example usage:

      @dataclass(frozen=True)
      class C:
          x: int
          y: int

      c = C(1, 2)
      c1 = replace(c, x=3)
      assert c1.x == 3 and c1.y == 2

    Raises TypeError if obj is not a dataclass instance, and ValueError
    for init=False fields in *changes* or unspecified default-less
    InitVars.
    """

    # We're going to mutate 'changes', but that's okay because it's a
    # new dict, even if called with 'replace(obj, **my_changes)'.

    if not _is_dataclass_instance(obj):
        raise TypeError("replace() should be called on dataclass instances")

    # It's an error to have init=False fields in 'changes'.
    # If a field is not in 'changes', read its value from the provided obj.

    for f in getattr(obj, _FIELDS).values():
        # Only consider normal fields or InitVars.
        if f._field_type is _FIELD_CLASSVAR:
            continue

        if not f.init:
            # Error if this field is specified in changes.  init=False
            # fields can't be forwarded to __init__, so accepting them
            # here would silently drop the value.
            if f.name in changes:
                raise ValueError(f'field {f.name} is declared with '
                                 'init=False, it cannot be specified with '
                                 'replace()')
            continue

        if f.name not in changes:
            # InitVars aren't stored on the instance, so a value can't be
            # recovered from obj; the caller must supply it unless the
            # InitVar has a default.
            if f._field_type is _FIELD_INITVAR and f.default is MISSING:
                raise ValueError(f"InitVar {f.name!r} "
                                 'must be specified with replace()')
            changes[f.name] = getattr(obj, f.name)

    # Create the new object, which calls __init__() and
    # __post_init__() (if defined), using all of the init fields we've
    # added and/or left in 'changes'.  If there are values supplied in
    # changes that aren't fields, this will correctly raise a
    # TypeError.
    return obj.__class__(**changes)
|
parrot/lib/python3.10/datetime.py
ADDED
|
@@ -0,0 +1,2524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Concrete date/time and related types.
|
| 2 |
+
|
| 3 |
+
See http://www.iana.org/time-zones/repository/tz-link.html for
|
| 4 |
+
time zone and DST data sources.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo",
|
| 8 |
+
"MINYEAR", "MAXYEAR")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import time as _time
|
| 12 |
+
import math as _math
|
| 13 |
+
import sys
|
| 14 |
+
from operator import index as _index
|
| 15 |
+
|
| 16 |
+
def _cmp(x, y):
|
| 17 |
+
return 0 if x == y else 1 if x > y else -1
|
| 18 |
+
|
| 19 |
+
# Year range supported by the date/datetime types.
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059  # date.max.toordinal()

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions.  Difference:  Dates.py calls January 1 of year 0 day
# number 1.  The code here calls January 1 of year 1 day number 1.  This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations.  See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# _DAYS_BEFORE_MONTH[m] is the number of days in a non-leap year that
# precede the first day of month m (cumulative sum of _DAYS_IN_MONTH).
_DAYS_BEFORE_MONTH = [-1]  # -1 is a placeholder for indexing purposes.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim  # keep the module namespace clean of loop temporaries
|
| 41 |
+
|
| 42 |
+
def _is_leap(year):
|
| 43 |
+
"year -> 1 if leap year, else 0."
|
| 44 |
+
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
| 45 |
+
|
| 46 |
+
def _days_before_year(year):
|
| 47 |
+
"year -> number of days before January 1st of year."
|
| 48 |
+
y = year - 1
|
| 49 |
+
return y*365 + y//4 - y//100 + y//400
|
| 50 |
+
|
| 51 |
+
def _days_in_month(year, month):
    """Return the number of days in *month* of *year*."""
    assert 1 <= month <= 12, month
    if month != 2:
        return _DAYS_IN_MONTH[month]
    # February is the only month whose length depends on the year.
    return 29 if _is_leap(year) else _DAYS_IN_MONTH[2]
|
| 57 |
+
|
| 58 |
+
def _days_before_month(year, month):
    """Return the number of days in *year* preceding the first of *month*."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    # After February, a leap year contributes one extra preceding day.
    leap_adjust = 1 if (month > 2 and _is_leap(year)) else 0
    return _DAYS_BEFORE_MONTH[month] + leap_adjust
|
| 62 |
+
|
| 63 |
+
def _ymd2ord(year, month, day):
    """Return the proleptic Gregorian ordinal of the date; 01-Jan-0001 is day 1."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    days_this_month = _days_in_month(year, month)
    assert 1 <= day <= days_this_month, ('day must be in 1..%d' % days_this_month)
    # Days in all earlier years, plus days earlier in this year, plus the day.
    return _days_before_year(year) + _days_before_month(year, month) + day
|
| 71 |
+
|
| 72 |
+
# Precomputed day counts for the three Gregorian leap-year cycle lengths.
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    #    "    "   "   " 100   "
_DI4Y   = _days_before_year(5)      #    "    "   "   "   4   "

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
|
| 87 |
+
|
| 88 |
+
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399         -_DI400Y +1   -_DI400Y      400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0            400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # December 31 at the end of a 4-year or 400-year cycle (see above).
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    # (n + 50) >> 5 approximates n/30.6, landing on the right month or one past.
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:      # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
|
| 149 |
+
|
| 150 |
+
# Month and day names.  For localized versions, see the calendar module.
# Index 0 is None so that month/weekday numbers (1-based) index directly.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Assemble a time.struct_time from broken-out date/time fields."""
    # struct_time counts Monday as weekday 0; ordinal day 1 was a Monday.
    weekday = (_ymd2ord(y, m, d) + 6) % 7
    yearday = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, weekday, yearday, dstflag))
|
| 160 |
+
|
| 161 |
+
def _format_time(hh, mm, ss, us, timespec='auto'):
|
| 162 |
+
specs = {
|
| 163 |
+
'hours': '{:02d}',
|
| 164 |
+
'minutes': '{:02d}:{:02d}',
|
| 165 |
+
'seconds': '{:02d}:{:02d}:{:02d}',
|
| 166 |
+
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
|
| 167 |
+
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
if timespec == 'auto':
|
| 171 |
+
# Skip trailing microseconds when us==0.
|
| 172 |
+
timespec = 'microseconds' if us else 'seconds'
|
| 173 |
+
elif timespec == 'milliseconds':
|
| 174 |
+
us //= 1000
|
| 175 |
+
try:
|
| 176 |
+
fmt = specs[timespec]
|
| 177 |
+
except KeyError:
|
| 178 |
+
raise ValueError('Unknown timespec value')
|
| 179 |
+
else:
|
| 180 |
+
return fmt.format(hh, mm, ss, us)
|
| 181 |
+
|
| 182 |
+
def _format_offset(off):
    """Render a UTC-offset timedelta as '+HH:MM[:SS[.ffffff]]'.

    Returns '' when *off* is None.
    """
    if off is None:
        return ''
    if off.days < 0:
        sign = "-"
        off = -off
    else:
        sign = "+"
    hh, mm = divmod(off, timedelta(hours=1))
    mm, ss = divmod(mm, timedelta(minutes=1))
    result = "%s%02d:%02d" % (sign, hh, mm)
    # Seconds (and microseconds) are appended only when nonzero.
    if ss or ss.microseconds:
        result += ":%02d" % ss.seconds
        if ss.microseconds:
            result += '.%06d' % ss.microseconds
    return result
|
| 199 |
+
|
| 200 |
+
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    """Call time.strftime after expanding %f/%z/%Z from *object*.

    *object* is a date/time/datetime instance; %f, %z and %Z are computed
    here (time.strftime cannot see tzinfo or microseconds) and each is
    computed at most once, lazily.
    """
    # Don't call utcoffset() or tzname() unless actually needed.
    freplace = None  # the string to use for %f
    zreplace = None  # the string to use for %z
    Zreplace = None  # the string to use for %Z

    # Scan format for %z and %Z escapes, replacing as needed.
    newformat = []
    push = newformat.append
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch == '%':
            if i < n:
                ch = format[i]
                i += 1
                if ch == 'f':
                    if freplace is None:
                        # date objects have no microsecond; default to 0.
                        freplace = '%06d' % getattr(object,
                                                    'microsecond', 0)
                    newformat.append(freplace)
                elif ch == 'z':
                    if zreplace is None:
                        zreplace = ""
                        if hasattr(object, "utcoffset"):
                            offset = object.utcoffset()
                            if offset is not None:
                                sign = '+'
                                if offset.days < 0:
                                    offset = -offset
                                    sign = '-'
                                h, rest = divmod(offset, timedelta(hours=1))
                                m, rest = divmod(rest, timedelta(minutes=1))
                                s = rest.seconds
                                u = offset.microseconds
                                # Emit the shortest form that loses nothing.
                                if u:
                                    zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u)
                                elif s:
                                    zreplace = '%c%02d%02d%02d' % (sign, h, m, s)
                                else:
                                    zreplace = '%c%02d%02d' % (sign, h, m)
                    assert '%' not in zreplace
                    newformat.append(zreplace)
                elif ch == 'Z':
                    if Zreplace is None:
                        Zreplace = ""
                        if hasattr(object, "tzname"):
                            s = object.tzname()
                            if s is not None:
                                # strftime is going to have at this: escape %
                                Zreplace = s.replace('%', '%%')
                    newformat.append(Zreplace)
                else:
                    # Any other escape passes through to strftime untouched.
                    push('%')
                    push(ch)
            else:
                # Trailing lone '%' at end of format.
                push('%')
        else:
            push(ch)
    newformat = "".join(newformat)
    return _time.strftime(newformat, timetuple)
|
| 263 |
+
|
| 264 |
+
# Helpers for parsing the result of isoformat()
|
| 265 |
+
def _parse_isoformat_date(dtstr):
|
| 266 |
+
# It is assumed that this function will only be called with a
|
| 267 |
+
# string of length exactly 10, and (though this is not used) ASCII-only
|
| 268 |
+
year = int(dtstr[0:4])
|
| 269 |
+
if dtstr[4] != '-':
|
| 270 |
+
raise ValueError('Invalid date separator: %s' % dtstr[4])
|
| 271 |
+
|
| 272 |
+
month = int(dtstr[5:7])
|
| 273 |
+
|
| 274 |
+
if dtstr[7] != '-':
|
| 275 |
+
raise ValueError('Invalid date separator')
|
| 276 |
+
|
| 277 |
+
day = int(dtstr[8:10])
|
| 278 |
+
|
| 279 |
+
return [year, month, day]
|
| 280 |
+
|
| 281 |
+
def _parse_hh_mm_ss_ff(tstr):
|
| 282 |
+
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
|
| 283 |
+
len_str = len(tstr)
|
| 284 |
+
|
| 285 |
+
time_comps = [0, 0, 0, 0]
|
| 286 |
+
pos = 0
|
| 287 |
+
for comp in range(0, 3):
|
| 288 |
+
if (len_str - pos) < 2:
|
| 289 |
+
raise ValueError('Incomplete time component')
|
| 290 |
+
|
| 291 |
+
time_comps[comp] = int(tstr[pos:pos+2])
|
| 292 |
+
|
| 293 |
+
pos += 2
|
| 294 |
+
next_char = tstr[pos:pos+1]
|
| 295 |
+
|
| 296 |
+
if not next_char or comp >= 2:
|
| 297 |
+
break
|
| 298 |
+
|
| 299 |
+
if next_char != ':':
|
| 300 |
+
raise ValueError('Invalid time separator: %c' % next_char)
|
| 301 |
+
|
| 302 |
+
pos += 1
|
| 303 |
+
|
| 304 |
+
if pos < len_str:
|
| 305 |
+
if tstr[pos] != '.':
|
| 306 |
+
raise ValueError('Invalid microsecond component')
|
| 307 |
+
else:
|
| 308 |
+
pos += 1
|
| 309 |
+
|
| 310 |
+
len_remainder = len_str - pos
|
| 311 |
+
if len_remainder not in (3, 6):
|
| 312 |
+
raise ValueError('Invalid microsecond component')
|
| 313 |
+
|
| 314 |
+
time_comps[3] = int(tstr[pos:])
|
| 315 |
+
if len_remainder == 3:
|
| 316 |
+
time_comps[3] *= 1000
|
| 317 |
+
|
| 318 |
+
return time_comps
|
| 319 |
+
|
| 320 |
+
def _parse_isoformat_time(tstr):
    """Parse an isoformat() time string into [hh, mm, ss, us, tzinfo].

    The tzinfo element is None when no offset is present, timezone.utc for
    a zero offset, or a fixed-offset timezone otherwise.
    """
    # Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
    len_str = len(tstr)
    if len_str < 2:
        raise ValueError('Isoformat time too short')

    # This is equivalent to re.search('[+-]', tstr), but faster
    # (a '+' or '-' can only be the start of the offset portion).
    tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
    timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr

    time_comps = _parse_hh_mm_ss_ff(timestr)

    tzi = None
    if tz_pos > 0:
        tzstr = tstr[tz_pos:]

        # Valid time zone strings are:
        # HH:MM               len: 5
        # HH:MM:SS            len: 8
        # HH:MM:SS.ffffff     len: 15

        if len(tzstr) not in (5, 8, 15):
            raise ValueError('Malformed time zone string')

        # The offset reuses the same HH[:MM[:SS[.ffffff]]] parser.
        tz_comps = _parse_hh_mm_ss_ff(tzstr)
        if all(x == 0 for x in tz_comps):
            tzi = timezone.utc
        else:
            # The sign character sits immediately before the offset digits.
            tzsign = -1 if tstr[tz_pos - 1] == '-' else 1

            td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
                           seconds=tz_comps[2], microseconds=tz_comps[3])

            tzi = timezone(tzsign * td)

    time_comps.append(tzi)

    return time_comps
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# Just raise TypeError if the arg isn't None or a string.
|
| 361 |
+
def _check_tzname(name):
|
| 362 |
+
if name is not None and not isinstance(name, str):
|
| 363 |
+
raise TypeError("tzinfo.tzname() must return None or string, "
|
| 364 |
+
"not '%s'" % type(name))
|
| 365 |
+
|
| 366 |
+
# name is the offset-producing method, "utcoffset" or "dst".
|
| 367 |
+
# offset is what it returned.
|
| 368 |
+
# If offset isn't None or timedelta, raises TypeError.
|
| 369 |
+
# If offset is None, returns None.
|
| 370 |
+
# Else offset is checked for being in range.
|
| 371 |
+
# If it is, its integer value is returned. Else ValueError is raised.
|
| 372 |
+
def _check_utc_offset(name, offset):
    """Validate the result of tzinfo.utcoffset() or tzinfo.dst().

    *name* names the producing method ("utcoffset" or "dst"); *offset* is
    what it returned.  None is accepted; a timedelta must be strictly
    between -24h and +24h.  Raises TypeError or ValueError otherwise.
    """
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if isinstance(offset, timedelta):
        if -timedelta(1) < offset < timedelta(1):
            return
        raise ValueError("%s()=%s, must be strictly between "
                         "-timedelta(hours=24) and timedelta(hours=24)" %
                         (name, offset))
    raise TypeError("tzinfo.%s() must return None "
                    "or timedelta, not '%s'" % (name, type(offset)))
|
| 383 |
+
|
| 384 |
+
def _check_date_fields(year, month, day):
    """Coerce the date fields through __index__ and range-check them.

    Returns (year, month, day) as ints; raises ValueError when a field
    is out of range.
    """
    year = _index(year)
    month = _index(month)
    day = _index(day)
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    days_this_month = _days_in_month(year, month)
    if not 1 <= day <= days_this_month:
        raise ValueError('day must be in 1..%d' % days_this_month, day)
    return year, month, day
|
| 396 |
+
|
| 397 |
+
def _check_time_fields(hour, minute, second, microsecond, fold):
|
| 398 |
+
hour = _index(hour)
|
| 399 |
+
minute = _index(minute)
|
| 400 |
+
second = _index(second)
|
| 401 |
+
microsecond = _index(microsecond)
|
| 402 |
+
if not 0 <= hour <= 23:
|
| 403 |
+
raise ValueError('hour must be in 0..23', hour)
|
| 404 |
+
if not 0 <= minute <= 59:
|
| 405 |
+
raise ValueError('minute must be in 0..59', minute)
|
| 406 |
+
if not 0 <= second <= 59:
|
| 407 |
+
raise ValueError('second must be in 0..59', second)
|
| 408 |
+
if not 0 <= microsecond <= 999999:
|
| 409 |
+
raise ValueError('microsecond must be in 0..999999', microsecond)
|
| 410 |
+
if fold not in (0, 1):
|
| 411 |
+
raise ValueError('fold must be either 0 or 1', fold)
|
| 412 |
+
return hour, minute, second, microsecond, fold
|
| 413 |
+
|
| 414 |
+
def _check_tzinfo_arg(tz):
    """Raise TypeError unless *tz* is None or a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
|
| 417 |
+
|
| 418 |
+
def _cmperror(x, y):
|
| 419 |
+
raise TypeError("can't compare '%s' to '%s'" % (
|
| 420 |
+
type(x).__name__, type(y).__name__))
|
| 421 |
+
|
| 422 |
+
def _divide_and_round(a, b):
|
| 423 |
+
"""divide a by b and round result to the nearest integer
|
| 424 |
+
|
| 425 |
+
When the ratio is exactly half-way between two integers,
|
| 426 |
+
the even integer is returned.
|
| 427 |
+
"""
|
| 428 |
+
# Based on the reference implementation for divmod_near
|
| 429 |
+
# in Objects/longobject.c.
|
| 430 |
+
q, r = divmod(a, b)
|
| 431 |
+
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
|
| 432 |
+
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
|
| 433 |
+
# positive, 2 * r < b if b negative.
|
| 434 |
+
r *= 2
|
| 435 |
+
greater_than_half = r > b if b > 0 else r < b
|
| 436 |
+
if greater_than_half or r == b and q % 2 == 1:
|
| 437 |
+
q += 1
|
| 438 |
+
|
| 439 |
+
return q
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
class timedelta:
|
| 443 |
+
"""Represent the difference between two datetime objects.
|
| 444 |
+
|
| 445 |
+
Supported operators:
|
| 446 |
+
|
| 447 |
+
- add, subtract timedelta
|
| 448 |
+
- unary plus, minus, abs
|
| 449 |
+
- compare to timedelta
|
| 450 |
+
- multiply, divide by int
|
| 451 |
+
|
| 452 |
+
In addition, datetime supports subtraction of two datetime objects
|
| 453 |
+
returning a timedelta, and addition or subtraction of a datetime
|
| 454 |
+
and a timedelta giving a datetime.
|
| 455 |
+
|
| 456 |
+
Representation: (days, seconds, microseconds). Why? Because I
|
| 457 |
+
felt like it.
|
| 458 |
+
"""
|
| 459 |
+
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
|
| 460 |
+
|
| 461 |
+
def __new__(cls, days=0, seconds=0, microseconds=0,
|
| 462 |
+
milliseconds=0, minutes=0, hours=0, weeks=0):
|
| 463 |
+
# Doing this efficiently and accurately in C is going to be difficult
|
| 464 |
+
# and error-prone, due to ubiquitous overflow possibilities, and that
|
| 465 |
+
# C double doesn't have enough bits of precision to represent
|
| 466 |
+
# microseconds over 10K years faithfully. The code here tries to make
|
| 467 |
+
# explicit where go-fast assumptions can be relied on, in order to
|
| 468 |
+
# guide the C implementation; it's way more convoluted than speed-
|
| 469 |
+
# ignoring auto-overflow-to-long idiomatic Python could be.
|
| 470 |
+
|
| 471 |
+
# XXX Check that all inputs are ints or floats.
|
| 472 |
+
|
| 473 |
+
# Final values, all integer.
|
| 474 |
+
# s and us fit in 32-bit signed ints; d isn't bounded.
|
| 475 |
+
d = s = us = 0
|
| 476 |
+
|
| 477 |
+
# Normalize everything to days, seconds, microseconds.
|
| 478 |
+
days += weeks*7
|
| 479 |
+
seconds += minutes*60 + hours*3600
|
| 480 |
+
microseconds += milliseconds*1000
|
| 481 |
+
|
| 482 |
+
# Get rid of all fractions, and normalize s and us.
|
| 483 |
+
# Take a deep breath <wink>.
|
| 484 |
+
if isinstance(days, float):
|
| 485 |
+
dayfrac, days = _math.modf(days)
|
| 486 |
+
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
|
| 487 |
+
assert daysecondswhole == int(daysecondswhole) # can't overflow
|
| 488 |
+
s = int(daysecondswhole)
|
| 489 |
+
assert days == int(days)
|
| 490 |
+
d = int(days)
|
| 491 |
+
else:
|
| 492 |
+
daysecondsfrac = 0.0
|
| 493 |
+
d = days
|
| 494 |
+
assert isinstance(daysecondsfrac, float)
|
| 495 |
+
assert abs(daysecondsfrac) <= 1.0
|
| 496 |
+
assert isinstance(d, int)
|
| 497 |
+
assert abs(s) <= 24 * 3600
|
| 498 |
+
# days isn't referenced again before redefinition
|
| 499 |
+
|
| 500 |
+
if isinstance(seconds, float):
|
| 501 |
+
secondsfrac, seconds = _math.modf(seconds)
|
| 502 |
+
assert seconds == int(seconds)
|
| 503 |
+
seconds = int(seconds)
|
| 504 |
+
secondsfrac += daysecondsfrac
|
| 505 |
+
assert abs(secondsfrac) <= 2.0
|
| 506 |
+
else:
|
| 507 |
+
secondsfrac = daysecondsfrac
|
| 508 |
+
# daysecondsfrac isn't referenced again
|
| 509 |
+
assert isinstance(secondsfrac, float)
|
| 510 |
+
assert abs(secondsfrac) <= 2.0
|
| 511 |
+
|
| 512 |
+
assert isinstance(seconds, int)
|
| 513 |
+
days, seconds = divmod(seconds, 24*3600)
|
| 514 |
+
d += days
|
| 515 |
+
s += int(seconds) # can't overflow
|
| 516 |
+
assert isinstance(s, int)
|
| 517 |
+
assert abs(s) <= 2 * 24 * 3600
|
| 518 |
+
# seconds isn't referenced again before redefinition
|
| 519 |
+
|
| 520 |
+
usdouble = secondsfrac * 1e6
|
| 521 |
+
assert abs(usdouble) < 2.1e6 # exact value not critical
|
| 522 |
+
# secondsfrac isn't referenced again
|
| 523 |
+
|
| 524 |
+
if isinstance(microseconds, float):
|
| 525 |
+
microseconds = round(microseconds + usdouble)
|
| 526 |
+
seconds, microseconds = divmod(microseconds, 1000000)
|
| 527 |
+
days, seconds = divmod(seconds, 24*3600)
|
| 528 |
+
d += days
|
| 529 |
+
s += seconds
|
| 530 |
+
else:
|
| 531 |
+
microseconds = int(microseconds)
|
| 532 |
+
seconds, microseconds = divmod(microseconds, 1000000)
|
| 533 |
+
days, seconds = divmod(seconds, 24*3600)
|
| 534 |
+
d += days
|
| 535 |
+
s += seconds
|
| 536 |
+
microseconds = round(microseconds + usdouble)
|
| 537 |
+
assert isinstance(s, int)
|
| 538 |
+
assert isinstance(microseconds, int)
|
| 539 |
+
assert abs(s) <= 3 * 24 * 3600
|
| 540 |
+
assert abs(microseconds) < 3.1e6
|
| 541 |
+
|
| 542 |
+
# Just a little bit of carrying possible for microseconds and seconds.
|
| 543 |
+
seconds, us = divmod(microseconds, 1000000)
|
| 544 |
+
s += seconds
|
| 545 |
+
days, s = divmod(s, 24*3600)
|
| 546 |
+
d += days
|
| 547 |
+
|
| 548 |
+
assert isinstance(d, int)
|
| 549 |
+
assert isinstance(s, int) and 0 <= s < 24*3600
|
| 550 |
+
assert isinstance(us, int) and 0 <= us < 1000000
|
| 551 |
+
|
| 552 |
+
if abs(d) > 999999999:
|
| 553 |
+
raise OverflowError("timedelta # of days is too large: %d" % d)
|
| 554 |
+
|
| 555 |
+
self = object.__new__(cls)
|
| 556 |
+
self._days = d
|
| 557 |
+
self._seconds = s
|
| 558 |
+
self._microseconds = us
|
| 559 |
+
self._hashcode = -1
|
| 560 |
+
return self
|
| 561 |
+
|
| 562 |
+
def __repr__(self):
|
| 563 |
+
args = []
|
| 564 |
+
if self._days:
|
| 565 |
+
args.append("days=%d" % self._days)
|
| 566 |
+
if self._seconds:
|
| 567 |
+
args.append("seconds=%d" % self._seconds)
|
| 568 |
+
if self._microseconds:
|
| 569 |
+
args.append("microseconds=%d" % self._microseconds)
|
| 570 |
+
if not args:
|
| 571 |
+
args.append('0')
|
| 572 |
+
return "%s.%s(%s)" % (self.__class__.__module__,
|
| 573 |
+
self.__class__.__qualname__,
|
| 574 |
+
', '.join(args))
|
| 575 |
+
|
| 576 |
+
def __str__(self):
|
| 577 |
+
mm, ss = divmod(self._seconds, 60)
|
| 578 |
+
hh, mm = divmod(mm, 60)
|
| 579 |
+
s = "%d:%02d:%02d" % (hh, mm, ss)
|
| 580 |
+
if self._days:
|
| 581 |
+
def plural(n):
|
| 582 |
+
return n, abs(n) != 1 and "s" or ""
|
| 583 |
+
s = ("%d day%s, " % plural(self._days)) + s
|
| 584 |
+
if self._microseconds:
|
| 585 |
+
s = s + ".%06d" % self._microseconds
|
| 586 |
+
return s
|
| 587 |
+
|
| 588 |
+
def total_seconds(self):
|
| 589 |
+
"""Total seconds in the duration."""
|
| 590 |
+
return ((self.days * 86400 + self.seconds) * 10**6 +
|
| 591 |
+
self.microseconds) / 10**6
|
| 592 |
+
|
| 593 |
+
# Read-only field accessors
|
| 594 |
+
@property
|
| 595 |
+
def days(self):
|
| 596 |
+
"""days"""
|
| 597 |
+
return self._days
|
| 598 |
+
|
| 599 |
+
@property
|
| 600 |
+
def seconds(self):
|
| 601 |
+
"""seconds"""
|
| 602 |
+
return self._seconds
|
| 603 |
+
|
| 604 |
+
@property
|
| 605 |
+
def microseconds(self):
|
| 606 |
+
"""microseconds"""
|
| 607 |
+
return self._microseconds
|
| 608 |
+
|
| 609 |
+
def __add__(self, other):
|
| 610 |
+
if isinstance(other, timedelta):
|
| 611 |
+
# for CPython compatibility, we cannot use
|
| 612 |
+
# our __class__ here, but need a real timedelta
|
| 613 |
+
return timedelta(self._days + other._days,
|
| 614 |
+
self._seconds + other._seconds,
|
| 615 |
+
self._microseconds + other._microseconds)
|
| 616 |
+
return NotImplemented
|
| 617 |
+
|
| 618 |
+
__radd__ = __add__
|
| 619 |
+
|
| 620 |
+
def __sub__(self, other):
|
| 621 |
+
if isinstance(other, timedelta):
|
| 622 |
+
# for CPython compatibility, we cannot use
|
| 623 |
+
# our __class__ here, but need a real timedelta
|
| 624 |
+
return timedelta(self._days - other._days,
|
| 625 |
+
self._seconds - other._seconds,
|
| 626 |
+
self._microseconds - other._microseconds)
|
| 627 |
+
return NotImplemented
|
| 628 |
+
|
| 629 |
+
def __rsub__(self, other):
|
| 630 |
+
if isinstance(other, timedelta):
|
| 631 |
+
return -self + other
|
| 632 |
+
return NotImplemented
|
| 633 |
+
|
| 634 |
+
def __neg__(self):
|
| 635 |
+
# for CPython compatibility, we cannot use
|
| 636 |
+
# our __class__ here, but need a real timedelta
|
| 637 |
+
return timedelta(-self._days,
|
| 638 |
+
-self._seconds,
|
| 639 |
+
-self._microseconds)
|
| 640 |
+
|
| 641 |
+
def __pos__(self):
|
| 642 |
+
return self
|
| 643 |
+
|
| 644 |
+
def __abs__(self):
|
| 645 |
+
if self._days < 0:
|
| 646 |
+
return -self
|
| 647 |
+
else:
|
| 648 |
+
return self
|
| 649 |
+
|
| 650 |
+
def __mul__(self, other):
|
| 651 |
+
if isinstance(other, int):
|
| 652 |
+
# for CPython compatibility, we cannot use
|
| 653 |
+
# our __class__ here, but need a real timedelta
|
| 654 |
+
return timedelta(self._days * other,
|
| 655 |
+
self._seconds * other,
|
| 656 |
+
self._microseconds * other)
|
| 657 |
+
if isinstance(other, float):
|
| 658 |
+
usec = self._to_microseconds()
|
| 659 |
+
a, b = other.as_integer_ratio()
|
| 660 |
+
return timedelta(0, 0, _divide_and_round(usec * a, b))
|
| 661 |
+
return NotImplemented
|
| 662 |
+
|
| 663 |
+
__rmul__ = __mul__
|
| 664 |
+
|
| 665 |
+
def _to_microseconds(self):
|
| 666 |
+
return ((self._days * (24*3600) + self._seconds) * 1000000 +
|
| 667 |
+
self._microseconds)
|
| 668 |
+
|
| 669 |
+
def __floordiv__(self, other):
|
| 670 |
+
if not isinstance(other, (int, timedelta)):
|
| 671 |
+
return NotImplemented
|
| 672 |
+
usec = self._to_microseconds()
|
| 673 |
+
if isinstance(other, timedelta):
|
| 674 |
+
return usec // other._to_microseconds()
|
| 675 |
+
if isinstance(other, int):
|
| 676 |
+
return timedelta(0, 0, usec // other)
|
| 677 |
+
|
| 678 |
+
def __truediv__(self, other):
|
| 679 |
+
if not isinstance(other, (int, float, timedelta)):
|
| 680 |
+
return NotImplemented
|
| 681 |
+
usec = self._to_microseconds()
|
| 682 |
+
if isinstance(other, timedelta):
|
| 683 |
+
return usec / other._to_microseconds()
|
| 684 |
+
if isinstance(other, int):
|
| 685 |
+
return timedelta(0, 0, _divide_and_round(usec, other))
|
| 686 |
+
if isinstance(other, float):
|
| 687 |
+
a, b = other.as_integer_ratio()
|
| 688 |
+
return timedelta(0, 0, _divide_and_round(b * usec, a))
|
| 689 |
+
|
| 690 |
+
def __mod__(self, other):
|
| 691 |
+
if isinstance(other, timedelta):
|
| 692 |
+
r = self._to_microseconds() % other._to_microseconds()
|
| 693 |
+
return timedelta(0, 0, r)
|
| 694 |
+
return NotImplemented
|
| 695 |
+
|
| 696 |
+
def __divmod__(self, other):
|
| 697 |
+
if isinstance(other, timedelta):
|
| 698 |
+
q, r = divmod(self._to_microseconds(),
|
| 699 |
+
other._to_microseconds())
|
| 700 |
+
return q, timedelta(0, 0, r)
|
| 701 |
+
return NotImplemented
|
| 702 |
+
|
| 703 |
+
# Comparisons of timedelta objects with other.
|
| 704 |
+
|
| 705 |
+
def __eq__(self, other):
|
| 706 |
+
if isinstance(other, timedelta):
|
| 707 |
+
return self._cmp(other) == 0
|
| 708 |
+
else:
|
| 709 |
+
return NotImplemented
|
| 710 |
+
|
| 711 |
+
def __le__(self, other):
|
| 712 |
+
if isinstance(other, timedelta):
|
| 713 |
+
return self._cmp(other) <= 0
|
| 714 |
+
else:
|
| 715 |
+
return NotImplemented
|
| 716 |
+
|
| 717 |
+
def __lt__(self, other):
|
| 718 |
+
if isinstance(other, timedelta):
|
| 719 |
+
return self._cmp(other) < 0
|
| 720 |
+
else:
|
| 721 |
+
return NotImplemented
|
| 722 |
+
|
| 723 |
+
def __ge__(self, other):
    """self >= other; only defined against another timedelta."""
    if not isinstance(other, timedelta):
        return NotImplemented
    return self._cmp(other) >= 0
|
| 728 |
+
|
| 729 |
+
def __gt__(self, other):
    """self > other; only defined against another timedelta."""
    if not isinstance(other, timedelta):
        return NotImplemented
    return self._cmp(other) > 0
|
| 734 |
+
|
| 735 |
+
def _cmp(self, other):
    """Three-way compare with another timedelta: -1, 0, or 1.

    Relies on the normalized (days, seconds, microseconds) state, so
    tuple comparison gives the correct chronological ordering.
    """
    assert isinstance(other, timedelta)
    lhs = self._getstate()
    rhs = other._getstate()
    return _cmp(lhs, rhs)
|
| 738 |
+
|
| 739 |
+
def __hash__(self):
    """Hash of the normalized (days, seconds, microseconds) state."""
    cached = self._hashcode
    if cached == -1:
        # -1 is the "not yet computed" sentinel; compute once and cache.
        cached = hash(self._getstate())
        self._hashcode = cached
    return cached
|
| 743 |
+
|
| 744 |
+
def __bool__(self):
    """A timedelta is falsy iff it represents exactly zero duration."""
    return bool(self._days or self._seconds or self._microseconds)
|
| 748 |
+
|
| 749 |
+
# Pickle support.
|
| 750 |
+
|
| 751 |
+
def _getstate(self):
    """Return the normalized field triple used for pickling,
    comparison and hashing."""
    return self._days, self._seconds, self._microseconds
|
| 753 |
+
|
| 754 |
+
def __reduce__(self):
    """Pickle by re-calling the class with (days, seconds, microseconds)."""
    return self.__class__, self._getstate()
|
| 756 |
+
|
| 757 |
+
# Extreme values and smallest representable step for timedelta.
# Attached after the class body because they are themselves
# timedelta instances.
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
|
| 761 |
+
|
| 762 |
+
class date:
    """Concrete date type.

    Constructors:

    __new__()
    fromtimestamp()
    today()
    fromordinal()

    Operators:

    __repr__, __str__
    __eq__, __le__, __lt__, __ge__, __gt__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)

    Methods:

    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()

    Properties (readonly):
    year, month, day
    """
    # _hashcode caches hash(self); -1 means "not computed yet".
    __slots__ = '_year', '_month', '_day', '_hashcode'

    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:

        year, month, day (required, base 1)
        """
        if (month is None and
            isinstance(year, (bytes, str)) and len(year) == 4 and
            1 <= ord(year[2:3]) <= 12):
            # Pickle support: a 4-byte packed state was passed as `year`.
            if isinstance(year, str):
                try:
                    year = year.encode('latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a date object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(year)
            self._hashcode = -1
            return self
        year, month, day = _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hashcode = -1
        return self

    # Additional constructors

    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)

    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)

    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)

    @classmethod
    def fromisoformat(cls, date_string):
        """Construct a date from the output of date.isoformat()."""
        if not isinstance(date_string, str):
            raise TypeError('fromisoformat: argument must be str')

        try:
            # Exactly 'YYYY-MM-DD'; any parsing failure is reported
            # uniformly as a ValueError below.
            assert len(date_string) == 10
            return cls(*_parse_isoformat_date(date_string))
        except Exception:
            raise ValueError(f'Invalid isoformat string: {date_string!r}')

    @classmethod
    def fromisocalendar(cls, year, week, day):
        """Construct a date from the ISO year, week number and weekday.

        This is the inverse of the date.isocalendar() function"""
        # Year is bounded this way because 9999-12-31 is (9999, 52, 5)
        if not MINYEAR <= year <= MAXYEAR:
            raise ValueError(f"Year is out of range: {year}")

        if not 0 < week < 53:
            out_of_range = True

            if week == 53:
                # ISO years have 53 weeks in them on years starting with a
                # Thursday and leap years starting on a Wednesday
                first_weekday = _ymd2ord(year, 1, 1) % 7
                if (first_weekday == 4 or (first_weekday == 3 and
                                           _is_leap(year))):
                    out_of_range = False

            if out_of_range:
                raise ValueError(f"Invalid week: {week}")

        if not 0 < day < 8:
            raise ValueError(f"Invalid weekday: {day} (range is [1, 7])")

        # Now compute the offset from (Y, 1, 1) in days:
        day_offset = (week - 1) * 7 + (day - 1)

        # Calculate the ordinal day for monday, week 1
        day_1 = _isoweek1monday(year)
        ord_day = day_1 + day_offset

        return cls(*_ord2ymd(ord_day))

    # Conversions to string

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'

        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
                                      self.__class__.__qualname__,
                                      self._year,
                                      self._month,
                                      self._day)
    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.

    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)

    def strftime(self, fmt):
        "Format using strftime()."
        return _wrap_strftime(self, fmt, self.timetuple())

    def __format__(self, fmt):
        # An empty format spec means str(self); anything else is a
        # strftime() format string.
        if not isinstance(fmt, str):
            raise TypeError("must be str, not %s" % type(fmt).__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        return "%04d-%02d-%02d" % (self._year, self._month, self._day)

    __str__ = isoformat

    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year

    @property
    def month(self):
        """month (1-12)"""
        return self._month

    @property
    def day(self):
        """day (1-31)"""
        return self._day

    # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
    # __hash__ (and helpers)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)

    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.

        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)

    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        return type(self)(year, month, day)

    # Comparisons of date objects with other.

    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        return NotImplemented

    def _cmp(self, other):
        # Three-way compare; (y, m, d) tuple order matches
        # chronological order.
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))

    def __hash__(self):
        "Hash."
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode

    # Computations

    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            o = self.toordinal() + other.days
            if 0 < o <= _MAXORDINAL:
                return type(self).fromordinal(o)
            raise OverflowError("result out of range")
        return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, timedelta):
            return self + timedelta(-other.days)
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta(days1 - days2)
        return NotImplemented

    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        return (self.toordinal() + 6) % 7

    # Day-of-the-week and week-of-the-year, according to ISO

    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7

    def isocalendar(self):
        """Return a named tuple containing ISO year, week number, and weekday.

        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.

        The first week is 1; Monday is 1 ... Sunday is 7.

        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        (used with permission)
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Date falls in the last ISO week of the previous year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Date may already belong to week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return _IsoCalendarDate(year, week+1, day+1)

    # Pickle support.

    def _getstate(self):
        # Pack the date into 4 bytes: year high, year low, month, day.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),

    def __setstate(self, string):
        # Inverse of _getstate(): unpack the 4-byte state.
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo

    def __reduce__(self):
        return (self.__class__, self._getstate())
|
| 1106 |
+
|
| 1107 |
+
_date_class = date  # so functions w/ args named "date" can get at the class

# Extreme values and smallest representable step for date.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
|
| 1112 |
+
|
| 1113 |
+
|
| 1114 |
+
class tzinfo:
    """Abstract base class for time zone info classes.

    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()

    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")

    def utcoffset(self, dt):
        "datetime -> timedelta, positive for east of UTC, negative for west of UTC"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")

    def dst(self, dt):
        """datetime -> DST offset as timedelta, positive for east of UTC.

        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")

    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # delta is the standard (non-DST) offset; apply it first, then
        # re-evaluate DST at the shifted local time.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst

    # Pickle support.

    def __reduce__(self):
        # Honor optional __getinitargs__/__getstate__ hooks so that
        # subclasses with constructor arguments or instance state
        # round-trip through pickle.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)
|
| 1181 |
+
|
| 1182 |
+
|
| 1183 |
+
class IsoCalendarDate(tuple):
    """Result type of date.isocalendar(): a (year, week, weekday) tuple
    with named read-only accessors."""

    def __new__(cls, year, week, weekday, /):
        """Build the underlying 3-tuple (year, week, weekday)."""
        return tuple.__new__(cls, (year, week, weekday))

    @property
    def year(self):
        """ISO year component."""
        return self[0]

    @property
    def week(self):
        """ISO week number component."""
        return self[1]

    @property
    def weekday(self):
        """ISO weekday component (Monday == 1 ... Sunday == 7)."""
        return self[2]

    def __reduce__(self):
        # This code is intended to pickle the object without making the
        # class public. See https://bugs.python.org/msg352381
        return (tuple, (tuple(self),))

    def __repr__(self):
        cls_name = self.__class__.__name__
        year, week, weekday = self
        return f'{cls_name}(year={year}, week={week}, weekday={weekday})'
|
| 1208 |
+
|
| 1209 |
+
|
| 1210 |
+
# Keep a private alias and drop the public name; isocalendar() results
# deliberately pickle as plain tuples (see IsoCalendarDate.__reduce__).
_IsoCalendarDate = IsoCalendarDate
del IsoCalendarDate
_tzinfo_class = tzinfo  # so functions w/ args named "tzinfo" can get at the class
|
| 1213 |
+
|
| 1214 |
+
class time:
    """Time with time zone.

    Constructors:

    __new__()

    Operators:

    __repr__, __str__
    __eq__, __le__, __lt__, __ge__, __gt__, __hash__

    Methods:

    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()

    Properties (readonly):
    hour, minute, second, microsecond, tzinfo, fold
    """
    # _hashcode caches hash(self); -1 means "not computed yet".
    __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'

    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
        """Constructor.

        Arguments:

        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        fold (keyword only, default to zero)
        """
        if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
            ord(hour[0:1])&0x7F < 24):
            # Pickle support: a 6-byte packed state was passed as `hour`
            # (high bit of the first byte encodes fold).
            if isinstance(hour, str):
                try:
                    hour = hour.encode('latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a time object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(hour, minute or None)
            self._hashcode = -1
            return self
        hour, minute, second, microsecond, fold = _check_time_fields(
            hour, minute, second, microsecond, fold)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1
        self._fold = fold
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    @property
    def fold(self):
        # 0 or 1; disambiguates wall times repeated during a DST fold.
        return self._fold

    # Standard conversions, __hash__ (and helpers)

    # Comparisons of time objects with other.

    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return NotImplemented

    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            return NotImplemented

    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            return NotImplemented

    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            return NotImplemented

    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            return NotImplemented

    def _cmp(self, other, allow_mixed=False):
        # Three-way compare.  allow_mixed=True lets naive and aware
        # times compare unequal (for __eq__) instead of raising.
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        # Both aware with different offsets: compare UTC-adjusted values.
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))

    def __hash__(self):
        """Hash."""
        if self._hashcode == -1:
            # fold must not affect the hash (equal times hash equal).
            if self.fold:
                t = self.replace(fold=0)
            else:
                t = self
            tzoff = t.utcoffset()
            if not tzoff:  # zero or None
                self._hashcode = hash(t._getstate()[0])
            else:
                # Hash the UTC-equivalent time so equal aware times
                # in different zones hash alike.
                h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                              timedelta(hours=1))
                assert not m % timedelta(minutes=1), "whole minute"
                m //= timedelta(minutes=1)
                if 0 <= h < 24:
                    self._hashcode = hash(time(h, m, self.second, self.microsecond))
                else:
                    self._hashcode = hash((h, m, self.second, self.microsecond))
        return self._hashcode

    # Conversion to string

    def _tzstr(self):
        """Return formatted timezone offset (+xx:xx) or an empty string."""
        off = self.utcoffset()
        return _format_offset(off)

    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields are omitted from the repr.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s = "%s.%s(%d, %d%s)" % (self.__class__.__module__,
                                 self.__class__.__qualname__,
                                 self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        if self._fold:
            assert s[-1:] == ")"
            s = s[:-1] + ", fold=1)"
        return s

    def isoformat(self, timespec='auto'):
        """Return the time formatted according to ISO.

        The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
        part is omitted if self.microsecond == 0.

        The optional argument timespec specifies the number of additional
        terms of the time to include. Valid options are 'auto', 'hours',
        'minutes', 'seconds', 'milliseconds' and 'microseconds'.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond, timespec)
        tz = self._tzstr()
        if tz:
            s += tz
        return s

    __str__ = isoformat

    @classmethod
    def fromisoformat(cls, time_string):
        """Construct a time from the output of isoformat()."""
        if not isinstance(time_string, str):
            raise TypeError('fromisoformat: argument must be str')

        try:
            return cls(*_parse_isoformat_time(time_string))
        except Exception:
            raise ValueError(f'Invalid isoformat string: {time_string!r}')

    def strftime(self, fmt):
        """Format using strftime().  The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)

    def __format__(self, fmt):
        # An empty format spec means str(self); anything else is a
        # strftime() format string.
        if not isinstance(fmt, str):
            raise TypeError("must be str, not %s" % type(fmt).__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    # Timezone functions

    def utcoffset(self):
        """Return the timezone offset as timedelta, positive east of UTC
        (negative west of UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (as timedelta
        positive eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset

    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True, *, fold=None):
        """Return a new time with new values for the specified fields."""
        # tzinfo uses True as the "not given" sentinel because None is a
        # meaningful value (naive time).
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        if fold is None:
            fold = self._fold
        return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)

    # Pickle support.

    def _getstate(self, protocol=3):
        # Pack into 6 bytes: hour (with fold in the high bit for
        # protocol >= 4), minute, second, and 3 bytes of microseconds.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        h = self._hour
        if self._fold and protocol > 3:
            h += 128
        basestate = bytes([h, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(): unpack the 6-byte state plus tzinfo.
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        h, self._minute, self._second, us1, us2, us3 = string
        if h > 127:
            # High bit of the hour byte carries the fold flag.
            self._fold = 1
            self._hour = h - 128
        else:
            self._fold = 0
            self._hour = h
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo

    def __reduce_ex__(self, protocol):
        return (self.__class__, self._getstate(protocol))

    def __reduce__(self):
        return self.__reduce_ex__(2)
|
| 1555 |
+
|
| 1556 |
+
_time_class = time  # so functions w/ args named "time" can get at the class

# Extreme values and smallest representable step for time.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
|
| 1561 |
+
|
| 1562 |
+
|
| 1563 |
+
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])

    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints.
    """
    __slots__ = date.__slots__ + time.__slots__

    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None, *, fold=0):
        if (isinstance(year, (bytes, str)) and len(year) == 10 and
            1 <= ord(year[2:3])&0x7F <= 12):
            # Pickle support: `year` is really a 10-byte state string; byte 2
            # is the month (1-12), possibly with the fold flag in its high bit.
            if isinstance(year, str):
                try:
                    year = bytes(year, 'latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a datetime object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(year, month)
            self._hashcode = -1
            return self
        year, month, day = _check_date_fields(year, month, day)
        hour, minute, second, microsecond, fold = _check_time_fields(
            hour, minute, second, microsecond, fold)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # hash is computed lazily by __hash__
        self._fold = fold
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    @property
    def fold(self):
        # PEP 495: 0 or 1, disambiguates wall times repeated during a
        # backward UTC-offset transition.
        return self._fold

    @classmethod
    def _fromtimestamp(cls, t, utc, tz):
        """Construct a datetime from a POSIX timestamp (like time.time()).

        A timezone info object may be passed in as well.
        """
        # Split the timestamp into whole seconds and microseconds, rounding
        # the fraction and carrying into the seconds if it rounds to 1s.
        frac, t = _math.modf(t)
        us = round(frac * 1e6)
        if us >= 1000000:
            t += 1
            us -= 1000000
        elif us < 0:
            t -= 1
            us += 1000000

        converter = _time.gmtime if utc else _time.localtime
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        result = cls(y, m, d, hh, mm, ss, us, tz)
        if tz is None and not utc:
            # As of version 2015f max fold in IANA database is
            # 23 hours at 1969-09-30 13:00:00 in Kwajalein.
            # Let's probe 24 hours in the past to detect a transition:
            max_fold_seconds = 24 * 3600

            # On Windows localtime_s throws an OSError for negative values,
            # thus we can't perform fold detection for values of time less
            # than the max time fold. See comments in _datetimemodule's
            # version of this method for more details.
            if t < max_fold_seconds and sys.platform.startswith("win"):
                return result

            y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
            probe1 = cls(y, m, d, hh, mm, ss, us, tz)
            trans = result - probe1 - timedelta(0, max_fold_seconds)
            if trans.days < 0:
                # A transition occurred in the last 24h; if the same wall
                # time recurs after the transition, mark this as the fold.
                y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
                probe2 = cls(y, m, d, hh, mm, ss, us, tz)
                if probe2 == result:
                    result._fold = 1
        elif tz is not None:
            result = tz.fromutc(result)
        return result

    @classmethod
    def fromtimestamp(cls, t, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).

        A timezone info object may be passed in as well.
        """
        _check_tzinfo_arg(tz)

        return cls._fromtimestamp(t, tz is not None, tz)

    @classmethod
    def utcfromtimestamp(cls, t):
        """Construct a naive UTC datetime from a POSIX timestamp."""
        return cls._fromtimestamp(t, True, None)

    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)

    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)

    @classmethod
    def combine(cls, date, time, tzinfo=True):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        # tzinfo=True is a sentinel meaning "take tzinfo from the time arg".
        if tzinfo is True:
            tzinfo = time.tzinfo
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   tzinfo, fold=time.fold)

    @classmethod
    def fromisoformat(cls, date_string):
        """Construct a datetime from the output of datetime.isoformat()."""
        if not isinstance(date_string, str):
            raise TypeError('fromisoformat: argument must be str')

        # Split this at the separator (position 10, whatever character it is)
        dstr = date_string[0:10]
        tstr = date_string[11:]

        try:
            date_components = _parse_isoformat_date(dstr)
        except ValueError:
            raise ValueError(f'Invalid isoformat string: {date_string!r}')

        if tstr:
            try:
                time_components = _parse_isoformat_time(tstr)
            except ValueError:
                raise ValueError(f'Invalid isoformat string: {date_string!r}')
        else:
            # No time part: midnight, naive.
            time_components = [0, 0, 0, 0, None]

        return cls(*(date_components + time_components))

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # struct_time's tm_isdst: 1 in DST, 0 not in DST, -1 unknown.
        dst = self.dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        else:
            dst = 0
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)

    def _mktime(self):
        """Return integer POSIX timestamp."""
        epoch = datetime(1970, 1, 1)
        max_fold_seconds = 24 * 3600
        t = (self - epoch) // timedelta(0, 1)
        def local(u):
            y, m, d, hh, mm, ss = _time.localtime(u)[:6]
            return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)

        # Our goal is to solve t = local(u) for u.
        a = local(t) - t
        u1 = t - a
        t1 = local(u1)
        if t1 == t:
            # We found one solution, but it may not be the one we need.
            # Look for an earlier solution (if `fold` is 0), or a
            # later one (if `fold` is 1).
            u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
            b = local(u2) - u2
            if a == b:
                return u1
        else:
            b = t1 - u1
            assert a != b
        u2 = t - b
        t2 = local(u2)
        if t2 == t:
            return u2
        if t1 == t:
            return u1
        # We have found both offsets a and b, but neither t - a nor t - b is
        # a solution. This means t is in the gap.
        return (max, min)[self.fold](u1, u2)


    def timestamp(self):
        "Return POSIX timestamp as float"
        if self._tzinfo is None:
            # Naive: interpret as local time via the fixed-point search.
            s = self._mktime()
            return s + self.microsecond / 1e6
        else:
            return (self - _EPOCH).total_seconds()

    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        offset = self.utcoffset()
        if offset:
            self -= offset  # rebinds the local name only; no mutation
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        return _build_struct_time(y, m, d, hh, mm, ss, 0)

    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)

    def time(self):
        "Return the time part, with tzinfo None."
        return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)

    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo, fold=self.fold)

    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True,
                *, fold=None):
        """Return a new datetime with new values for the specified fields."""
        # tzinfo defaults to True (not None) because None is a meaningful
        # value: it strips the timezone.
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        if fold is None:
            fold = self.fold
        return type(self)(year, month, day, hour, minute, second,
                          microsecond, tzinfo, fold=fold)

    def _local_timezone(self):
        # Build a fixed-offset timezone matching the platform's local zone
        # at this instant.
        if self.tzinfo is None:
            ts = self._mktime()
        else:
            ts = (self - _EPOCH) // timedelta(seconds=1)
        localtm = _time.localtime(ts)
        local = datetime(*localtm[:6])
        # Extract TZ data
        gmtoff = localtm.tm_gmtoff
        zone = localtm.tm_zone
        return timezone(timedelta(seconds=gmtoff), zone)

    def astimezone(self, tz=None):
        if tz is None:
            tz = self._local_timezone()
        elif not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")

        mytz = self.tzinfo
        if mytz is None:
            mytz = self._local_timezone()
            myoffset = mytz.utcoffset(self)
        else:
            myoffset = mytz.utcoffset(self)
            if myoffset is None:
                # tzinfo present but offset unknown: fall back to the
                # platform's local zone for the naive equivalent.
                mytz = self.replace(tzinfo=None)._local_timezone()
                myoffset = mytz.utcoffset(self)

        if tz is mytz:
            return self

        # Convert self to UTC, and attach the new time zone object.
        utc = (self - myoffset).replace(tzinfo=tz)

        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)

    # Ways to produce a string.

    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d %02d:%02d:%02d %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day,
            self._hour, self._minute, self._second,
            self._year)

    def isoformat(self, sep='T', timespec='auto'):
        """Return the time formatted according to ISO.

        The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
        By default, the fractional part is omitted if self.microsecond == 0.

        If self.tzinfo is not None, the UTC offset is also attached,
        giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.

        Optional argument sep specifies the separator between date and
        time, default 'T'.

        The optional argument timespec specifies the number of additional
        terms of the time to include. Valid options are 'auto', 'hours',
        'minutes', 'seconds', 'milliseconds' and 'microseconds'.
        """
        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
             _format_time(self._hour, self._minute, self._second,
                          self._microsecond, timespec))

        off = self.utcoffset()
        tz = _format_offset(off)
        if tz:
            s += tz

        return s

    def __repr__(self):
        """Convert to formal string, for repr()."""
        L = [self._year, self._month, self._day,  # These are never zero
             self._hour, self._minute, self._second, self._microsecond]
        # Trim trailing zero microsecond/second so the repr round-trips
        # through the shortest constructor call.
        if L[-1] == 0:
            del L[-1]
        if L[-1] == 0:
            del L[-1]
        s = "%s.%s(%s)" % (self.__class__.__module__,
                           self.__class__.__qualname__,
                           ", ".join(map(str, L)))
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        if self._fold:
            assert s[-1:] == ")"
            s = s[:-1] + ", fold=1)"
        return s

    def __str__(self):
        "Convert to string, for str()."
        return self.isoformat(sep=' ')

    @classmethod
    def strptime(cls, date_string, format):
        'string, format -> new datetime parsed from a string (like time.strptime()).'
        import _strptime
        return _strptime._strptime_datetime(cls, date_string, format)

    def utcoffset(self):
        """Return the timezone offset as timedelta positive east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(self)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (as timedelta
        positive eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        _check_utc_offset("dst", offset)
        return offset

    # Comparisons of datetime objects with other.

    def __eq__(self, other):
        if isinstance(other, datetime):
            # allow_mixed: naive/aware mismatch compares unequal instead of
            # raising (only == and != tolerate mixing).
            return self._cmp(other, allow_mixed=True) == 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            return False

    def __le__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) <= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) < 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) >= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) > 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def _cmp(self, other, allow_mixed=False):
        # Returns -1/0/1 like cmp(); 2 is an arbitrary non-zero value used
        # when allow_mixed is set and the operands cannot be meaningfully
        # equal (so __eq__ reports False rather than raising).
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            # Assume that allow_mixed means that we are called from __eq__
            if allow_mixed:
                if myoff != self.replace(fold=not self.fold).utcoffset():
                    return 2
                if otoff != other.replace(fold=not other.fold).utcoffset():
                    return 2
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._year, other._month, other._day,
                         other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other     # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0

    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        if 0 < delta.days <= _MAXORDINAL:
            return type(self).combine(date.fromordinal(delta.days),
                                      time(hour, minute, second,
                                           delta.microseconds,
                                           tzinfo=self._tzinfo))
        raise OverflowError("result out of range")

    __radd__ = __add__

    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self + -other
            return NotImplemented

        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            return base
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        return base + otoff - myoff

    def __hash__(self):
        if self._hashcode == -1:
            # Hash ignores fold (PEP 495): both folds of a repeated wall
            # time hash equal.
            if self.fold:
                t = self.replace(fold=0)
            else:
                t = self
            tzoff = t.utcoffset()
            if tzoff is None:
                self._hashcode = hash(t._getstate()[0])
            else:
                days = _ymd2ord(self.year, self.month, self.day)
                seconds = self.hour * 3600 + self.minute * 60 + self.second
                self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
        return self._hashcode

    # Pickle support.

    def _getstate(self, protocol=3):
        # 10-byte packed state; the fold flag rides in the high bit of the
        # month byte, but only for protocols that can round-trip it (> 3).
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        m = self._month
        if self._fold and protocol > 3:
            m += 128
        basestate = bytes([yhi, ylo, m, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        (yhi, ylo, m, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        # Month byte > 127 means the fold flag was packed into its high bit.
        if m > 127:
            self._fold = 1
            self._month = m - 128
        else:
            self._fold = 0
            self._month = m
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo

    def __reduce_ex__(self, protocol):
        return (self.__class__, self._getstate(protocol))

    def __reduce__(self):
        return self.__reduce_ex__(2)
# Earliest/latest representable datetimes and the smallest distinguishable
# difference between two datetime objects.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the proleptic-Gregorian ordinal of the Monday starting ISO week 1.

    ISO 8601 week 1 of a year is the week containing that year's first
    Thursday.
    """
    jan1 = _ymd2ord(year, 1, 1)
    # Weekday of Jan 1 with Monday == 0 (ordinal 1 was a Monday; the +6
    # shift maps the epoch accordingly).
    jan1_weekday = (jan1 + 6) % 7
    monday = jan1 - jan1_weekday
    # When Jan 1 falls after Thursday (weekday 3), that Monday's week
    # belongs to the previous ISO year, so week 1 starts one week later.
    return monday + 7 if jan1_weekday > 3 else monday
class timezone(tzinfo):
    # Fixed-offset timezone: a constant UTC offset and an optional name.
    __slots__ = '_offset', '_name'

    # Sentinel value to disallow None
    _Omitted = object()
    def __new__(cls, offset, name=_Omitted):
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            if not offset:
                # timezone(timedelta(0)) returns the shared UTC singleton.
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta "
                             "strictly between -timedelta(hours=24) and "
                             "timedelta(hours=24).")
        return cls._create(offset, name)

    @classmethod
    def _create(cls, offset, name=None):
        # Internal constructor that skips the validation in __new__.
        self = tzinfo.__new__(cls)
        self._offset = offset
        self._name = name
        return self

    def __getinitargs__(self):
        """pickle support"""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)

    def __eq__(self, other):
        # Equality (and hash, below) depend on the offset only, not the name.
        if isinstance(other, timezone):
            return self._offset == other._offset
        return NotImplemented

    def __hash__(self):
        return hash(self._offset)

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> tz = timezone.utc
        >>> repr(tz)
        'datetime.timezone.utc'
        >>> tz = timezone(timedelta(hours=-5), 'EST')
        >>> repr(tz)
        "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
        """
        if self is self.utc:
            return 'datetime.timezone.utc'
        if self._name is None:
            return "%s.%s(%r)" % (self.__class__.__module__,
                                  self.__class__.__qualname__,
                                  self._offset)
        return "%s.%s(%r, %r)" % (self.__class__.__module__,
                                  self.__class__.__qualname__,
                                  self._offset, self._name)

    def __str__(self):
        return self.tzname(None)

    def utcoffset(self, dt):
        if isinstance(dt, datetime) or dt is None:
            return self._offset
        raise TypeError("utcoffset() argument must be a datetime instance"
                        " or None")

    def tzname(self, dt):
        if isinstance(dt, datetime) or dt is None:
            if self._name is None:
                # No explicit name: derive one like 'UTC+05:30'.
                return self._name_from_offset(self._offset)
            return self._name
        raise TypeError("tzname() argument must be a datetime instance"
                        " or None")

    def dst(self, dt):
        # Fixed-offset zones never observe DST.
        if isinstance(dt, datetime) or dt is None:
            return None
        raise TypeError("dst() argument must be a datetime instance"
                        " or None")

    def fromutc(self, dt):
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("fromutc: dt.tzinfo "
                                 "is not self")
            return dt + self._offset
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")

    # Offsets must be strictly between -24h and +24h.
    _maxoffset = timedelta(hours=24, microseconds=-1)
    _minoffset = -_maxoffset

    @staticmethod
    def _name_from_offset(delta):
        if not delta:
            return 'UTC'
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        else:
            sign = '+'
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes, rest = divmod(rest, timedelta(minutes=1))
        seconds = rest.seconds
        microseconds = rest.microseconds
        # Include seconds/microseconds only when non-zero.
        if microseconds:
            return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
                    f'.{microseconds:06d}')
        if seconds:
            return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
        return f'UTC{sign}{hours:02d}:{minutes:02d}'
# Shared UTC singleton returned by timezone(timedelta(0)).
timezone.utc = timezone._create(timedelta(0))
# bpo-37642: These attributes are rounded to the nearest minute for backwards
# compatibility, even though the constructor will accept a wider range of
# values. This may change in the future.
timezone.min = timezone._create(-timedelta(hours=23, minutes=59))
timezone.max = timezone._create(timedelta(hours=23, minutes=59))
# Aware Unix epoch, used by timestamp()/_local_timezone() for UTC arithmetic.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
# Some time zone algebra. For a datetime x, let
|
| 2310 |
+
# x.n = x stripped of its timezone -- its naive time.
|
| 2311 |
+
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
|
| 2312 |
+
# return None
|
| 2313 |
+
# x.d = x.dst(), and assuming that doesn't raise an exception or
|
| 2314 |
+
# return None
|
| 2315 |
+
# x.s = x's standard offset, x.o - x.d
|
| 2316 |
+
#
|
| 2317 |
+
# Now some derived rules, where k is a duration (timedelta).
|
| 2318 |
+
#
|
| 2319 |
+
# 1. x.o = x.s + x.d
|
| 2320 |
+
# This follows from the definition of x.s.
|
| 2321 |
+
#
|
| 2322 |
+
# 2. If x and y have the same tzinfo member, x.s = y.s.
|
| 2323 |
+
# This is actually a requirement, an assumption we need to make about
|
| 2324 |
+
# sane tzinfo classes.
|
| 2325 |
+
#
|
| 2326 |
+
# 3. The naive UTC time corresponding to x is x.n - x.o.
|
| 2327 |
+
# This is again a requirement for a sane tzinfo class.
|
| 2328 |
+
#
|
| 2329 |
+
# 4. (x+k).s = x.s
|
| 2330 |
+
# This follows from #2, and that datetime.timetz+timedelta preserves tzinfo.
|
| 2331 |
+
#
|
| 2332 |
+
# 5. (x+k).n = x.n + k
|
| 2333 |
+
# Again follows from how arithmetic is defined.
|
| 2334 |
+
#
|
| 2335 |
+
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
|
| 2336 |
+
# (meaning that the various tzinfo methods exist, and don't blow up or return
|
| 2337 |
+
# None when called).
|
| 2338 |
+
#
|
| 2339 |
+
# The function wants to return a datetime y with timezone tz, equivalent to x.
|
| 2340 |
+
# x is already in UTC.
|
| 2341 |
+
#
|
| 2342 |
+
# By #3, we want
|
| 2343 |
+
#
|
| 2344 |
+
# y.n - y.o = x.n [1]
|
| 2345 |
+
#
|
| 2346 |
+
# The algorithm starts by attaching tz to x.n, and calling that y. So
|
| 2347 |
+
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
|
| 2348 |
+
# becomes true; in effect, we want to solve [2] for k:
|
| 2349 |
+
#
|
| 2350 |
+
# (y+k).n - (y+k).o = x.n [2]
|
| 2351 |
+
#
|
| 2352 |
+
# By #1, this is the same as
|
| 2353 |
+
#
|
| 2354 |
+
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
|
| 2355 |
+
#
|
| 2356 |
+
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
|
| 2357 |
+
# Substituting that into [3],
|
| 2358 |
+
#
|
| 2359 |
+
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
|
| 2360 |
+
# k - (y+k).s - (y+k).d = 0; rearranging,
|
| 2361 |
+
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
|
| 2362 |
+
# k = y.s - (y+k).d
|
| 2363 |
+
#
|
| 2364 |
+
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
|
| 2365 |
+
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
|
| 2366 |
+
# very large, since all offset-returning methods return a duration of magnitude
|
| 2367 |
+
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
|
| 2368 |
+
# be 0, so ignoring it has no consequence then.
|
| 2369 |
+
#
|
| 2370 |
+
# In any case, the new value is
|
| 2371 |
+
#
|
| 2372 |
+
# z = y + y.s [4]
|
| 2373 |
+
#
|
| 2374 |
+
# It's helpful to step back at look at [4] from a higher level: it's simply
|
| 2375 |
+
# mapping from UTC to tz's standard time.
|
| 2376 |
+
#
|
| 2377 |
+
# At this point, if
|
| 2378 |
+
#
|
| 2379 |
+
# z.n - z.o = x.n [5]
|
| 2380 |
+
#
|
| 2381 |
+
# we have an equivalent time, and are almost done. The insecurity here is
|
| 2382 |
+
# at the start of daylight time. Picture US Eastern for concreteness. The wall
|
| 2383 |
+
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
|
| 2384 |
+
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
|
| 2385 |
+
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
|
| 2386 |
+
# on the day DST starts. We want to return the 1:MM EST spelling because that's
|
| 2387 |
+
# the only spelling that makes sense on the local wall clock.
|
| 2388 |
+
#
|
| 2389 |
+
# In fact, if [5] holds at this point, we do have the standard-time spelling,
|
| 2390 |
+
# but that takes a bit of proof. We first prove a stronger result. What's the
|
| 2391 |
+
# difference between the LHS and RHS of [5]? Let
|
| 2392 |
+
#
|
| 2393 |
+
# diff = x.n - (z.n - z.o) [6]
|
| 2394 |
+
#
|
| 2395 |
+
# Now
|
| 2396 |
+
# z.n = by [4]
|
| 2397 |
+
# (y + y.s).n = by #5
|
| 2398 |
+
# y.n + y.s = since y.n = x.n
|
| 2399 |
+
#     x.n + y.s =               since z and y have the same tzinfo member,
|
| 2400 |
+
# y.s = z.s by #2
|
| 2401 |
+
# x.n + z.s
|
| 2402 |
+
#
|
| 2403 |
+
# Plugging that back into [6] gives
|
| 2404 |
+
#
|
| 2405 |
+
# diff =
|
| 2406 |
+
# x.n - ((x.n + z.s) - z.o) = expanding
|
| 2407 |
+
# x.n - x.n - z.s + z.o = cancelling
|
| 2408 |
+
# - z.s + z.o = by #2
|
| 2409 |
+
# z.d
|
| 2410 |
+
#
|
| 2411 |
+
# So diff = z.d.
|
| 2412 |
+
#
|
| 2413 |
+
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
|
| 2414 |
+
# spelling we wanted in the endcase described above. We're done. Contrarily,
|
| 2415 |
+
# if z.d = 0, then we have a UTC equivalent, and are also done.
|
| 2416 |
+
#
|
| 2417 |
+
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
|
| 2418 |
+
# add to z (in effect, z is in tz's standard time, and we need to shift the
|
| 2419 |
+
# local clock into tz's daylight time).
|
| 2420 |
+
#
|
| 2421 |
+
# Let
|
| 2422 |
+
#
|
| 2423 |
+
# z' = z + z.d = z + diff [7]
|
| 2424 |
+
#
|
| 2425 |
+
# and we can again ask whether
|
| 2426 |
+
#
|
| 2427 |
+
# z'.n - z'.o = x.n [8]
|
| 2428 |
+
#
|
| 2429 |
+
# If so, we're done. If not, the tzinfo class is insane, according to the
|
| 2430 |
+
# assumptions we've made. This also requires a bit of proof. As before, let's
|
| 2431 |
+
# compute the difference between the LHS and RHS of [8] (and skipping some of
|
| 2432 |
+
# the justifications for the kinds of substitutions we've done several times
|
| 2433 |
+
# already):
|
| 2434 |
+
#
|
| 2435 |
+
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
|
| 2436 |
+
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
|
| 2437 |
+
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
|
| 2438 |
+
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
|
| 2439 |
+
# - z.n + z.n - z.o + z'.o = cancel z.n
|
| 2440 |
+
# - z.o + z'.o = #1 twice
|
| 2441 |
+
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
|
| 2442 |
+
# z'.d - z.d
|
| 2443 |
+
#
|
| 2444 |
+
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
|
| 2445 |
+
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
|
| 2446 |
+
# return z', not bothering to compute z'.d.
|
| 2447 |
+
#
|
| 2448 |
+
# How could z.d and z'.d differ?  z' = z + z.d [7], so merely moving z' by
|
| 2449 |
+
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
|
| 2450 |
+
# would have to change the result dst() returns: we start in DST, and moving
|
| 2451 |
+
# a little further into it takes us out of DST.
|
| 2452 |
+
#
|
| 2453 |
+
# There isn't a sane case where this can happen. The closest it gets is at
|
| 2454 |
+
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
|
| 2455 |
+
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
|
| 2456 |
+
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
|
| 2457 |
+
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
|
| 2458 |
+
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
|
| 2459 |
+
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
|
| 2460 |
+
# standard time. Since that's what the local clock *does*, we want to map both
|
| 2461 |
+
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
|
| 2462 |
+
# in local time, but so it goes -- it's the way the local clock works.
|
| 2463 |
+
#
|
| 2464 |
+
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
|
| 2465 |
+
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
|
| 2466 |
+
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
|
| 2467 |
+
# (correctly) concludes that z' is not UTC-equivalent to x.
|
| 2468 |
+
#
|
| 2469 |
+
# Because we know z.d said z was in daylight time (else [5] would have held and
|
| 2470 |
+
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
|
| 2471 |
+
# and we have stopped then), and there are only 2 possible values dst() can
|
| 2472 |
+
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
|
| 2473 |
+
# but the reasoning doesn't depend on the example -- it depends on there being
|
| 2474 |
+
# two possible dst() outcomes, one zero and the other non-zero). Therefore
|
| 2475 |
+
# z' must be in standard time, and is the spelling we want in this case.
|
| 2476 |
+
#
|
| 2477 |
+
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
|
| 2478 |
+
# concerned (because it takes z' as being in standard time rather than the
|
| 2479 |
+
# daylight time we intend here), but returning it gives the real-life "local
|
| 2480 |
+
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
|
| 2481 |
+
# tz.
|
| 2482 |
+
#
|
| 2483 |
+
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
|
| 2484 |
+
# the 1:MM standard time spelling we want.
|
| 2485 |
+
#
|
| 2486 |
+
# So how can this break? One of the assumptions must be violated. Two
|
| 2487 |
+
# possibilities:
|
| 2488 |
+
#
|
| 2489 |
+
# 1) [2] effectively says that y.s is invariant across all y belonging to a given
|
| 2490 |
+
# time zone. This isn't true if, for political reasons or continental drift,
|
| 2491 |
+
# a region decides to change its base offset from UTC.
|
| 2492 |
+
#
|
| 2493 |
+
# 2) There may be versions of "double daylight" time where the tail end of
|
| 2494 |
+
# the analysis gives up a step too early. I haven't thought about that
|
| 2495 |
+
# enough to say.
|
| 2496 |
+
#
|
| 2497 |
+
# In any case, it's clear that the default fromutc() is strong enough to handle
|
| 2498 |
+
# "almost all" time zones: so long as the standard offset is invariant, it
|
| 2499 |
+
# doesn't matter if daylight time transition points change from year to year, or
|
| 2500 |
+
# if daylight time is skipped in some years; it doesn't matter how large or
|
| 2501 |
+
# small dst() may get within its bounds; and it doesn't even matter if some
|
| 2502 |
+
# perverse time zone returns a negative dst()). So a breaking case must be
|
| 2503 |
+
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
|
| 2504 |
+
|
| 2505 |
+
try:
|
| 2506 |
+
from _datetime import *
|
| 2507 |
+
except ImportError:
|
| 2508 |
+
pass
|
| 2509 |
+
else:
|
| 2510 |
+
# Clean up unused names
|
| 2511 |
+
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
|
| 2512 |
+
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
|
| 2513 |
+
_check_date_fields, _check_time_fields,
|
| 2514 |
+
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
|
| 2515 |
+
_date_class, _days_before_month, _days_before_year, _days_in_month,
|
| 2516 |
+
_format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math,
|
| 2517 |
+
_ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
|
| 2518 |
+
_divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
|
| 2519 |
+
_parse_hh_mm_ss_ff, _IsoCalendarDate)
|
| 2520 |
+
# XXX Since import * above excludes names that start with _,
|
| 2521 |
+
# docstring does not get overwritten. In the future, it may be
|
| 2522 |
+
# appropriate to maintain a single module level docstring and
|
| 2523 |
+
# remove the following line.
|
| 2524 |
+
from _datetime import __doc__
|
parrot/lib/python3.10/difflib.py
ADDED
|
@@ -0,0 +1,2056 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module difflib -- helpers for computing deltas between objects.
|
| 3 |
+
|
| 4 |
+
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
|
| 5 |
+
Use SequenceMatcher to return list of the best "good enough" matches.
|
| 6 |
+
|
| 7 |
+
Function context_diff(a, b):
|
| 8 |
+
For two lists of strings, return a delta in context diff format.
|
| 9 |
+
|
| 10 |
+
Function ndiff(a, b):
|
| 11 |
+
Return a delta: the difference between `a` and `b` (lists of strings).
|
| 12 |
+
|
| 13 |
+
Function restore(delta, which):
|
| 14 |
+
Return one of the two sequences that generated an ndiff delta.
|
| 15 |
+
|
| 16 |
+
Function unified_diff(a, b):
|
| 17 |
+
For two lists of strings, return a delta in unified diff format.
|
| 18 |
+
|
| 19 |
+
Class SequenceMatcher:
|
| 20 |
+
A flexible class for comparing pairs of sequences of any type.
|
| 21 |
+
|
| 22 |
+
Class Differ:
|
| 23 |
+
For producing human-readable deltas from sequences of lines of text.
|
| 24 |
+
|
| 25 |
+
Class HtmlDiff:
|
| 26 |
+
For producing HTML side by side comparison with change highlights.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
|
| 30 |
+
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
|
| 31 |
+
'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match']
|
| 32 |
+
|
| 33 |
+
from heapq import nlargest as _nlargest
|
| 34 |
+
from collections import namedtuple as _namedtuple
|
| 35 |
+
from types import GenericAlias
|
| 36 |
+
|
| 37 |
+
Match = _namedtuple('Match', 'a b size')
|
| 38 |
+
|
| 39 |
+
def _calculate_ratio(matches, length):
|
| 40 |
+
if length:
|
| 41 |
+
return 2.0 * matches / length
|
| 42 |
+
return 1.0
|
| 43 |
+
|
| 44 |
+
class SequenceMatcher:
|
| 45 |
+
|
| 46 |
+
"""
|
| 47 |
+
SequenceMatcher is a flexible class for comparing pairs of sequences of
|
| 48 |
+
any type, so long as the sequence elements are hashable. The basic
|
| 49 |
+
algorithm predates, and is a little fancier than, an algorithm
|
| 50 |
+
published in the late 1980's by Ratcliff and Obershelp under the
|
| 51 |
+
hyperbolic name "gestalt pattern matching". The basic idea is to find
|
| 52 |
+
the longest contiguous matching subsequence that contains no "junk"
|
| 53 |
+
elements (R-O doesn't address junk). The same idea is then applied
|
| 54 |
+
recursively to the pieces of the sequences to the left and to the right
|
| 55 |
+
of the matching subsequence. This does not yield minimal edit
|
| 56 |
+
sequences, but does tend to yield matches that "look right" to people.
|
| 57 |
+
|
| 58 |
+
SequenceMatcher tries to compute a "human-friendly diff" between two
|
| 59 |
+
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
| 60 |
+
longest *contiguous* & junk-free matching subsequence. That's what
|
| 61 |
+
catches peoples' eyes. The Windows(tm) windiff has another interesting
|
| 62 |
+
notion, pairing up elements that appear uniquely in each sequence.
|
| 63 |
+
That, and the method here, appear to yield more intuitive difference
|
| 64 |
+
reports than does diff. This method appears to be the least vulnerable
|
| 65 |
+
to syncing up on blocks of "junk lines", though (like blank lines in
|
| 66 |
+
ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
| 67 |
+
because this is the only method of the 3 that has a *concept* of
|
| 68 |
+
"junk" <wink>.
|
| 69 |
+
|
| 70 |
+
Example, comparing two strings, and considering blanks to be "junk":
|
| 71 |
+
|
| 72 |
+
>>> s = SequenceMatcher(lambda x: x == " ",
|
| 73 |
+
... "private Thread currentThread;",
|
| 74 |
+
... "private volatile Thread currentThread;")
|
| 75 |
+
>>>
|
| 76 |
+
|
| 77 |
+
.ratio() returns a float in [0, 1], measuring the "similarity" of the
|
| 78 |
+
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
|
| 79 |
+
sequences are close matches:
|
| 80 |
+
|
| 81 |
+
>>> print(round(s.ratio(), 3))
|
| 82 |
+
0.866
|
| 83 |
+
>>>
|
| 84 |
+
|
| 85 |
+
If you're only interested in where the sequences match,
|
| 86 |
+
.get_matching_blocks() is handy:
|
| 87 |
+
|
| 88 |
+
>>> for block in s.get_matching_blocks():
|
| 89 |
+
... print("a[%d] and b[%d] match for %d elements" % block)
|
| 90 |
+
a[0] and b[0] match for 8 elements
|
| 91 |
+
a[8] and b[17] match for 21 elements
|
| 92 |
+
a[29] and b[38] match for 0 elements
|
| 93 |
+
|
| 94 |
+
Note that the last tuple returned by .get_matching_blocks() is always a
|
| 95 |
+
dummy, (len(a), len(b), 0), and this is the only case in which the last
|
| 96 |
+
tuple element (number of elements matched) is 0.
|
| 97 |
+
|
| 98 |
+
If you want to know how to change the first sequence into the second,
|
| 99 |
+
use .get_opcodes():
|
| 100 |
+
|
| 101 |
+
>>> for opcode in s.get_opcodes():
|
| 102 |
+
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
|
| 103 |
+
equal a[0:8] b[0:8]
|
| 104 |
+
insert a[8:8] b[8:17]
|
| 105 |
+
equal a[8:29] b[17:38]
|
| 106 |
+
|
| 107 |
+
See the Differ class for a fancy human-friendly file differencer, which
|
| 108 |
+
uses SequenceMatcher both to compare sequences of lines, and to compare
|
| 109 |
+
sequences of characters within similar (near-matching) lines.
|
| 110 |
+
|
| 111 |
+
See also function get_close_matches() in this module, which shows how
|
| 112 |
+
simple code building on SequenceMatcher can be used to do useful work.
|
| 113 |
+
|
| 114 |
+
Timing: Basic R-O is cubic time worst case and quadratic time expected
|
| 115 |
+
case. SequenceMatcher is quadratic time for the worst case and has
|
| 116 |
+
expected-case behavior dependent in a complicated way on how many
|
| 117 |
+
elements the sequences have in common; best case time is linear.
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
def __init__(self, isjunk=None, a='', b='', autojunk=True):
|
| 121 |
+
"""Construct a SequenceMatcher.
|
| 122 |
+
|
| 123 |
+
Optional arg isjunk is None (the default), or a one-argument
|
| 124 |
+
function that takes a sequence element and returns true iff the
|
| 125 |
+
element is junk. None is equivalent to passing "lambda x: 0", i.e.
|
| 126 |
+
no elements are considered to be junk. For example, pass
|
| 127 |
+
lambda x: x in " \\t"
|
| 128 |
+
if you're comparing lines as sequences of characters, and don't
|
| 129 |
+
want to synch up on blanks or hard tabs.
|
| 130 |
+
|
| 131 |
+
Optional arg a is the first of two sequences to be compared. By
|
| 132 |
+
default, an empty string. The elements of a must be hashable. See
|
| 133 |
+
also .set_seqs() and .set_seq1().
|
| 134 |
+
|
| 135 |
+
Optional arg b is the second of two sequences to be compared. By
|
| 136 |
+
default, an empty string. The elements of b must be hashable. See
|
| 137 |
+
also .set_seqs() and .set_seq2().
|
| 138 |
+
|
| 139 |
+
Optional arg autojunk should be set to False to disable the
|
| 140 |
+
"automatic junk heuristic" that treats popular elements as junk
|
| 141 |
+
(see module documentation for more information).
|
| 142 |
+
"""
|
| 143 |
+
|
| 144 |
+
# Members:
|
| 145 |
+
# a
|
| 146 |
+
# first sequence
|
| 147 |
+
# b
|
| 148 |
+
# second sequence; differences are computed as "what do
|
| 149 |
+
# we need to do to 'a' to change it into 'b'?"
|
| 150 |
+
# b2j
|
| 151 |
+
# for x in b, b2j[x] is a list of the indices (into b)
|
| 152 |
+
# at which x appears; junk and popular elements do not appear
|
| 153 |
+
# fullbcount
|
| 154 |
+
# for x in b, fullbcount[x] == the number of times x
|
| 155 |
+
# appears in b; only materialized if really needed (used
|
| 156 |
+
# only for computing quick_ratio())
|
| 157 |
+
# matching_blocks
|
| 158 |
+
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
|
| 159 |
+
# ascending & non-overlapping in i and in j; terminated by
|
| 160 |
+
# a dummy (len(a), len(b), 0) sentinel
|
| 161 |
+
# opcodes
|
| 162 |
+
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
|
| 163 |
+
# one of
|
| 164 |
+
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
|
| 165 |
+
# 'delete' a[i1:i2] should be deleted
|
| 166 |
+
# 'insert' b[j1:j2] should be inserted
|
| 167 |
+
# 'equal' a[i1:i2] == b[j1:j2]
|
| 168 |
+
# isjunk
|
| 169 |
+
# a user-supplied function taking a sequence element and
|
| 170 |
+
# returning true iff the element is "junk" -- this has
|
| 171 |
+
# subtle but helpful effects on the algorithm, which I'll
|
| 172 |
+
# get around to writing up someday <0.9 wink>.
|
| 173 |
+
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
|
| 174 |
+
# bjunk
|
| 175 |
+
# the items in b for which isjunk is True.
|
| 176 |
+
# bpopular
|
| 177 |
+
# nonjunk items in b treated as junk by the heuristic (if used).
|
| 178 |
+
|
| 179 |
+
self.isjunk = isjunk
|
| 180 |
+
self.a = self.b = None
|
| 181 |
+
self.autojunk = autojunk
|
| 182 |
+
self.set_seqs(a, b)
|
| 183 |
+
|
| 184 |
+
def set_seqs(self, a, b):
|
| 185 |
+
"""Set the two sequences to be compared.
|
| 186 |
+
|
| 187 |
+
>>> s = SequenceMatcher()
|
| 188 |
+
>>> s.set_seqs("abcd", "bcde")
|
| 189 |
+
>>> s.ratio()
|
| 190 |
+
0.75
|
| 191 |
+
"""
|
| 192 |
+
|
| 193 |
+
self.set_seq1(a)
|
| 194 |
+
self.set_seq2(b)
|
| 195 |
+
|
| 196 |
+
def set_seq1(self, a):
|
| 197 |
+
"""Set the first sequence to be compared.
|
| 198 |
+
|
| 199 |
+
The second sequence to be compared is not changed.
|
| 200 |
+
|
| 201 |
+
>>> s = SequenceMatcher(None, "abcd", "bcde")
|
| 202 |
+
>>> s.ratio()
|
| 203 |
+
0.75
|
| 204 |
+
>>> s.set_seq1("bcde")
|
| 205 |
+
>>> s.ratio()
|
| 206 |
+
1.0
|
| 207 |
+
>>>
|
| 208 |
+
|
| 209 |
+
SequenceMatcher computes and caches detailed information about the
|
| 210 |
+
second sequence, so if you want to compare one sequence S against
|
| 211 |
+
many sequences, use .set_seq2(S) once and call .set_seq1(x)
|
| 212 |
+
repeatedly for each of the other sequences.
|
| 213 |
+
|
| 214 |
+
See also set_seqs() and set_seq2().
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
if a is self.a:
|
| 218 |
+
return
|
| 219 |
+
self.a = a
|
| 220 |
+
self.matching_blocks = self.opcodes = None
|
| 221 |
+
|
| 222 |
+
def set_seq2(self, b):
|
| 223 |
+
"""Set the second sequence to be compared.
|
| 224 |
+
|
| 225 |
+
The first sequence to be compared is not changed.
|
| 226 |
+
|
| 227 |
+
>>> s = SequenceMatcher(None, "abcd", "bcde")
|
| 228 |
+
>>> s.ratio()
|
| 229 |
+
0.75
|
| 230 |
+
>>> s.set_seq2("abcd")
|
| 231 |
+
>>> s.ratio()
|
| 232 |
+
1.0
|
| 233 |
+
>>>
|
| 234 |
+
|
| 235 |
+
SequenceMatcher computes and caches detailed information about the
|
| 236 |
+
second sequence, so if you want to compare one sequence S against
|
| 237 |
+
many sequences, use .set_seq2(S) once and call .set_seq1(x)
|
| 238 |
+
repeatedly for each of the other sequences.
|
| 239 |
+
|
| 240 |
+
See also set_seqs() and set_seq1().
|
| 241 |
+
"""
|
| 242 |
+
|
| 243 |
+
if b is self.b:
|
| 244 |
+
return
|
| 245 |
+
self.b = b
|
| 246 |
+
self.matching_blocks = self.opcodes = None
|
| 247 |
+
self.fullbcount = None
|
| 248 |
+
self.__chain_b()
|
| 249 |
+
|
| 250 |
+
# For each element x in b, set b2j[x] to a list of the indices in
|
| 251 |
+
# b where x appears; the indices are in increasing order; note that
|
| 252 |
+
# the number of times x appears in b is len(b2j[x]) ...
|
| 253 |
+
# when self.isjunk is defined, junk elements don't show up in this
|
| 254 |
+
# map at all, which stops the central find_longest_match method
|
| 255 |
+
# from starting any matching block at a junk element ...
|
| 256 |
+
# b2j also does not contain entries for "popular" elements, meaning
|
| 257 |
+
# elements that account for more than 1 + 1% of the total elements, and
|
| 258 |
+
# when the sequence is reasonably large (>= 200 elements); this can
|
| 259 |
+
# be viewed as an adaptive notion of semi-junk, and yields an enormous
|
| 260 |
+
# speedup when, e.g., comparing program files with hundreds of
|
| 261 |
+
# instances of "return NULL;" ...
|
| 262 |
+
# note that this is only called when b changes; so for cross-product
|
| 263 |
+
# kinds of matches, it's best to call set_seq2 once, then set_seq1
|
| 264 |
+
# repeatedly
|
| 265 |
+
|
| 266 |
+
def __chain_b(self):
|
| 267 |
+
# Because isjunk is a user-defined (not C) function, and we test
|
| 268 |
+
# for junk a LOT, it's important to minimize the number of calls.
|
| 269 |
+
# Before the tricks described here, __chain_b was by far the most
|
| 270 |
+
# time-consuming routine in the whole module! If anyone sees
|
| 271 |
+
# Jim Roskind, thank him again for profile.py -- I never would
|
| 272 |
+
# have guessed that.
|
| 273 |
+
# The first trick is to build b2j ignoring the possibility
|
| 274 |
+
# of junk. I.e., we don't call isjunk at all yet. Throwing
|
| 275 |
+
# out the junk later is much cheaper than building b2j "right"
|
| 276 |
+
# from the start.
|
| 277 |
+
b = self.b
|
| 278 |
+
self.b2j = b2j = {}
|
| 279 |
+
|
| 280 |
+
for i, elt in enumerate(b):
|
| 281 |
+
indices = b2j.setdefault(elt, [])
|
| 282 |
+
indices.append(i)
|
| 283 |
+
|
| 284 |
+
# Purge junk elements
|
| 285 |
+
self.bjunk = junk = set()
|
| 286 |
+
isjunk = self.isjunk
|
| 287 |
+
if isjunk:
|
| 288 |
+
for elt in b2j.keys():
|
| 289 |
+
if isjunk(elt):
|
| 290 |
+
junk.add(elt)
|
| 291 |
+
for elt in junk: # separate loop avoids separate list of keys
|
| 292 |
+
del b2j[elt]
|
| 293 |
+
|
| 294 |
+
# Purge popular elements that are not junk
|
| 295 |
+
self.bpopular = popular = set()
|
| 296 |
+
n = len(b)
|
| 297 |
+
if self.autojunk and n >= 200:
|
| 298 |
+
ntest = n // 100 + 1
|
| 299 |
+
for elt, idxs in b2j.items():
|
| 300 |
+
if len(idxs) > ntest:
|
| 301 |
+
popular.add(elt)
|
| 302 |
+
for elt in popular: # ditto; as fast for 1% deletion
|
| 303 |
+
del b2j[elt]
|
| 304 |
+
|
| 305 |
+
    def find_longest_match(self, alo: int = 0, ahi: int | None = None,
                           blo: int = 0, bhi: int | None = None):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        By default it will find the longest match in the entirety of a and b.

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=0, b=4, size=5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk. That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=1, b=0, size=4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        Match(a=0, b=0, size=0)
        """

        # CAUTION: stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.

        # Bind to locals once -- these are consulted in the hot inner loop.
        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
        # None defaults let callers omit the bounds to mean "whole sequence".
        if ahi is None:
            ahi = len(a)
        if bhi is None:
            bhi = len(b)
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in range(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                # extend the diagonal run ending at (i-1, j-1) by one
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            # rebind (rather than update in place) so stale entries from the
            # previous row of the DP table are dropped automatically
            j2len = newj2len

        # Extend the best by non-junk elements on each end.  In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1

        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        return Match(besti, bestj, bestsize)
|
| 420 |
+
|
| 421 |
+
    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.

        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.  New in Python 2.5, it's also guaranteed that if
        (i, j, n) and (i', j', n') are adjacent triples in the list, and
        the second is not the last triple in the list, then i+n != i' or
        j+n != j'.  IOW, adjacent triples never describe adjacent equal
        blocks.

        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.

        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> list(s.get_matching_blocks())
        [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
        """

        # Return the cached result when available; set_seq1/set_seq2
        # invalidate the cache by resetting it to None.
        if self.matching_blocks is not None:
            return self.matching_blocks
        la, lb = len(self.a), len(self.b)

        # This is most naturally expressed as a recursive algorithm, but
        # at least one user bumped into extreme use cases that exceeded
        # the recursion limit on their box.  So, now we maintain a list
        # (`queue`) of blocks we still need to look at, and append partial
        # results to `matching_blocks` in a loop; the matches are sorted
        # at the end.
        queue = [(0, la, 0, lb)]
        matching_blocks = []
        while queue:
            alo, ahi, blo, bhi = queue.pop()
            i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
            # a[alo:i] vs b[blo:j] unknown
            # a[i:i+k] same as b[j:j+k]
            # a[i+k:ahi] vs b[j+k:bhi] unknown
            if k:   # if k is 0, there was no matching block
                matching_blocks.append(x)
                if alo < i and blo < j:
                    queue.append((alo, i, blo, j))
                if i+k < ahi and j+k < bhi:
                    queue.append((i+k, ahi, j+k, bhi))
        # sorting restores left-to-right order lost by the LIFO traversal
        matching_blocks.sort()

        # It's possible that we have adjacent equal blocks in the
        # matching_blocks list now.  Starting with 2.5, this code was added
        # to collapse them.
        i1 = j1 = k1 = 0
        non_adjacent = []
        for i2, j2, k2 in matching_blocks:
            # Is this block adjacent to i1, j1, k1?
            if i1 + k1 == i2 and j1 + k1 == j2:
                # Yes, so collapse them -- this just increases the length of
                # the first block by the length of the second, and the first
                # block so lengthened remains the block to compare against.
                k1 += k2
            else:
                # Not adjacent.  Remember the first block (k1==0 means it's
                # the dummy we started with), and make the second block the
                # new block to compare against.
                if k1:
                    non_adjacent.append((i1, j1, k1))
                i1, j1, k1 = i2, j2, k2
        if k1:
            non_adjacent.append((i1, j1, k1))

        # terminating sentinel -- the only triple with size 0
        non_adjacent.append( (la, lb, 0) )

        # wrap each plain tuple in the Match named tuple for the public API
        self.matching_blocks = list(map(Match._make, non_adjacent))
        return self.matching_blocks
|
| 491 |
+
|
| 492 |
+
def get_opcodes(self):
|
| 493 |
+
"""Return list of 5-tuples describing how to turn a into b.
|
| 494 |
+
|
| 495 |
+
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
| 496 |
+
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
| 497 |
+
tuple preceding it, and likewise for j1 == the previous j2.
|
| 498 |
+
|
| 499 |
+
The tags are strings, with these meanings:
|
| 500 |
+
|
| 501 |
+
'replace': a[i1:i2] should be replaced by b[j1:j2]
|
| 502 |
+
'delete': a[i1:i2] should be deleted.
|
| 503 |
+
Note that j1==j2 in this case.
|
| 504 |
+
'insert': b[j1:j2] should be inserted at a[i1:i1].
|
| 505 |
+
Note that i1==i2 in this case.
|
| 506 |
+
'equal': a[i1:i2] == b[j1:j2]
|
| 507 |
+
|
| 508 |
+
>>> a = "qabxcd"
|
| 509 |
+
>>> b = "abycdf"
|
| 510 |
+
>>> s = SequenceMatcher(None, a, b)
|
| 511 |
+
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
|
| 512 |
+
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
|
| 513 |
+
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
|
| 514 |
+
delete a[0:1] (q) b[0:0] ()
|
| 515 |
+
equal a[1:3] (ab) b[0:2] (ab)
|
| 516 |
+
replace a[3:4] (x) b[2:3] (y)
|
| 517 |
+
equal a[4:6] (cd) b[3:5] (cd)
|
| 518 |
+
insert a[6:6] () b[5:6] (f)
|
| 519 |
+
"""
|
| 520 |
+
|
| 521 |
+
if self.opcodes is not None:
|
| 522 |
+
return self.opcodes
|
| 523 |
+
i = j = 0
|
| 524 |
+
self.opcodes = answer = []
|
| 525 |
+
for ai, bj, size in self.get_matching_blocks():
|
| 526 |
+
# invariant: we've pumped out correct diffs to change
|
| 527 |
+
# a[:i] into b[:j], and the next matching block is
|
| 528 |
+
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
| 529 |
+
# out a diff to change a[i:ai] into b[j:bj], pump out
|
| 530 |
+
# the matching block, and move (i,j) beyond the match
|
| 531 |
+
tag = ''
|
| 532 |
+
if i < ai and j < bj:
|
| 533 |
+
tag = 'replace'
|
| 534 |
+
elif i < ai:
|
| 535 |
+
tag = 'delete'
|
| 536 |
+
elif j < bj:
|
| 537 |
+
tag = 'insert'
|
| 538 |
+
if tag:
|
| 539 |
+
answer.append( (tag, i, ai, j, bj) )
|
| 540 |
+
i, j = ai+size, bj+size
|
| 541 |
+
# the list of matching blocks is terminated by a
|
| 542 |
+
# sentinel with size 0
|
| 543 |
+
if size:
|
| 544 |
+
answer.append( ('equal', ai, i, bj, j) )
|
| 545 |
+
return answer
|
| 546 |
+
|
| 547 |
+
def get_grouped_opcodes(self, n=3):
|
| 548 |
+
""" Isolate change clusters by eliminating ranges with no changes.
|
| 549 |
+
|
| 550 |
+
Return a generator of groups with up to n lines of context.
|
| 551 |
+
Each group is in the same format as returned by get_opcodes().
|
| 552 |
+
|
| 553 |
+
>>> from pprint import pprint
|
| 554 |
+
>>> a = list(map(str, range(1,40)))
|
| 555 |
+
>>> b = a[:]
|
| 556 |
+
>>> b[8:8] = ['i'] # Make an insertion
|
| 557 |
+
>>> b[20] += 'x' # Make a replacement
|
| 558 |
+
>>> b[23:28] = [] # Make a deletion
|
| 559 |
+
>>> b[30] += 'y' # Make another replacement
|
| 560 |
+
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
|
| 561 |
+
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
|
| 562 |
+
[('equal', 16, 19, 17, 20),
|
| 563 |
+
('replace', 19, 20, 20, 21),
|
| 564 |
+
('equal', 20, 22, 21, 23),
|
| 565 |
+
('delete', 22, 27, 23, 23),
|
| 566 |
+
('equal', 27, 30, 23, 26)],
|
| 567 |
+
[('equal', 31, 34, 27, 30),
|
| 568 |
+
('replace', 34, 35, 30, 31),
|
| 569 |
+
('equal', 35, 38, 31, 34)]]
|
| 570 |
+
"""
|
| 571 |
+
|
| 572 |
+
codes = self.get_opcodes()
|
| 573 |
+
if not codes:
|
| 574 |
+
codes = [("equal", 0, 1, 0, 1)]
|
| 575 |
+
# Fixup leading and trailing groups if they show no changes.
|
| 576 |
+
if codes[0][0] == 'equal':
|
| 577 |
+
tag, i1, i2, j1, j2 = codes[0]
|
| 578 |
+
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
|
| 579 |
+
if codes[-1][0] == 'equal':
|
| 580 |
+
tag, i1, i2, j1, j2 = codes[-1]
|
| 581 |
+
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
|
| 582 |
+
|
| 583 |
+
nn = n + n
|
| 584 |
+
group = []
|
| 585 |
+
for tag, i1, i2, j1, j2 in codes:
|
| 586 |
+
# End the current group and start a new one whenever
|
| 587 |
+
# there is a large range with no changes.
|
| 588 |
+
if tag == 'equal' and i2-i1 > nn:
|
| 589 |
+
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
|
| 590 |
+
yield group
|
| 591 |
+
group = []
|
| 592 |
+
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
| 593 |
+
group.append((tag, i1, i2, j1 ,j2))
|
| 594 |
+
if group and not (len(group)==1 and group[0][0] == 'equal'):
|
| 595 |
+
yield group
|
| 596 |
+
|
| 597 |
+
def ratio(self):
|
| 598 |
+
"""Return a measure of the sequences' similarity (float in [0,1]).
|
| 599 |
+
|
| 600 |
+
Where T is the total number of elements in both sequences, and
|
| 601 |
+
M is the number of matches, this is 2.0*M / T.
|
| 602 |
+
Note that this is 1 if the sequences are identical, and 0 if
|
| 603 |
+
they have nothing in common.
|
| 604 |
+
|
| 605 |
+
.ratio() is expensive to compute if you haven't already computed
|
| 606 |
+
.get_matching_blocks() or .get_opcodes(), in which case you may
|
| 607 |
+
want to try .quick_ratio() or .real_quick_ratio() first to get an
|
| 608 |
+
upper bound.
|
| 609 |
+
|
| 610 |
+
>>> s = SequenceMatcher(None, "abcd", "bcde")
|
| 611 |
+
>>> s.ratio()
|
| 612 |
+
0.75
|
| 613 |
+
>>> s.quick_ratio()
|
| 614 |
+
0.75
|
| 615 |
+
>>> s.real_quick_ratio()
|
| 616 |
+
1.0
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
matches = sum(triple[-1] for triple in self.get_matching_blocks())
|
| 620 |
+
return _calculate_ratio(matches, len(self.a) + len(self.b))
|
| 621 |
+
|
| 622 |
+
def quick_ratio(self):
|
| 623 |
+
"""Return an upper bound on ratio() relatively quickly.
|
| 624 |
+
|
| 625 |
+
This isn't defined beyond that it is an upper bound on .ratio(), and
|
| 626 |
+
is faster to compute.
|
| 627 |
+
"""
|
| 628 |
+
|
| 629 |
+
# viewing a and b as multisets, set matches to the cardinality
|
| 630 |
+
# of their intersection; this counts the number of matches
|
| 631 |
+
# without regard to order, so is clearly an upper bound
|
| 632 |
+
if self.fullbcount is None:
|
| 633 |
+
self.fullbcount = fullbcount = {}
|
| 634 |
+
for elt in self.b:
|
| 635 |
+
fullbcount[elt] = fullbcount.get(elt, 0) + 1
|
| 636 |
+
fullbcount = self.fullbcount
|
| 637 |
+
# avail[x] is the number of times x appears in 'b' less the
|
| 638 |
+
# number of times we've seen it in 'a' so far ... kinda
|
| 639 |
+
avail = {}
|
| 640 |
+
availhas, matches = avail.__contains__, 0
|
| 641 |
+
for elt in self.a:
|
| 642 |
+
if availhas(elt):
|
| 643 |
+
numb = avail[elt]
|
| 644 |
+
else:
|
| 645 |
+
numb = fullbcount.get(elt, 0)
|
| 646 |
+
avail[elt] = numb - 1
|
| 647 |
+
if numb > 0:
|
| 648 |
+
matches = matches + 1
|
| 649 |
+
return _calculate_ratio(matches, len(self.a) + len(self.b))
|
| 650 |
+
|
| 651 |
+
def real_quick_ratio(self):
|
| 652 |
+
"""Return an upper bound on ratio() very quickly.
|
| 653 |
+
|
| 654 |
+
This isn't defined beyond that it is an upper bound on .ratio(), and
|
| 655 |
+
is faster to compute than either .ratio() or .quick_ratio().
|
| 656 |
+
"""
|
| 657 |
+
|
| 658 |
+
la, lb = len(self.a), len(self.b)
|
| 659 |
+
# can't have more matches than the number of elements in the
|
| 660 |
+
# shorter sequence
|
| 661 |
+
return _calculate_ratio(min(la, lb), la + lb)
|
| 662 |
+
|
| 663 |
+
__class_getitem__ = classmethod(GenericAlias)
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.

    word is a sequence for which close matches are desired (typically a
    string).

    possibilities is a list of sequences against which to match word
    (typically a list of strings).

    Optional arg n (default 3) is the maximum number of close matches to
    return.  n must be > 0.

    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
    that don't score at least that similar to word are ignored.

    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.

    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("Apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """

    # Validate arguments up front (the `not n > 0` form also rejects
    # incomparable values).
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))

    scored = []
    matcher = SequenceMatcher()
    # SequenceMatcher caches analysis of seq2, so fix `word` there and
    # vary seq1 over the candidates.
    matcher.set_seq2(word)
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # Cheapest upper bounds first; only compute the real ratio when
        # the candidate survives both quick screens.
        if (matcher.real_quick_ratio() >= cutoff
                and matcher.quick_ratio() >= cutoff
                and matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))

    # Keep only the n best scorers, most similar first, ...
    scored = _nlargest(n, scored)
    # ... then strip the scores.
    return [candidate for score, candidate in scored]
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
def _keep_original_ws(s, tag_s):
|
| 716 |
+
"""Replace whitespace with the original whitespace characters in `s`"""
|
| 717 |
+
return ''.join(
|
| 718 |
+
c if tag_c == " " and c.isspace() else tag_c
|
| 719 |
+
for c, tag_c in zip(s, tag_s)
|
| 720 |
+
)
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
class Differ:
    r"""
    Differ is a class for comparing sequences of lines of text, and
    producing human-readable differences or deltas.  Differ uses
    SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.

    Each line of a Differ delta begins with a two-letter code:

        '- '    line unique to sequence 1
        '+ '    line unique to sequence 2
        '  '    line common to both sequences
        '? '    line not present in either input sequence

    Lines beginning with '? ' attempt to guide the eye to intraline
    differences, and were not present in either input sequence.  These lines
    can be confusing if the sequences contain tab characters.

    Note that Differ makes no claim to produce a *minimal* diff.  To the
    contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes accidental matches 100 pages apart.
    Restricting synch points to contiguous matches preserves some notion of
    locality, at the occasional cost of producing a longer diff.

    Example: Comparing two texts.

    First we set up the texts, sequences of individual single-line strings
    ending with newlines (such sequences can also be obtained from the
    `readlines()` method of file-like objects):

    >>> text1 = '''  1. Beautiful is better than ugly.
    ...   2. Explicit is better than implicit.
    ...   3. Simple is better than complex.
    ...   4. Complex is better than complicated.
    ... '''.splitlines(keepends=True)
    >>> len(text1)
    4
    >>> text1[0][-1]
    '\n'
    >>> text2 = '''  1. Beautiful is better than ugly.
    ...   3.   Simple is better than complex.
    ...   4. Complicated is better than complex.
    ...   5. Flat is better than nested.
    ... '''.splitlines(keepends=True)

    Next we instantiate a Differ object:

    >>> d = Differ()

    Note that when instantiating a Differ object we may pass functions to
    filter out line and character 'junk'.  See Differ.__init__ for details.

    Finally, we compare the two:

    >>> result = list(d.compare(text1, text2))

    'result' is a list of strings, so let's pretty-print it:

    >>> from pprint import pprint as _pprint
    >>> _pprint(result)
    ['    1. Beautiful is better than ugly.\n',
     '-   2. Explicit is better than implicit.\n',
     '-   3. Simple is better than complex.\n',
     '+   3.   Simple is better than complex.\n',
     '?     ++\n',
     '-   4. Complex is better than complicated.\n',
     '?            ^                     ---- ^\n',
     '+   4. Complicated is better than complex.\n',
     '?           ++++ ^                      ^\n',
     '+   5. Flat is better than nested.\n']

    As a single multi-line string it looks like this:

    >>> print(''.join(result), end="")
        1. Beautiful is better than ugly.
    -   2. Explicit is better than implicit.
    -   3. Simple is better than complex.
    +   3.   Simple is better than complex.
    ?     ++
    -   4. Complex is better than complicated.
    ?            ^                     ---- ^
    +   4. Complicated is better than complex.
    ?           ++++ ^                      ^
    +   5. Flat is better than nested.
    """

    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.

        The two optional keyword parameters are for filter functions:

        - `linejunk`: A function that should accept a single string argument,
          and return true iff the string is junk. The module-level function
          `IS_LINE_JUNK` may be used to filter out lines without visible
          characters, except for at most one splat ('#').  It is recommended
          to leave linejunk None; the underlying SequenceMatcher class has
          an adaptive notion of "noise" lines that's better than any static
          definition the author has ever been able to craft.

        - `charjunk`: A function that should accept a string of length 1. The
          module-level function `IS_CHARACTER_JUNK` may be used to filter out
          whitespace characters (a blank or tab; **note**: bad idea to include
          newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
        """

        self.linejunk = linejunk
        self.charjunk = charjunk

    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.

        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects.  The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writelines()
        method of a file-like object.

        Example:

        >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
        ...                                'ore\ntree\nemu\n'.splitlines(True))),
        ...       end="")
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """

        # Dispatch each line-level opcode to the matching generator;
        # 'replace' gets the expensive intraline treatment.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError('unknown tag %r' % (tag,))

            yield from g

    def _dump(self, tag, x, lo, hi):
        """Generate comparison results for a same-tagged range."""
        for i in range(lo, hi):
            yield '%s %s' % (tag, x[i])

    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
        """Generate a straight block replacement: all '-' lines, all '+' lines."""
        assert alo < ahi and blo < bhi
        # dump the shorter block first -- reduces the burden on short-term
        # memory if the blocks are of very different sizes
        if bhi - blo < ahi - alo:
            first  = self._dump('+', b, blo, bhi)
            second = self._dump('-', a, alo, ahi)
        else:
            first  = self._dump('-', a, alo, ahi)
            second = self._dump('+', b, blo, bhi)

        for g in first, second:
            yield from g

    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print(''.join(results), end="")
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """

        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None   # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't  want to synch up
        # on junk -- unless we have to)
        for j in range(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in range(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical

        # pump out diffs from before the synch point
        yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError('unknown tag %r' % (tag,))
            yield from self._qformat(aelt, belt, atags, btags)
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)

    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        """Recurse into _fancy_replace when both ranges are non-empty,
        otherwise dump whichever side remains (if any)."""
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)

        yield from g

    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with tabs.

        Example:

        >>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
        """
        # Re-insert the original whitespace (tabs in particular) so the
        # guide characters line up under the text, then drop trailing blanks.
        atags = _keep_original_ws(aline, atags).rstrip()
        btags = _keep_original_ws(bline, btags).rstrip()

        yield "- " + aline
        if atags:
            yield f"? {atags}\n"

        yield "+ " + bline
        if btags:
            yield f"? {btags}\n"
|
| 1025 |
+
|
| 1026 |
+
# With respect to junk, an earlier version of ndiff simply refused to
|
| 1027 |
+
# *start* a match with a junk element. The result was cases like this:
|
| 1028 |
+
# before: private Thread currentThread;
|
| 1029 |
+
# after: private volatile Thread currentThread;
|
| 1030 |
+
# If you consider whitespace to be junk, the longest contiguous match
|
| 1031 |
+
# not starting with junk is "e Thread currentThread". So ndiff reported
|
| 1032 |
+
# that "e volatil" was inserted between the 't' and the 'e' in "private".
|
| 1033 |
+
# While an accurate view, to people that's absurd. The current version
|
| 1034 |
+
# looks for matching blocks that are entirely junk-free, then extends the
|
| 1035 |
+
# longest one of those as far as possible but only with matching junk.
|
| 1036 |
+
# So now "currentThread" is matched, then extended to suck up the
|
| 1037 |
+
# preceding blank; then "private" is matched, and extended to suck up the
|
| 1038 |
+
# following blank; then "Thread" is matched; and finally ndiff reports
|
| 1039 |
+
# that "volatile " was inserted before "Thread". The only quibble
|
| 1040 |
+
# remaining is that perhaps it was really the case that " volatile"
|
| 1041 |
+
# was inserted after "private". I can live with that <wink>.
|
| 1042 |
+
|
| 1043 |
+
import re
|
| 1044 |
+
|
| 1045 |
+
def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
    r"""
    Return True for ignorable line: iff `line` is blank or contains a single '#'.

    The default matcher accepts any amount of whitespace, optionally
    followed by a lone '#' (itself optionally trailed by whitespace).

    Examples:

    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """
    # `pat` is bound once at definition time so the compiled regex is reused.
    return bool(pat(line))
|
| 1060 |
+
|
| 1061 |
+
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return True for ignorable character: iff `ch` is a space or tab.

    Intended as the default `charjunk` filter for ndiff(); note that
    newline is deliberately NOT treated as junk.

    Examples:

    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """
    # Substring membership against the (constant) whitespace string.
    return ch in ws
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
########################################################################
|
| 1081 |
+
### Unified Diff
|
| 1082 |
+
########################################################################
|
| 1083 |
+
|
| 1084 |
+
def _format_range_unified(start, stop):
|
| 1085 |
+
'Convert range to the "ed" format'
|
| 1086 |
+
# Per the diff spec at http://www.unix.org/single_unix_specification/
|
| 1087 |
+
beginning = start + 1 # lines start numbering with one
|
| 1088 |
+
length = stop - start
|
| 1089 |
+
if length == 1:
|
| 1090 |
+
return '{}'.format(beginning)
|
| 1091 |
+
if not length:
|
| 1092 |
+
beginning -= 1 # empty ranges begin at line just before the range
|
| 1093 |
+
return '{},{}'.format(beginning, length)
|
| 1094 |
+
|
| 1095 |
+
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The unidiff format normally has a header for filenames and modification
    times.  Any or all of these may be specified using strings for
    'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """

    _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
    started = False
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        if not started:
            # The file header is emitted lazily, only once the first
            # changed group is found (an empty diff yields nothing).
            started = True
            fromdate = f'\t{fromfiledate}' if fromfiledate else ''
            todate = f'\t{tofiledate}' if tofiledate else ''
            yield f'--- {fromfile}{fromdate}{lineterm}'
            yield f'+++ {tofile}{todate}{lineterm}'

        first, last = group[0], group[-1]
        hunk_a = _format_range_unified(first[1], last[2])
        hunk_b = _format_range_unified(first[3], last[4])
        yield '@@ -{} +{} @@{}'.format(hunk_a, hunk_b, lineterm)

        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                yield from (' ' + line for line in a[i1:i2])
                continue
            if tag in {'replace', 'delete'}:
                yield from ('-' + line for line in a[i1:i2])
            if tag in {'replace', 'insert'}:
                yield from ('+' + line for line in b[j1:j2])
|
| 1162 |
+
|
| 1163 |
+
|
| 1164 |
+
########################################################################
|
| 1165 |
+
### Context Diff
|
| 1166 |
+
########################################################################
|
| 1167 |
+
|
| 1168 |
+
def _format_range_context(start, stop):
|
| 1169 |
+
'Convert range to the "ed" format'
|
| 1170 |
+
# Per the diff spec at http://www.unix.org/single_unix_specification/
|
| 1171 |
+
beginning = start + 1 # lines start numbering with one
|
| 1172 |
+
length = stop - start
|
| 1173 |
+
if not length:
|
| 1174 |
+
beginning -= 1 # empty ranges begin at line just before the range
|
| 1175 |
+
if length <= 1:
|
| 1176 |
+
return '{}'.format(beginning)
|
| 1177 |
+
return '{},{}'.format(beginning, beginning + length - 1)
|
| 1178 |
+
|
| 1179 |
+
# See http://www.unix.org/single_unix_specification/
|
| 1180 |
+
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.

    Context diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with *** or ---) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The context diff format normally has a header for filenames and
    modification times.  Any or all of these may be specified using
    strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.
    If not specified, the strings default to blanks.

    Example:

    >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
    ...       'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
    ...       end="")
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """

    _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
    # Two-character tag prefixes used on content lines in context format.
    prefix = {'insert': '+ ', 'delete': '- ', 'replace': '! ', 'equal': '  '}
    started = False
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        if not started:
            # Header is emitted lazily, once the first changed group exists.
            started = True
            fromdate = f'\t{fromfiledate}' if fromfiledate else ''
            todate = f'\t{tofiledate}' if tofiledate else ''
            yield f'*** {fromfile}{fromdate}{lineterm}'
            yield f'--- {tofile}{todate}{lineterm}'

        first, last = group[0], group[-1]
        yield '***************' + lineterm

        yield '*** {} ****{}'.format(
            _format_range_context(first[1], last[2]), lineterm)

        # The "from" body is printed only when the group actually removes
        # or changes something on the "from" side.
        if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
            for tag, i1, i2, _, _ in group:
                if tag == 'insert':
                    continue
                for line in a[i1:i2]:
                    yield prefix[tag] + line

        yield '--- {} ----{}'.format(
            _format_range_context(first[3], last[4]), lineterm)

        # Likewise the "to" body appears only when lines were added/changed.
        if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
            for tag, _, _, j1, j2 in group:
                if tag == 'delete':
                    continue
                for line in b[j1:j2]:
                    yield prefix[tag] + line
|
| 1255 |
+
|
| 1256 |
+
def _check_types(a, b, *args):
|
| 1257 |
+
# Checking types is weird, but the alternative is garbled output when
|
| 1258 |
+
# someone passes mixed bytes and str to {unified,context}_diff(). E.g.
|
| 1259 |
+
# without this check, passing filenames as bytes results in output like
|
| 1260 |
+
# --- b'oldfile.txt'
|
| 1261 |
+
# +++ b'newfile.txt'
|
| 1262 |
+
# because of how str.format() incorporates bytes objects.
|
| 1263 |
+
if a and not isinstance(a[0], str):
|
| 1264 |
+
raise TypeError('lines to compare must be str, not %s (%r)' %
|
| 1265 |
+
(type(a[0]).__name__, a[0]))
|
| 1266 |
+
if b and not isinstance(b[0], str):
|
| 1267 |
+
raise TypeError('lines to compare must be str, not %s (%r)' %
|
| 1268 |
+
(type(b[0]).__name__, b[0]))
|
| 1269 |
+
for arg in args:
|
| 1270 |
+
if not isinstance(arg, str):
|
| 1271 |
+
raise TypeError('all arguments must be str, not: %r' % (arg,))
|
| 1272 |
+
|
| 1273 |
+
def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
               fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
    r"""
    Compare `a` and `b`, two sequences of lines represented as bytes rather
    than str.  This is a wrapper for `dfunc`, which is typically either
    unified_diff() or context_diff().  Inputs are losslessly converted to
    strings so that `dfunc` only has to worry about strings, and encoded
    back to bytes on return.  This is necessary to compare files with
    unknown or inconsistent encoding.  All other inputs (except `n`) must be
    bytes rather than str.
    """
    def decode(s):
        # 'surrogateescape' makes the bytes->str->bytes round trip lossless
        # even for data that is not valid in any text encoding.
        try:
            return s.decode('ascii', 'surrogateescape')
        except AttributeError as err:
            raise TypeError('all arguments must be bytes, not %s (%r)' %
                            (type(s).__name__, s)) from err

    lines = dfunc([decode(line) for line in a],
                  [decode(line) for line in b],
                  decode(fromfile), decode(tofile),
                  decode(fromfiledate), decode(tofiledate),
                  n, decode(lineterm))
    for line in lines:
        yield line.encode('ascii', 'surrogateescape')
|
| 1302 |
+
|
| 1303 |
+
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.

    Optional keyword parameters `linejunk` and `charjunk` are for filter
    functions, or can be None:

    - linejunk: A function that should accept a single string argument and
      return true iff the string is junk.  The default is None, and is
      recommended; the underlying SequenceMatcher class has an adaptive
      notion of "noise" lines.

    - charjunk: A function that accepts a character (string of length
      1), and returns true iff the character is junk.  The default is
      the module-level function IS_CHARACTER_JUNK, which filters out
      whitespace characters (a blank or tab; note: it's a bad idea to
      include newline in this!).

    Tools/scripts/ndiff.py is a command-line front-end to this function.

    Example:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
    ...              'ore\ntree\nemu\n'.splitlines(keepends=True))
    >>> print(''.join(diff), end="")
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    # Delegate entirely to Differ; this function exists as the friendly
    # module-level entry point.
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
|
| 1339 |
+
|
| 1340 |
+
def _mdiff(fromlines, tolines, context=None, linejunk=None,
           charjunk=IS_CHARACTER_JUNK):
    r"""Returns generator yielding marked up from/to side by side differences.

    Arguments:
    fromlines -- list of text lines to compared to tolines
    tolines -- list of text lines to be compared to fromlines
    context -- number of context lines to display on each side of difference,
               if None, all from/to text lines will be generated.
    linejunk -- passed on to ndiff (see ndiff documentation)
    charjunk -- passed on to ndiff (see ndiff documentation)

    This function returns an iterator which returns a tuple:
    (from line tuple, to line tuple, boolean flag)

    from/to line tuple -- (line num, line text)
      line num -- integer or None (to indicate a context separation)
      line text -- original line text with following markers inserted:
        '\0+' -- marks start of added text
        '\0-' -- marks start of deleted text
        '\0^' -- marks start of changed text
        '\1' -- marks end of added/deleted/changed text

    boolean flag -- None indicates context separation, True indicates
        either "from" or "to" line contains a change, otherwise False.

    This function/iterator was originally developed to generate side by side
    file difference for making HTML pages (see HtmlDiff class for example
    usage).

    Note, this function utilizes the ndiff function to generate the side by
    side difference markup.  Optional ndiff arguments may be passed to this
    function and they in turn will be passed to ndiff.
    """
    import re

    # regular expression for finding intraline change indices
    change_re = re.compile(r'(\++|\-+|\^+)')

    # create the difference iterator to generate the differences
    diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)

    def _make_line(lines, format_key, side, num_lines=[0,0]):
        """Returns line of text with user's change markup and line formatting.

        lines -- list of lines from the ndiff generator to produce a line of
                 text from.  When producing the line of text to return, the
                 lines used are removed from this list.
        format_key -- '+' return first line in list with "add" markup around
                          the entire line.
                      '-' return first line in list with "delete" markup around
                          the entire line.
                      '?' return first line in list with add/delete/change
                          intraline markup (indices obtained from second line)
                      None return first line in list with no markup
        side -- indice into the num_lines list (0=from,1=to)
        num_lines -- from/to current line number.  This is NOT intended to be a
                     passed parameter.  It is present as a keyword argument to
                     maintain memory of the current line numbers between calls
                     of this function.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.

        NOTE: the mutable [0,0] default is deliberate here — it is the
        per-generator line-number memory described above, not a bug.
        """
        num_lines[side] += 1
        # Handle case where no user markup is to be added, just return line of
        # text with user's line format to allow for usage of the line number.
        if format_key is None:
            return (num_lines[side],lines.pop(0)[2:])
        # Handle case of intraline changes
        if format_key == '?':
            text, markers = lines.pop(0), lines.pop(0)
            # find intraline changes (store change type and indices in tuples)
            sub_info = []
            def record_sub_info(match_object,sub_info=sub_info):
                sub_info.append([match_object.group(1)[0],match_object.span()])
                return match_object.group(1)
            change_re.sub(record_sub_info,markers)
            # process each tuple inserting our special marks that won't be
            # noticed by an xml/html escaper.
            # (reversed so earlier insertions don't shift later indices)
            for key,(begin,end) in reversed(sub_info):
                text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
            text = text[2:]
        # Handle case of add/delete entire line
        else:
            text = lines.pop(0)[2:]
            # if line of text is just a newline, insert a space so there is
            # something for the user to highlight and see.
            if not text:
                text = ' '
            # insert marks that won't be noticed by an xml/html escaper.
            text = '\0' + format_key + text + '\1'
        # Return line of text, first allow user's line formatter to do its
        # thing (such as adding the line number) then replace the special
        # marks with what the user's change markup.
        return (num_lines[side],text)

    def _line_iterator():
        """Yields from/to lines of text with a change indication.

        This function is an iterator.  It itself pulls lines from a
        differencing iterator, processes them and yields them.  When it can
        it yields both a "from" and a "to" line, otherwise it will yield one
        or the other.  In addition to yielding the lines of from/to text, a
        boolean flag is yielded to indicate if the text line(s) have
        differences in them.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        lines = []
        num_blanks_pending, num_blanks_to_yield = 0, 0
        while True:
            # Load up next 4 lines so we can look ahead, create strings which
            # are a concatenation of the first character of each of the 4 lines
            # so we can do some very readable comparisons.
            # ('X' is a sentinel that cannot begin a real ndiff line.)
            while len(lines) < 4:
                lines.append(next(diff_lines_iterator, 'X'))
            s = ''.join([line[0] for line in lines])
            if s.startswith('X'):
                # When no more lines, pump out any remaining blank lines so the
                # corresponding add/delete lines get a matching blank line so
                # all line pairs get yielded at the next level.
                num_blanks_to_yield = num_blanks_pending
            elif s.startswith('-?+?'):
                # simple intraline change
                yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
                continue
            elif s.startswith('--++'):
                # in delete block, add block coming: we do NOT want to get
                # caught up on blank lines yet, just process the delete line
                num_blanks_pending -= 1
                yield _make_line(lines,'-',0), None, True
                continue
            elif s.startswith(('--?+', '--+', '- ')):
                # in delete block and see an intraline change or unchanged line
                # coming: yield the delete line and then blanks
                from_line,to_line = _make_line(lines,'-',0), None
                num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
            elif s.startswith('-+?'):
                # intraline change
                yield _make_line(lines,None,0), _make_line(lines,'?',1), True
                continue
            elif s.startswith('-?+'):
                # intraline change
                yield _make_line(lines,'?',0), _make_line(lines,None,1), True
                continue
            elif s.startswith('-'):
                # delete FROM line
                num_blanks_pending -= 1
                yield _make_line(lines,'-',0), None, True
                continue
            elif s.startswith('+--'):
                # in add block, delete block coming: we do NOT want to get
                # caught up on blank lines yet, just process the add line
                num_blanks_pending += 1
                yield None, _make_line(lines,'+',1), True
                continue
            elif s.startswith(('+ ', '+-')):
                # will be leaving an add block: yield blanks then add line
                from_line, to_line = None, _make_line(lines,'+',1)
                num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
            elif s.startswith('+'):
                # inside an add block, yield the add line
                num_blanks_pending += 1
                yield None, _make_line(lines,'+',1), True
                continue
            elif s.startswith(' '):
                # unchanged text, yield it to both sides
                # (lines[:] copy keeps the second _make_line call from seeing
                # the list already popped by the first)
                yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
                continue
            # Catch up on the blank lines so when we yield the next from/to
            # pair, they are lined up.
            while(num_blanks_to_yield < 0):
                num_blanks_to_yield += 1
                yield None,('','\n'),True
            while(num_blanks_to_yield > 0):
                num_blanks_to_yield -= 1
                yield ('','\n'),None,True
            if s.startswith('X'):
                return
            else:
                yield from_line,to_line,True

    def _line_pair_iterator():
        """Yields from/to lines of text with a change indication.

        This function is an iterator.  It itself pulls lines from the line
        iterator.  Its difference from that iterator is that this function
        always yields a pair of from/to text lines (with the change
        indication).  If necessary it will collect single from/to lines
        until it has a matching pair from/to pair to yield.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        line_iterator = _line_iterator()
        fromlines,tolines=[],[]
        while True:
            # Collecting lines of text until we have a from/to pair
            while (len(fromlines)==0 or len(tolines)==0):
                try:
                    from_line, to_line, found_diff = next(line_iterator)
                except StopIteration:
                    return
                if from_line is not None:
                    fromlines.append((from_line,found_diff))
                if to_line is not None:
                    tolines.append((to_line,found_diff))
            # Once we have a pair, remove them from the collection and yield it
            from_line, fromDiff = fromlines.pop(0)
            to_line, to_diff = tolines.pop(0)
            yield (from_line,to_line,fromDiff or to_diff)

    # Handle case where user does not want context differencing, just yield
    # them up without doing anything else with them.
    line_pair_iterator = _line_pair_iterator()
    if context is None:
        yield from line_pair_iterator
    # Handle case where user wants context differencing.  We must do some
    # storage of lines until we know for sure that they are to be yielded.
    else:
        context += 1
        lines_to_write = 0
        while True:
            # Store lines up until we find a difference, note use of a
            # circular queue because we only need to keep around what
            # we need for context.
            index, contextLines = 0, [None]*(context)
            found_diff = False
            while(found_diff is False):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    return
                i = index % context
                contextLines[i] = (from_line, to_line, found_diff)
                index += 1
            # Yield lines that we have collected so far, but first yield
            # the user's separator.
            if index > context:
                yield None, None, None
                lines_to_write = context
            else:
                lines_to_write = index
                index = 0
            while(lines_to_write):
                i = index % context
                index += 1
                yield contextLines[i]
                lines_to_write -= 1
            # Now yield the context lines after the change
            lines_to_write = context-1
            try:
                while(lines_to_write):
                    from_line, to_line, found_diff = next(line_pair_iterator)
                    # If another change within the context, extend the context
                    if found_diff:
                        lines_to_write = context-1
                    else:
                        lines_to_write -= 1
                    yield from_line, to_line, found_diff
            except StopIteration:
                # Catch exception from next() and return normally
                return
|
| 1608 |
+
|
| 1609 |
+
|
| 1610 |
+
_file_template = """
|
| 1611 |
+
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
| 1612 |
+
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
| 1613 |
+
|
| 1614 |
+
<html>
|
| 1615 |
+
|
| 1616 |
+
<head>
|
| 1617 |
+
<meta http-equiv="Content-Type"
|
| 1618 |
+
content="text/html; charset=%(charset)s" />
|
| 1619 |
+
<title></title>
|
| 1620 |
+
<style type="text/css">%(styles)s
|
| 1621 |
+
</style>
|
| 1622 |
+
</head>
|
| 1623 |
+
|
| 1624 |
+
<body>
|
| 1625 |
+
%(table)s%(legend)s
|
| 1626 |
+
</body>
|
| 1627 |
+
|
| 1628 |
+
</html>"""
|
| 1629 |
+
|
| 1630 |
+
_styles = """
|
| 1631 |
+
table.diff {font-family:Courier; border:medium;}
|
| 1632 |
+
.diff_header {background-color:#e0e0e0}
|
| 1633 |
+
td.diff_header {text-align:right}
|
| 1634 |
+
.diff_next {background-color:#c0c0c0}
|
| 1635 |
+
.diff_add {background-color:#aaffaa}
|
| 1636 |
+
.diff_chg {background-color:#ffff77}
|
| 1637 |
+
.diff_sub {background-color:#ffaaaa}"""
|
| 1638 |
+
|
| 1639 |
+
_table_template = """
|
| 1640 |
+
<table class="diff" id="difflib_chg_%(prefix)s_top"
|
| 1641 |
+
cellspacing="0" cellpadding="0" rules="groups" >
|
| 1642 |
+
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
|
| 1643 |
+
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
|
| 1644 |
+
%(header_row)s
|
| 1645 |
+
<tbody>
|
| 1646 |
+
%(data_rows)s </tbody>
|
| 1647 |
+
</table>"""
|
| 1648 |
+
|
| 1649 |
+
_legend = """
|
| 1650 |
+
<table class="diff" summary="Legends">
|
| 1651 |
+
<tr> <th colspan="2"> Legends </th> </tr>
|
| 1652 |
+
<tr> <td> <table border="" summary="Colors">
|
| 1653 |
+
<tr><th> Colors </th> </tr>
|
| 1654 |
+
<tr><td class="diff_add"> Added </td></tr>
|
| 1655 |
+
<tr><td class="diff_chg">Changed</td> </tr>
|
| 1656 |
+
<tr><td class="diff_sub">Deleted</td> </tr>
|
| 1657 |
+
</table></td>
|
| 1658 |
+
<td> <table border="" summary="Links">
|
| 1659 |
+
<tr><th colspan="2"> Links </th> </tr>
|
| 1660 |
+
<tr><td>(f)irst change</td> </tr>
|
| 1661 |
+
<tr><td>(n)ext change</td> </tr>
|
| 1662 |
+
<tr><td>(t)op</td> </tr>
|
| 1663 |
+
</table></td> </tr>
|
| 1664 |
+
</table>"""
|
| 1665 |
+
|
| 1666 |
+
class HtmlDiff(object):
|
| 1667 |
+
"""For producing HTML side by side comparison with change highlights.
|
| 1668 |
+
|
| 1669 |
+
This class can be used to create an HTML table (or a complete HTML file
|
| 1670 |
+
containing the table) showing a side by side, line by line comparison
|
| 1671 |
+
of text with inter-line and intra-line change highlights. The table can
|
| 1672 |
+
be generated in either full or contextual difference mode.
|
| 1673 |
+
|
| 1674 |
+
The following methods are provided for HTML generation:
|
| 1675 |
+
|
| 1676 |
+
make_table -- generates HTML for a single side by side table
|
| 1677 |
+
make_file -- generates complete HTML file with a single side by side table
|
| 1678 |
+
|
| 1679 |
+
See tools/scripts/diff.py for an example usage of this class.
|
| 1680 |
+
"""
|
| 1681 |
+
|
| 1682 |
+
_file_template = _file_template
|
| 1683 |
+
_styles = _styles
|
| 1684 |
+
_table_template = _table_template
|
| 1685 |
+
_legend = _legend
|
| 1686 |
+
_default_prefix = 0
|
| 1687 |
+
|
| 1688 |
+
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
|
| 1689 |
+
charjunk=IS_CHARACTER_JUNK):
|
| 1690 |
+
"""HtmlDiff instance initializer
|
| 1691 |
+
|
| 1692 |
+
Arguments:
|
| 1693 |
+
tabsize -- tab stop spacing, defaults to 8.
|
| 1694 |
+
wrapcolumn -- column number where lines are broken and wrapped,
|
| 1695 |
+
defaults to None where lines are not wrapped.
|
| 1696 |
+
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
|
| 1697 |
+
HtmlDiff() to generate the side by side HTML differences). See
|
| 1698 |
+
ndiff() documentation for argument default values and descriptions.
|
| 1699 |
+
"""
|
| 1700 |
+
self._tabsize = tabsize
|
| 1701 |
+
self._wrapcolumn = wrapcolumn
|
| 1702 |
+
self._linejunk = linejunk
|
| 1703 |
+
self._charjunk = charjunk
|
| 1704 |
+
|
| 1705 |
+
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
|
| 1706 |
+
context=False, numlines=5, *, charset='utf-8'):
|
| 1707 |
+
"""Returns HTML file of side by side comparison with change highlights
|
| 1708 |
+
|
| 1709 |
+
Arguments:
|
| 1710 |
+
fromlines -- list of "from" lines
|
| 1711 |
+
tolines -- list of "to" lines
|
| 1712 |
+
fromdesc -- "from" file column header string
|
| 1713 |
+
todesc -- "to" file column header string
|
| 1714 |
+
context -- set to True for contextual differences (defaults to False
|
| 1715 |
+
which shows full differences).
|
| 1716 |
+
numlines -- number of context lines. When context is set True,
|
| 1717 |
+
controls number of lines displayed before and after the change.
|
| 1718 |
+
When context is False, controls the number of lines to place
|
| 1719 |
+
the "next" link anchors before the next change (so click of
|
| 1720 |
+
"next" link jumps to just before the change).
|
| 1721 |
+
charset -- charset of the HTML document
|
| 1722 |
+
"""
|
| 1723 |
+
|
| 1724 |
+
return (self._file_template % dict(
|
| 1725 |
+
styles=self._styles,
|
| 1726 |
+
legend=self._legend,
|
| 1727 |
+
table=self.make_table(fromlines, tolines, fromdesc, todesc,
|
| 1728 |
+
context=context, numlines=numlines),
|
| 1729 |
+
charset=charset
|
| 1730 |
+
)).encode(charset, 'xmlcharrefreplace').decode(charset)
|
| 1731 |
+
|
| 1732 |
+
def _tab_newline_replace(self,fromlines,tolines):
|
| 1733 |
+
"""Returns from/to line lists with tabs expanded and newlines removed.
|
| 1734 |
+
|
| 1735 |
+
Instead of tab characters being replaced by the number of spaces
|
| 1736 |
+
needed to fill in to the next tab stop, this function will fill
|
| 1737 |
+
the space with tab characters. This is done so that the difference
|
| 1738 |
+
algorithms can identify changes in a file when tabs are replaced by
|
| 1739 |
+
spaces and vice versa. At the end of the HTML generation, the tab
|
| 1740 |
+
characters will be replaced with a nonbreakable space.
|
| 1741 |
+
"""
|
| 1742 |
+
def expand_tabs(line):
|
| 1743 |
+
# hide real spaces
|
| 1744 |
+
line = line.replace(' ','\0')
|
| 1745 |
+
# expand tabs into spaces
|
| 1746 |
+
line = line.expandtabs(self._tabsize)
|
| 1747 |
+
# replace spaces from expanded tabs back into tab characters
|
| 1748 |
+
# (we'll replace them with markup after we do differencing)
|
| 1749 |
+
line = line.replace(' ','\t')
|
| 1750 |
+
return line.replace('\0',' ').rstrip('\n')
|
| 1751 |
+
fromlines = [expand_tabs(line) for line in fromlines]
|
| 1752 |
+
tolines = [expand_tabs(line) for line in tolines]
|
| 1753 |
+
return fromlines,tolines
|
| 1754 |
+
|
| 1755 |
+
def _split_line(self,data_list,line_num,text):
|
| 1756 |
+
"""Builds list of text lines by splitting text lines at wrap point
|
| 1757 |
+
|
| 1758 |
+
This function will determine if the input text line needs to be
|
| 1759 |
+
wrapped (split) into separate lines. If so, the first wrap point
|
| 1760 |
+
will be determined and the first line appended to the output
|
| 1761 |
+
text line list. This function is used recursively to handle
|
| 1762 |
+
the second part of the split line to further split it.
|
| 1763 |
+
"""
|
| 1764 |
+
# if blank line or context separator, just add it to the output list
|
| 1765 |
+
if not line_num:
|
| 1766 |
+
data_list.append((line_num,text))
|
| 1767 |
+
return
|
| 1768 |
+
|
| 1769 |
+
# if line text doesn't need wrapping, just add it to the output list
|
| 1770 |
+
size = len(text)
|
| 1771 |
+
max = self._wrapcolumn
|
| 1772 |
+
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
|
| 1773 |
+
data_list.append((line_num,text))
|
| 1774 |
+
return
|
| 1775 |
+
|
| 1776 |
+
# scan text looking for the wrap point, keeping track if the wrap
|
| 1777 |
+
# point is inside markers
|
| 1778 |
+
i = 0
|
| 1779 |
+
n = 0
|
| 1780 |
+
mark = ''
|
| 1781 |
+
while n < max and i < size:
|
| 1782 |
+
if text[i] == '\0':
|
| 1783 |
+
i += 1
|
| 1784 |
+
mark = text[i]
|
| 1785 |
+
i += 1
|
| 1786 |
+
elif text[i] == '\1':
|
| 1787 |
+
i += 1
|
| 1788 |
+
mark = ''
|
| 1789 |
+
else:
|
| 1790 |
+
i += 1
|
| 1791 |
+
n += 1
|
| 1792 |
+
|
| 1793 |
+
# wrap point is inside text, break it up into separate lines
|
| 1794 |
+
line1 = text[:i]
|
| 1795 |
+
line2 = text[i:]
|
| 1796 |
+
|
| 1797 |
+
# if wrap point is inside markers, place end marker at end of first
|
| 1798 |
+
# line and start marker at beginning of second line because each
|
| 1799 |
+
# line will have its own table tag markup around it.
|
| 1800 |
+
if mark:
|
| 1801 |
+
line1 = line1 + '\1'
|
| 1802 |
+
line2 = '\0' + mark + line2
|
| 1803 |
+
|
| 1804 |
+
# tack on first line onto the output list
|
| 1805 |
+
data_list.append((line_num,line1))
|
| 1806 |
+
|
| 1807 |
+
# use this routine again to wrap the remaining text
|
| 1808 |
+
self._split_line(data_list,'>',line2)
|
| 1809 |
+
|
| 1810 |
+
def _line_wrapper(self,diffs):
|
| 1811 |
+
"""Returns iterator that splits (wraps) mdiff text lines"""
|
| 1812 |
+
|
| 1813 |
+
# pull from/to data and flags from mdiff iterator
|
| 1814 |
+
for fromdata,todata,flag in diffs:
|
| 1815 |
+
# check for context separators and pass them through
|
| 1816 |
+
if flag is None:
|
| 1817 |
+
yield fromdata,todata,flag
|
| 1818 |
+
continue
|
| 1819 |
+
(fromline,fromtext),(toline,totext) = fromdata,todata
|
| 1820 |
+
# for each from/to line split it at the wrap column to form
|
| 1821 |
+
# list of text lines.
|
| 1822 |
+
fromlist,tolist = [],[]
|
| 1823 |
+
self._split_line(fromlist,fromline,fromtext)
|
| 1824 |
+
self._split_line(tolist,toline,totext)
|
| 1825 |
+
# yield from/to line in pairs inserting blank lines as
|
| 1826 |
+
# necessary when one side has more wrapped lines
|
| 1827 |
+
while fromlist or tolist:
|
| 1828 |
+
if fromlist:
|
| 1829 |
+
fromdata = fromlist.pop(0)
|
| 1830 |
+
else:
|
| 1831 |
+
fromdata = ('',' ')
|
| 1832 |
+
if tolist:
|
| 1833 |
+
todata = tolist.pop(0)
|
| 1834 |
+
else:
|
| 1835 |
+
todata = ('',' ')
|
| 1836 |
+
yield fromdata,todata,flag
|
| 1837 |
+
|
| 1838 |
+
def _collect_lines(self,diffs):
|
| 1839 |
+
"""Collects mdiff output into separate lists
|
| 1840 |
+
|
| 1841 |
+
Before storing the mdiff from/to data into a list, it is converted
|
| 1842 |
+
into a single line of text with HTML markup.
|
| 1843 |
+
"""
|
| 1844 |
+
|
| 1845 |
+
fromlist,tolist,flaglist = [],[],[]
|
| 1846 |
+
# pull from/to data and flags from mdiff style iterator
|
| 1847 |
+
for fromdata,todata,flag in diffs:
|
| 1848 |
+
try:
|
| 1849 |
+
# store HTML markup of the lines into the lists
|
| 1850 |
+
fromlist.append(self._format_line(0,flag,*fromdata))
|
| 1851 |
+
tolist.append(self._format_line(1,flag,*todata))
|
| 1852 |
+
except TypeError:
|
| 1853 |
+
# exceptions occur for lines where context separators go
|
| 1854 |
+
fromlist.append(None)
|
| 1855 |
+
tolist.append(None)
|
| 1856 |
+
flaglist.append(flag)
|
| 1857 |
+
return fromlist,tolist,flaglist
|
| 1858 |
+
|
| 1859 |
+
def _format_line(self,side,flag,linenum,text):
|
| 1860 |
+
"""Returns HTML markup of "from" / "to" text lines
|
| 1861 |
+
|
| 1862 |
+
side -- 0 or 1 indicating "from" or "to" text
|
| 1863 |
+
flag -- indicates if difference on line
|
| 1864 |
+
linenum -- line number (used for line number column)
|
| 1865 |
+
text -- line text to be marked up
|
| 1866 |
+
"""
|
| 1867 |
+
try:
|
| 1868 |
+
linenum = '%d' % linenum
|
| 1869 |
+
id = ' id="%s%s"' % (self._prefix[side],linenum)
|
| 1870 |
+
except TypeError:
|
| 1871 |
+
# handle blank lines where linenum is '>' or ''
|
| 1872 |
+
id = ''
|
| 1873 |
+
# replace those things that would get confused with HTML symbols
|
| 1874 |
+
text=text.replace("&","&").replace(">",">").replace("<","<")
|
| 1875 |
+
|
| 1876 |
+
# make space non-breakable so they don't get compressed or line wrapped
|
| 1877 |
+
text = text.replace(' ',' ').rstrip()
|
| 1878 |
+
|
| 1879 |
+
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
|
| 1880 |
+
% (id,linenum,text)
|
| 1881 |
+
|
| 1882 |
+
def _make_prefix(self):
|
| 1883 |
+
"""Create unique anchor prefixes"""
|
| 1884 |
+
|
| 1885 |
+
# Generate a unique anchor prefix so multiple tables
|
| 1886 |
+
# can exist on the same HTML page without conflicts.
|
| 1887 |
+
fromprefix = "from%d_" % HtmlDiff._default_prefix
|
| 1888 |
+
toprefix = "to%d_" % HtmlDiff._default_prefix
|
| 1889 |
+
HtmlDiff._default_prefix += 1
|
| 1890 |
+
# store prefixes so line format method has access
|
| 1891 |
+
self._prefix = [fromprefix,toprefix]
|
| 1892 |
+
|
| 1893 |
+
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
|
| 1894 |
+
"""Makes list of "next" links"""
|
| 1895 |
+
|
| 1896 |
+
# all anchor names will be generated using the unique "to" prefix
|
| 1897 |
+
toprefix = self._prefix[1]
|
| 1898 |
+
|
| 1899 |
+
# process change flags, generating middle column of next anchors/links
|
| 1900 |
+
next_id = ['']*len(flaglist)
|
| 1901 |
+
next_href = ['']*len(flaglist)
|
| 1902 |
+
num_chg, in_change = 0, False
|
| 1903 |
+
last = 0
|
| 1904 |
+
for i,flag in enumerate(flaglist):
|
| 1905 |
+
if flag:
|
| 1906 |
+
if not in_change:
|
| 1907 |
+
in_change = True
|
| 1908 |
+
last = i
|
| 1909 |
+
# at the beginning of a change, drop an anchor a few lines
|
| 1910 |
+
# (the context lines) before the change for the previous
|
| 1911 |
+
# link
|
| 1912 |
+
i = max([0,i-numlines])
|
| 1913 |
+
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
|
| 1914 |
+
# at the beginning of a change, drop a link to the next
|
| 1915 |
+
# change
|
| 1916 |
+
num_chg += 1
|
| 1917 |
+
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
|
| 1918 |
+
toprefix,num_chg)
|
| 1919 |
+
else:
|
| 1920 |
+
in_change = False
|
| 1921 |
+
# check for cases where there is no content to avoid exceptions
|
| 1922 |
+
if not flaglist:
|
| 1923 |
+
flaglist = [False]
|
| 1924 |
+
next_id = ['']
|
| 1925 |
+
next_href = ['']
|
| 1926 |
+
last = 0
|
| 1927 |
+
if context:
|
| 1928 |
+
fromlist = ['<td></td><td> No Differences Found </td>']
|
| 1929 |
+
tolist = fromlist
|
| 1930 |
+
else:
|
| 1931 |
+
fromlist = tolist = ['<td></td><td> Empty File </td>']
|
| 1932 |
+
# if not a change on first line, drop a link
|
| 1933 |
+
if not flaglist[0]:
|
| 1934 |
+
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
|
| 1935 |
+
# redo the last link to link to the top
|
| 1936 |
+
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
|
| 1937 |
+
|
| 1938 |
+
return fromlist,tolist,flaglist,next_href,next_id
|
| 1939 |
+
|
| 1940 |
+
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
|
| 1941 |
+
numlines=5):
|
| 1942 |
+
"""Returns HTML table of side by side comparison with change highlights
|
| 1943 |
+
|
| 1944 |
+
Arguments:
|
| 1945 |
+
fromlines -- list of "from" lines
|
| 1946 |
+
tolines -- list of "to" lines
|
| 1947 |
+
fromdesc -- "from" file column header string
|
| 1948 |
+
todesc -- "to" file column header string
|
| 1949 |
+
context -- set to True for contextual differences (defaults to False
|
| 1950 |
+
which shows full differences).
|
| 1951 |
+
numlines -- number of context lines. When context is set True,
|
| 1952 |
+
controls number of lines displayed before and after the change.
|
| 1953 |
+
When context is False, controls the number of lines to place
|
| 1954 |
+
the "next" link anchors before the next change (so click of
|
| 1955 |
+
"next" link jumps to just before the change).
|
| 1956 |
+
"""
|
| 1957 |
+
|
| 1958 |
+
# make unique anchor prefixes so that multiple tables may exist
|
| 1959 |
+
# on the same page without conflict.
|
| 1960 |
+
self._make_prefix()
|
| 1961 |
+
|
| 1962 |
+
# change tabs to spaces before it gets more difficult after we insert
|
| 1963 |
+
# markup
|
| 1964 |
+
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
|
| 1965 |
+
|
| 1966 |
+
# create diffs iterator which generates side by side from/to data
|
| 1967 |
+
if context:
|
| 1968 |
+
context_lines = numlines
|
| 1969 |
+
else:
|
| 1970 |
+
context_lines = None
|
| 1971 |
+
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
|
| 1972 |
+
charjunk=self._charjunk)
|
| 1973 |
+
|
| 1974 |
+
# set up iterator to wrap lines that exceed desired width
|
| 1975 |
+
if self._wrapcolumn:
|
| 1976 |
+
diffs = self._line_wrapper(diffs)
|
| 1977 |
+
|
| 1978 |
+
# collect up from/to lines and flags into lists (also format the lines)
|
| 1979 |
+
fromlist,tolist,flaglist = self._collect_lines(diffs)
|
| 1980 |
+
|
| 1981 |
+
# process change flags, generating middle column of next anchors/links
|
| 1982 |
+
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
|
| 1983 |
+
fromlist,tolist,flaglist,context,numlines)
|
| 1984 |
+
|
| 1985 |
+
s = []
|
| 1986 |
+
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
|
| 1987 |
+
'<td class="diff_next">%s</td>%s</tr>\n'
|
| 1988 |
+
for i in range(len(flaglist)):
|
| 1989 |
+
if flaglist[i] is None:
|
| 1990 |
+
# mdiff yields None on separator lines skip the bogus ones
|
| 1991 |
+
# generated for the first line
|
| 1992 |
+
if i > 0:
|
| 1993 |
+
s.append(' </tbody> \n <tbody>\n')
|
| 1994 |
+
else:
|
| 1995 |
+
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
|
| 1996 |
+
next_href[i],tolist[i]))
|
| 1997 |
+
if fromdesc or todesc:
|
| 1998 |
+
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
|
| 1999 |
+
'<th class="diff_next"><br /></th>',
|
| 2000 |
+
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
|
| 2001 |
+
'<th class="diff_next"><br /></th>',
|
| 2002 |
+
'<th colspan="2" class="diff_header">%s</th>' % todesc)
|
| 2003 |
+
else:
|
| 2004 |
+
header_row = ''
|
| 2005 |
+
|
| 2006 |
+
table = self._table_template % dict(
|
| 2007 |
+
data_rows=''.join(s),
|
| 2008 |
+
header_row=header_row,
|
| 2009 |
+
prefix=self._prefix[1])
|
| 2010 |
+
|
| 2011 |
+
return table.replace('\0+','<span class="diff_add">'). \
|
| 2012 |
+
replace('\0-','<span class="diff_sub">'). \
|
| 2013 |
+
replace('\0^','<span class="diff_chg">'). \
|
| 2014 |
+
replace('\1','</span>'). \
|
| 2015 |
+
replace('\t',' ')
|
| 2016 |
+
|
| 2017 |
+
del re
|
| 2018 |
+
|
| 2019 |
+
def restore(delta, which):
|
| 2020 |
+
r"""
|
| 2021 |
+
Generate one of the two sequences that generated a delta.
|
| 2022 |
+
|
| 2023 |
+
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
|
| 2024 |
+
lines originating from file 1 or 2 (parameter `which`), stripping off line
|
| 2025 |
+
prefixes.
|
| 2026 |
+
|
| 2027 |
+
Examples:
|
| 2028 |
+
|
| 2029 |
+
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
|
| 2030 |
+
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
|
| 2031 |
+
>>> diff = list(diff)
|
| 2032 |
+
>>> print(''.join(restore(diff, 1)), end="")
|
| 2033 |
+
one
|
| 2034 |
+
two
|
| 2035 |
+
three
|
| 2036 |
+
>>> print(''.join(restore(diff, 2)), end="")
|
| 2037 |
+
ore
|
| 2038 |
+
tree
|
| 2039 |
+
emu
|
| 2040 |
+
"""
|
| 2041 |
+
try:
|
| 2042 |
+
tag = {1: "- ", 2: "+ "}[int(which)]
|
| 2043 |
+
except KeyError:
|
| 2044 |
+
raise ValueError('unknown delta choice (must be 1 or 2): %r'
|
| 2045 |
+
% which) from None
|
| 2046 |
+
prefixes = (" ", tag)
|
| 2047 |
+
for line in delta:
|
| 2048 |
+
if line[:2] in prefixes:
|
| 2049 |
+
yield line[2:]
|
| 2050 |
+
|
| 2051 |
+
def _test():
|
| 2052 |
+
import doctest, difflib
|
| 2053 |
+
return doctest.testmod(difflib)
|
| 2054 |
+
|
| 2055 |
+
if __name__ == "__main__":
|
| 2056 |
+
_test()
|
parrot/lib/python3.10/distutils/debug.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
# If DISTUTILS_DEBUG is anything other than the empty string, we run in
|
| 4 |
+
# debug mode.
|
| 5 |
+
DEBUG = os.environ.get('DISTUTILS_DEBUG')
|
parrot/lib/python3.10/distutils/dist.py
ADDED
|
@@ -0,0 +1,1256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.dist
|
| 2 |
+
|
| 3 |
+
Provides the Distribution class, which represents the module distribution
|
| 4 |
+
being built/installed/distributed.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
from email import message_from_file
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import warnings
|
| 14 |
+
except ImportError:
|
| 15 |
+
warnings = None
|
| 16 |
+
|
| 17 |
+
from distutils.errors import *
|
| 18 |
+
from distutils.fancy_getopt import FancyGetopt, translate_longopt
|
| 19 |
+
from distutils.util import check_environ, strtobool, rfc822_escape
|
| 20 |
+
from distutils import log
|
| 21 |
+
from distutils.debug import DEBUG
|
| 22 |
+
|
| 23 |
+
# Regex to define acceptable Distutils command names. This is not *quite*
|
| 24 |
+
# the same as a Python NAME -- I don't allow leading underscores. The fact
|
| 25 |
+
# that they're very similar is no coincidence; the default naming scheme is
|
| 26 |
+
# to look for a Python module named after the command.
|
| 27 |
+
command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _ensure_list(value, fieldname):
|
| 31 |
+
if isinstance(value, str):
|
| 32 |
+
# a string containing comma separated values is okay. It will
|
| 33 |
+
# be converted to a list by Distribution.finalize_options().
|
| 34 |
+
pass
|
| 35 |
+
elif not isinstance(value, list):
|
| 36 |
+
# passing a tuple or an iterator perhaps, warn and convert
|
| 37 |
+
typename = type(value).__name__
|
| 38 |
+
msg = f"Warning: '{fieldname}' should be a list, got type '{typename}'"
|
| 39 |
+
log.log(log.WARN, msg)
|
| 40 |
+
value = list(value)
|
| 41 |
+
return value
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Distribution:
|
| 45 |
+
"""The core of the Distutils. Most of the work hiding behind 'setup'
|
| 46 |
+
is really done within a Distribution instance, which farms the work out
|
| 47 |
+
to the Distutils commands specified on the command line.
|
| 48 |
+
|
| 49 |
+
Setup scripts will almost never instantiate Distribution directly,
|
| 50 |
+
unless the 'setup()' function is totally inadequate to their needs.
|
| 51 |
+
However, it is conceivable that a setup script might wish to subclass
|
| 52 |
+
Distribution for some specialized purpose, and then pass the subclass
|
| 53 |
+
to 'setup()' as the 'distclass' keyword argument. If so, it is
|
| 54 |
+
necessary to respect the expectations that 'setup' has of Distribution.
|
| 55 |
+
See the code for 'setup()', in core.py, for details.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
# 'global_options' describes the command-line options that may be
|
| 59 |
+
# supplied to the setup script prior to any actual commands.
|
| 60 |
+
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
|
| 61 |
+
# these global options. This list should be kept to a bare minimum,
|
| 62 |
+
# since every global option is also valid as a command option -- and we
|
| 63 |
+
# don't want to pollute the commands with too many options that they
|
| 64 |
+
# have minimal control over.
|
| 65 |
+
# The fourth entry for verbose means that it can be repeated.
|
| 66 |
+
global_options = [
|
| 67 |
+
('verbose', 'v', "run verbosely (default)", 1),
|
| 68 |
+
('quiet', 'q', "run quietly (turns verbosity off)"),
|
| 69 |
+
('dry-run', 'n', "don't actually do anything"),
|
| 70 |
+
('help', 'h', "show detailed help message"),
|
| 71 |
+
('no-user-cfg', None,
|
| 72 |
+
'ignore pydistutils.cfg in your home directory'),
|
| 73 |
+
]
|
| 74 |
+
|
| 75 |
+
# 'common_usage' is a short (2-3 line) string describing the common
|
| 76 |
+
# usage of the setup script.
|
| 77 |
+
common_usage = """\
|
| 78 |
+
Common commands: (see '--help-commands' for more)
|
| 79 |
+
|
| 80 |
+
setup.py build will build the package underneath 'build/'
|
| 81 |
+
setup.py install will install the package
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
# options that are not propagated to the commands
|
| 85 |
+
display_options = [
|
| 86 |
+
('help-commands', None,
|
| 87 |
+
"list all available commands"),
|
| 88 |
+
('name', None,
|
| 89 |
+
"print package name"),
|
| 90 |
+
('version', 'V',
|
| 91 |
+
"print package version"),
|
| 92 |
+
('fullname', None,
|
| 93 |
+
"print <package name>-<version>"),
|
| 94 |
+
('author', None,
|
| 95 |
+
"print the author's name"),
|
| 96 |
+
('author-email', None,
|
| 97 |
+
"print the author's email address"),
|
| 98 |
+
('maintainer', None,
|
| 99 |
+
"print the maintainer's name"),
|
| 100 |
+
('maintainer-email', None,
|
| 101 |
+
"print the maintainer's email address"),
|
| 102 |
+
('contact', None,
|
| 103 |
+
"print the maintainer's name if known, else the author's"),
|
| 104 |
+
('contact-email', None,
|
| 105 |
+
"print the maintainer's email address if known, else the author's"),
|
| 106 |
+
('url', None,
|
| 107 |
+
"print the URL for this package"),
|
| 108 |
+
('license', None,
|
| 109 |
+
"print the license of the package"),
|
| 110 |
+
('licence', None,
|
| 111 |
+
"alias for --license"),
|
| 112 |
+
('description', None,
|
| 113 |
+
"print the package description"),
|
| 114 |
+
('long-description', None,
|
| 115 |
+
"print the long package description"),
|
| 116 |
+
('platforms', None,
|
| 117 |
+
"print the list of platforms"),
|
| 118 |
+
('classifiers', None,
|
| 119 |
+
"print the list of classifiers"),
|
| 120 |
+
('keywords', None,
|
| 121 |
+
"print the list of keywords"),
|
| 122 |
+
('provides', None,
|
| 123 |
+
"print the list of packages/modules provided"),
|
| 124 |
+
('requires', None,
|
| 125 |
+
"print the list of packages/modules required"),
|
| 126 |
+
('obsoletes', None,
|
| 127 |
+
"print the list of packages/modules made obsolete")
|
| 128 |
+
]
|
| 129 |
+
display_option_names = [translate_longopt(x[0]) for x in display_options]
|
| 130 |
+
|
| 131 |
+
# negative options are options that exclude other options
|
| 132 |
+
negative_opt = {'quiet': 'verbose'}
|
| 133 |
+
|
| 134 |
+
# -- Creation/initialization methods -------------------------------
|
| 135 |
+
|
| 136 |
+
def __init__(self, attrs=None):
    """Build a Distribution instance.

    Every attribute is first assigned a neutral default (0, None, an
    empty list or dict); entries from 'attrs' (a mapping of attribute
    names to values, typically the keyword arguments of 'setup()')
    then overwrite those defaults.  'command_obj' starts out empty and
    is filled with real command objects by 'parse_command_line()'.
    """
    # Global command-line flags, plus one zeroed flag per display option.
    self.verbose = 1
    self.dry_run = 0
    self.help = 0
    for display_attr in self.display_option_names:
        setattr(self, display_attr, 0)

    # Package meta-data (name, version, author, ...) lives on a
    # separate DistributionMetadata object; its get_XXX() accessors
    # are mirrored onto self so callers can query either object.
    self.metadata = DistributionMetadata()
    for base in self.metadata._METHOD_BASENAMES:
        getter = "get_" + base
        setattr(self, getter, getattr(self.metadata, getter))

    # Command-name -> command-class overrides from the setup script;
    # lets us pick the right class quickly and lets setup scripts
    # replace standard command implementations.
    self.cmdclass = {}

    # Packages searched (left to right) for command implementations;
    # command 'foo' must be class 'foo' in module 'foo' of one of
    # these packages.  Always read through get_command_packages().
    self.command_packages = None

    # Usually sys.argv[0] / sys.argv[1:], but overridable by callers
    # that are not a command-line setup script.
    self.script_name = None
    self.script_args = None

    # Options parsed from config files / the command line but not yet
    # applied, keyed as:
    #   { command_name : { option : (source, value) } }
    self.command_options = {}

    # (command, pyversion, filename) triples recorded by dist commands
    # (filled even on dry runs).  pyversion is a specific version
    # string, 'any' for version-independent built files, or '' for a
    # source file; it is not a min/max requirement -- the metainfo
    # covers that.
    self.dist_files = []

    # Convenience aliases for options that really belong to the
    # individual commands rather than to the Distribution itself.
    self.packages = None
    self.package_data = {}
    self.package_dir = None
    self.py_modules = None
    self.libraries = None
    self.headers = None
    self.ext_modules = None
    self.ext_package = None
    self.include_dirs = None
    self.extra_path = None
    self.scripts = None
    self.data_files = None
    self.password = ''

    # Command-name -> Command instance; this cache is how we enforce
    # one instance per command class.
    self.command_obj = {}

    # Command-name -> "has it actually run?".  A false entry appears
    # when the command object is created and flips true after a
    # successful run, so prefer .get() over direct indexing.
    self.have_run = {}

    # Apply caller-supplied attributes.  Command options are pulled
    # out first so that aliased command options override anything
    # supplied redundantly through the general options dictionary.
    if attrs:
        options = attrs.get('options')
        if options is not None:
            del attrs['options']
            for (cmd_name, cmd_options) in options.items():
                target = self.get_option_dict(cmd_name)
                for (opt, val) in cmd_options.items():
                    target[opt] = ("setup script", val)

        if 'licence' in attrs:
            attrs['license'] = attrs['licence']
            del attrs['licence']
            msg = "'licence' distribution option is deprecated; use 'license'"
            if warnings is not None:
                warnings.warn(msg)
            else:
                sys.stderr.write(msg + "\n")

        # Any remaining attribute must already exist on the metadata
        # object or on self; anything else is an unknown option.
        for (key, val) in attrs.items():
            if hasattr(self.metadata, "set_" + key):
                getattr(self.metadata, "set_" + key)(val)
            elif hasattr(self.metadata, key):
                setattr(self.metadata, key, val)
            elif hasattr(self, key):
                setattr(self, key, val)
            else:
                msg = "Unknown distribution option: %s" % repr(key)
                warnings.warn(msg)

    # --no-user-cfg must be detected before config files are loaded
    # (other arguments merely override them), so scan only the leading
    # run of global options here.
    self.want_user_cfg = True

    if self.script_args is not None:
        for arg in self.script_args:
            if not arg.startswith('-'):
                break
            if arg == '--no-user-cfg':
                self.want_user_cfg = False
                break

    self.finalize_options()
def get_option_dict(self, command):
    """Return the option dictionary for 'command'.

    If no option dictionary exists for that command yet, an empty one
    is created, cached in 'self.command_options', and returned.
    """
    options = self.command_options.get(command)
    if options is None:
        options = {}
        self.command_options[command] = options
    return options
def dump_option_dicts(self, header=None, commands=None, indent=""):
    """Pretty-print the per-command option dictionaries through
    'self.announce()'.

    With 'commands' None, every known command is dumped in sorted
    order.  'header', if given, is announced first and everything
    else indented one level below it.
    """
    from pprint import pformat

    if commands is None:
        # dump every command's option dict, in a stable order
        commands = sorted(self.command_options.keys())

    if header is not None:
        self.announce(indent + header)
        indent += " "

    if not commands:
        self.announce(indent + "no commands known yet")
        return

    for cmd_name in commands:
        opt_dict = self.command_options.get(cmd_name)
        if opt_dict is None:
            self.announce(indent +
                          "no option dict for '%s' command" % cmd_name)
            continue
        self.announce(indent +
                      "option dict for '%s' command:" % cmd_name)
        for line in pformat(opt_dict).split('\n'):
            self.announce(indent + " " + line)
def find_config_files(self):
    """Return the config files to process, in parse order.

    Up to three are returned, each guaranteed to exist (modulo race
    conditions): distutils.cfg next to the installed distutils
    package, the per-user file (.pydistutils.cfg on POSIX,
    pydistutils.cfg elsewhere) in the home directory -- skipped when
    --no-user-cfg was given -- and setup.cfg in the current
    directory.
    """
    files = []
    check_environ()

    # System-wide config lives alongside the distutils package itself.
    sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
    sys_file = os.path.join(sys_dir, "distutils.cfg")
    if os.path.isfile(sys_file):
        files.append(sys_file)

    # Per-user config name differs by platform (dotted on POSIX).
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"

    if self.want_user_cfg:
        user_file = os.path.join(os.path.expanduser('~'), user_filename)
        if os.path.isfile(user_file):
            files.append(user_file)

    # setup.cfg in the working directory is honoured everywhere.
    local_file = "setup.cfg"
    if os.path.isfile(local_file):
        files.append(local_file)

    if DEBUG:
        self.announce("using config files: %s" % ', '.join(files))

    return files
def parse_config_files(self, filenames=None):
    """Parse each named config file (default: find_config_files())
    into 'self.command_options', then apply any [global] section
    directly to attributes of this Distribution.

    Raises DistutilsOptionError when a [global] boolean option has an
    unparseable value.
    """
    from configparser import ConfigParser

    # Inside a virtual environment, installation-directory options
    # coming from config files are ignored.
    if sys.prefix != sys.base_prefix:
        ignore_options = [
            'install-base', 'install-platbase', 'install-lib',
            'install-platlib', 'install-purelib', 'install-headers',
            'install-scripts', 'install-data', 'prefix', 'exec-prefix',
            'home', 'user', 'root']
    else:
        ignore_options = []

    ignore_options = frozenset(ignore_options)

    if filenames is None:
        filenames = self.find_config_files()

    if DEBUG:
        self.announce("Distribution.parse_config_files():")

    parser = ConfigParser()
    for filename in filenames:
        if DEBUG:
            self.announce(" reading %s" % filename)
        parser.read(filename)
        for section in parser.sections():
            opt_dict = self.get_option_dict(section)
            for opt in parser.options(section):
                if opt == '__name__' or opt in ignore_options:
                    continue
                val = parser.get(section, opt)
                opt_dict[opt.replace('-', '_')] = (filename, val)

        # Reset the parser between files so each recorded option keeps
        # the filename it really came from.
        parser.__init__()

    # A [global] section sets Distribution attributes directly.
    if 'global' in self.command_options:
        for (opt, (src, val)) in self.command_options['global'].items():
            alias = self.negative_opt.get(opt)
            try:
                if alias:
                    setattr(self, alias, not strtobool(val))
                elif opt in ('verbose', 'dry_run'):  # ugh!
                    setattr(self, opt, strtobool(val))
                else:
                    setattr(self, opt, val)
            except ValueError as msg:
                raise DistutilsOptionError(msg)
def parse_command_line(self):
    """Parse the setup script's command line (the 'script_args'
    attribute, normally 'sys.argv[1:]').

    Global options are consumed first; the remainder is scanned
    alternately for command names and per-command options, where each
    new command ends the previous command's options.  Because the
    valid options come from each command class's 'user_options', the
    command classes are loaded during parsing.  A bad 'user_options'
    raises DistutilsGetoptError; a bad command line (including no
    commands at all) raises DistutilsArgError.  Returns true when
    commands should be executed, a false value when parsing succeeded
    but produced help/display output instead.
    """
    toplevel_options = self._get_toplevel_options()

    # Parse piecemeal -- globals, then one command at a time --
    # because each command's valid options are unknown until its
    # class has been loaded.
    self.commands = []
    parser = FancyGetopt(toplevel_options + self.display_options)
    parser.set_negative_aliases(self.negative_opt)
    parser.set_aliases({'licence': 'license'})
    args = parser.getopt(args=self.script_args, object=self)
    option_order = parser.get_option_order()
    log.set_verbosity(self.verbose)

    # Display-only options short-circuit command processing entirely.
    if self.handle_display_options(option_order):
        return

    while args:
        args = self._parse_command_opts(parser, args)
        if args is None:  # user asked for help (and got it)
            return

    # Bare "--help" shows global plus display options; "--help cmd"
    # instead shows per-command help for each listed command.
    if self.help:
        self._show_help(parser,
                        display_options=len(self.commands) == 0,
                        commands=self.commands)
        return

    if not self.commands:
        raise DistutilsArgError("no commands supplied")

    return True
def _get_toplevel_options(self):
|
| 507 |
+
"""Return the non-display options recognized at the top level.
|
| 508 |
+
|
| 509 |
+
This includes options that are recognized *only* at the top
|
| 510 |
+
level as well as options recognized for commands.
|
| 511 |
+
"""
|
| 512 |
+
return self.global_options + [
|
| 513 |
+
("command-packages=", None,
|
| 514 |
+
"list of packages that provide distutils commands"),
|
| 515 |
+
]
|
| 516 |
+
|
| 517 |
+
def _parse_command_opts(self, parser, args):
    """Parse the options of the single command at the head of 'args'.

    'parser' must be a FancyGetopt instance.  Returns the remaining
    arguments (with the next command, if any, at the front) -- empty
    when the command line is exhausted -- or None when the user asked
    for help on this command.
    """
    # late import because of mutual dependence between these modules
    from distutils.cmd import Command

    command = args[0]
    if not command_re.match(command):
        raise SystemExit("invalid command name '%s'" % command)
    self.commands.append(command)

    # Resolving the class both validates the command name and tells
    # us which options it accepts.
    try:
        cmd_class = self.get_command_class(command)
    except DistutilsModuleError as msg:
        raise DistutilsArgError(msg)

    # Every command class must implement the Command interface.
    if not issubclass(cmd_class, Command):
        raise DistutilsClassError(
            "command class %s must subclass Command" % cmd_class)

    # ... and must declare the options it understands.
    if not (hasattr(cmd_class, 'user_options') and
            isinstance(cmd_class.user_options, list)):
        msg = ("command class %s must provide "
               "'user_options' attribute (a list of tuples)")
        raise DistutilsClassError(msg % cmd_class)

    # Merge the command's own negative aliases over the global ones.
    negative_opt = self.negative_opt
    if hasattr(cmd_class, 'negative_opt'):
        negative_opt = {**negative_opt, **cmd_class.negative_opt}

    # help_options use a four-tuple format, so preprocess them.
    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_options = fix_help_options(cmd_class.help_options)
    else:
        help_options = []

    # Commands accept the global options too.
    parser.set_option_table(self.global_options +
                            cmd_class.user_options +
                            help_options)
    parser.set_negative_aliases(negative_opt)
    (args, opts) = parser.getopt(args[1:])
    if hasattr(opts, 'help') and opts.help:
        self._show_help(parser, display_options=0, commands=[cmd_class])
        return

    # Invoke any per-command help callbacks the user triggered.
    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_option_found = 0
        for (help_option, short, desc, func) in cmd_class.help_options:
            if hasattr(opts, parser.get_attr_name(help_option)):
                help_option_found = 1
                if callable(func):
                    func()
                else:
                    raise DistutilsClassError(
                        "invalid help function %r for help option '%s': "
                        "must be a callable object (function, etc.)"
                        % (func, help_option))

        if help_option_found:
            return

    # Park the parsed options in 'command_options' until the command
    # object is instantiated.
    opt_dict = self.get_option_dict(command)
    for (name, value) in vars(opts).items():
        opt_dict[name] = ("command line", value)

    return args
def finalize_options(self):
    """Set final values for the Distribution's options (the analogue
    of Command.finalize_options()): comma-separated strings in the
    metadata's 'keywords' and 'platforms' become stripped lists.
    """
    for attr in ('keywords', 'platforms'):
        value = getattr(self.metadata, attr)
        if value is None:
            continue
        if isinstance(value, str):
            value = [item.strip() for item in value.split(',')]
        setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
               commands=None):
    """Show help for the setup script command-line in the form of
    several lists of command-line options.  'parser' should be a
    FancyGetopt instance; do not expect it to be returned in the
    same state, as its option table will be reset to make it
    generate the correct help text.

    If 'global_options' is true, lists the global options:
    --verbose, --dry-run, etc.  If 'display_options' is true, lists
    the "display-only" options: --name, --version, etc.  Finally,
    lists per-command help for every command name or command class
    in 'commands' (default: none).
    """
    # late import because of mutual dependence between these modules
    from distutils.core import gen_usage
    from distutils.cmd import Command

    # Fix: was 'commands=[]' -- a mutable default argument.
    if commands is None:
        commands = []

    if global_options:
        if display_options:
            options = self._get_toplevel_options()
        else:
            options = self.global_options
        parser.set_option_table(options)
        parser.print_help(self.common_usage + "\nGlobal options:")
        print('')

    if display_options:
        parser.set_option_table(self.display_options)
        parser.print_help(
            "Information display options (just display " +
            "information, ignore any commands)")
        print('')

    # Fix: iterate the 'commands' parameter, not self.commands.  The
    # parameter was silently ignored, so the resolved command class
    # passed by _parse_command_opts ('commands=[cmd_class]') was
    # discarded in favour of re-resolving the name.  (Same fix as in
    # pypa/distutils.)
    for command in commands:
        if isinstance(command, type) and issubclass(command, Command):
            klass = command
        else:
            klass = self.get_command_class(command)
        if (hasattr(klass, 'help_options') and
                isinstance(klass.help_options, list)):
            parser.set_option_table(klass.user_options +
                                    fix_help_options(klass.help_options))
        else:
            parser.set_option_table(klass.user_options)
        parser.print_help("Options for '%s' command:" % klass.__name__)
        print('')

    print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
    """Act on any non-global "display-only" options on the command
    line (--help-commands or the metadata display options): print the
    requested information, in the order the user asked for it, and
    return true.  Return false when nothing was displayed.
    """
    from distutils.core import gen_usage

    # --help-commands wins outright: list the commands, show usage,
    # and ignore everything else on the command line.
    if self.help_commands:
        self.print_commands()
        print('')
        print(gen_usage(self.script_name))
        return 1

    # Otherwise emit each requested piece of metadata, preserving the
    # order in which the options were supplied.
    any_display_options = 0
    is_display_option = {option[0]: 1 for option in self.display_options}

    for (opt, val) in option_order:
        if val and is_display_option.get(opt):
            opt = translate_longopt(opt)
            value = getattr(self.metadata, "get_" + opt)()
            if opt in ['keywords', 'platforms']:
                print(','.join(value))
            elif opt in ('classifiers', 'provides', 'requires',
                         'obsoletes'):
                print('\n'.join(value))
            else:
                print(value)
            any_display_options = 1

    return any_display_options
def print_command_list(self, commands, header, max_length):
    """Print 'header' followed by one line per command in 'commands',
    each padded to 'max_length' and followed by its description
    (helper for 'print_commands()').
    """
    print(header + ":")

    for cmd in commands:
        # Prefer an explicitly registered class; fall back to lookup.
        klass = self.cmdclass.get(cmd) or self.get_command_class(cmd)
        description = getattr(klass, 'description',
                              "(no description available)")
        print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
    """Print a help message listing every available command with its
    description: first the "standard commands" from
    distutils.command.__all__, then any "extra commands" registered
    in self.cmdclass.  Descriptions come from each command class's
    'description' attribute.
    """
    import distutils.command
    std_commands = distutils.command.__all__
    is_std = set(std_commands)

    extra_commands = [cmd for cmd in self.cmdclass.keys()
                      if cmd not in is_std]

    max_length = 0
    for cmd in (std_commands + extra_commands):
        max_length = max(max_length, len(cmd))

    self.print_command_list(std_commands,
                            "Standard commands",
                            max_length)
    if extra_commands:
        print()
        self.print_command_list(extra_commands,
                                "Extra commands",
                                max_length)
def get_command_list(self):
    """Return (command, description) tuples for every available
    command: the standard commands from distutils.command.__all__
    followed by the extra commands registered in self.cmdclass.
    Descriptions come from each command class's 'description'
    attribute.
    """
    # Currently this is only used on Mac OS, for the Mac-only GUI
    # Distutils interface (by Jack Jansen)
    import distutils.command
    std_commands = distutils.command.__all__
    is_std = set(std_commands)

    extra_commands = [cmd for cmd in self.cmdclass.keys()
                      if cmd not in is_std]

    rv = []
    for cmd in (std_commands + extra_commands):
        klass = self.cmdclass.get(cmd) or self.get_command_class(cmd)
        description = getattr(klass, 'description',
                              "(no description available)")
        rv.append((cmd, description))
    return rv
def get_command_packages(self):
    """Return (and cache, as a list) the packages searched for command
    implementations; "distutils.command" always comes first.
    """
    pkgs = self.command_packages
    if not isinstance(pkgs, list):
        if pkgs is None:
            pkgs = ''
        pkgs = [name.strip() for name in pkgs.split(',') if name != '']
        if "distutils.command" not in pkgs:
            pkgs.insert(0, "distutils.command")
        self.command_packages = pkgs
    return pkgs
def get_command_class(self, command):
    """Return the class implementing the Distutils command 'command'.

    The 'cmdclass' dictionary is consulted first; on a miss, the
    module '<pkg>.<command>' is imported from each package returned
    by 'get_command_packages()' in turn and the class named after the
    command is fetched from it, then cached in 'cmdclass' to speed up
    later lookups.

    Raises DistutilsModuleError when no package provides the command
    module, or when the module lacks the expected class.
    """
    klass = self.cmdclass.get(command)
    if klass:
        return klass

    for pkgname in self.get_command_packages():
        module_name = "%s.%s" % (pkgname, command)
        klass_name = command

        try:
            __import__(module_name)
            module = sys.modules[module_name]
        except ImportError:
            continue

        try:
            klass = getattr(module, klass_name)
        except AttributeError:
            raise DistutilsModuleError(
                "invalid command '%s' (no class '%s' in module '%s')"
                % (command, klass_name, module_name))

        self.cmdclass[command] = klass
        return klass

    raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
    """Return the command object for 'command'.

    Normally the object is found in the cache filled by a previous
    call; on a cache miss it is created when 'create' is true,
    otherwise None is returned.
    """
    command_obj = self.command_obj.get(command)
    if command_obj or not create:
        return command_obj

    if DEBUG:
        self.announce("Distribution.get_command_obj(): "
                      "creating '%s' command object" % command)

    klass = self.get_command_class(command)
    command_obj = self.command_obj[command] = klass(self)
    self.have_run[command] = 0

    # Apply any options gathered from config files or the command
    # line.  (NB. error reporting is lame here: problems only surface
    # later, in 'finalize_options()', so the source of the error is
    # not reported.)
    options = self.command_options.get(command)
    if options:
        self._set_command_options(command_obj, options)

    return command_obj
|
| 871 |
+
|
| 872 |
+
def _set_command_options(self, command_obj, option_dict=None):
    """Copy the values in 'option_dict' onto attributes of 'command_obj'.

    'command_obj' must be a Command instance.  When 'option_dict' is
    omitted, the standard option dictionary for this command (from
    'self.command_options') is used.  Each entry maps an option name
    to a (source, value) pair; string values for boolean and
    "negative" options are coerced through strtobool().
    """
    command_name = command_obj.get_command_name()
    if option_dict is None:
        option_dict = self.get_option_dict(command_name)

    if DEBUG:
        self.announce("  setting options for '%s' command:" % command_name)
    for (option, (source, value)) in option_dict.items():
        if DEBUG:
            self.announce("    %s = %s (from %s)" % (option, value,
                                                     source))
        # Per-option lookup of the command's boolean/negative option
        # tables (absent tables are treated as empty).
        try:
            bool_opts = [translate_longopt(longopt)
                         for longopt in command_obj.boolean_options]
        except AttributeError:
            bool_opts = []
        try:
            neg_opt = command_obj.negative_opt
        except AttributeError:
            neg_opt = {}

        try:
            is_string = isinstance(value, str)
            if option in neg_opt and is_string:
                # "negative" options flip the value onto their alias.
                setattr(command_obj, neg_opt[option], not strtobool(value))
            elif option in bool_opts and is_string:
                setattr(command_obj, option, strtobool(value))
            elif hasattr(command_obj, option):
                setattr(command_obj, option, value)
            else:
                raise DistutilsOptionError(
                    "error in %s: command '%s' has no such option '%s'"
                    % (source, command_name, option))
        except ValueError as msg:
            raise DistutilsOptionError(msg)
|
| 915 |
+
|
| 916 |
+
def reinitialize_command(self, command, reinit_subcommands=0):
    """Reset a command object to its freshly-initialized state.

    That is, the state first returned by 'get_command_obj()':
    initialized but not yet finalized.  This provides the opportunity
    to sneak option values in programmatically, overriding or
    supplementing user-supplied values from the config files and
    command line.  The command must be re-finalized (via
    'finalize_options()' or 'ensure_finalized()') before real use.

    'command' may be a command name (string) or a command object.
    When 'reinit_subcommands' is true, the command's sub-commands (as
    declared by its 'sub_commands' class attribute, if any) are
    reinitialized too -- but only those whose test predicates return
    true.  See the "install" command for an example.

    Returns the reinitialized command object.
    """
    from distutils.cmd import Command
    if isinstance(command, Command):
        command_name = command.get_command_name()
    else:
        command_name = command
        command = self.get_command_obj(command_name)

    # Nothing to undo if the command was never finalized.
    if not command.finalized:
        return command

    command.initialize_options()
    command.finalized = 0
    self.have_run[command_name] = 0
    self._set_command_options(command)

    if reinit_subcommands:
        for sub in command.get_sub_commands():
            self.reinitialize_command(sub, reinit_subcommands)

    return command
|
| 954 |
+
|
| 955 |
+
# -- Methods that operate on the Distribution ----------------------
|
| 956 |
+
|
| 957 |
+
def announce(self, msg, level=log.INFO):
    """Emit 'msg' through the distutils logging machinery at 'level'."""
    log.log(level, msg)
|
| 959 |
+
|
| 960 |
+
def run_commands(self):
    """Run every command seen on the setup command line, in order.

    Uses the list of commands found and the cache of command objects
    created by 'get_command_obj()'.
    """
    for command_name in self.commands:
        self.run_command(command_name)
|
| 967 |
+
|
| 968 |
+
# -- Methods that operate on its Commands --------------------------
|
| 969 |
+
|
| 970 |
+
def run_command(self, command):
    """Create (if necessary), finalize and run the named command once.

    If the command has already been run in this session, return
    silently without doing anything.  Otherwise the command object is
    fetched (creating it on demand), finalized, and executed, and the
    command is marked as having run.
    """
    # Already been here, done that? then return silently.
    if self.have_run.get(command):
        return

    log.info("running %s", command)
    command_obj = self.get_command_obj(command)
    command_obj.ensure_finalized()
    command_obj.run()
    self.have_run[command] = 1
|
| 987 |
+
|
| 988 |
+
# -- Distribution query methods ------------------------------------
|
| 989 |
+
|
| 990 |
+
def has_pure_modules(self):
    """True if any pure-Python packages or modules were declared."""
    declared = self.packages or self.py_modules or []
    return len(declared) > 0
|
| 992 |
+
|
| 993 |
+
def has_ext_modules(self):
    """Truthy if any extension modules were declared.

    Mirrors the original short-circuit: when nothing is declared the
    falsy value itself (None or []) is returned, not False.
    """
    exts = self.ext_modules
    if not exts:
        return exts
    return len(exts) > 0
|
| 995 |
+
|
| 996 |
+
def has_c_libraries(self):
    """Truthy if any C libraries were declared (see has_ext_modules
    for the short-circuit return convention)."""
    libs = self.libraries
    if not libs:
        return libs
    return len(libs) > 0
|
| 998 |
+
|
| 999 |
+
def has_modules(self):
    """Truthy if the distribution contains any Python code at all."""
    if self.has_pure_modules():
        return True
    return self.has_ext_modules()
|
| 1001 |
+
|
| 1002 |
+
def has_headers(self):
    """Truthy if any header files were declared (short-circuit
    convention as in has_ext_modules)."""
    hdrs = self.headers
    if not hdrs:
        return hdrs
    return len(hdrs) > 0
|
| 1004 |
+
|
| 1005 |
+
def has_scripts(self):
    """Truthy if any scripts were declared (short-circuit convention
    as in has_ext_modules)."""
    scripts = self.scripts
    if not scripts:
        return scripts
    return len(scripts) > 0
|
| 1007 |
+
|
| 1008 |
+
def has_data_files(self):
    """Truthy if any data files were declared (short-circuit
    convention as in has_ext_modules)."""
    data = self.data_files
    if not data:
        return data
    return len(data) > 0
|
| 1010 |
+
|
| 1011 |
+
def is_pure(self):
    """True only when the distribution has pure-Python code and no
    compiled parts (extension modules or C libraries)."""
    if not self.has_pure_modules():
        return False
    return not self.has_ext_modules() and not self.has_c_libraries()
|
| 1015 |
+
|
| 1016 |
+
# -- Metadata query methods ----------------------------------------
|
| 1017 |
+
|
| 1018 |
+
# If you're looking for 'get_name()', 'get_version()', and so forth,
|
| 1019 |
+
# they are defined in a sneaky way: the constructor binds self.get_XXX
|
| 1020 |
+
# to self.metadata.get_XXX. The actual code is in the
|
| 1021 |
+
# DistributionMetadata class, below.
|
| 1022 |
+
|
| 1023 |
+
class DistributionMetadata:
    """Holds the distribution meta-data (name, version, author, and so
    forth) and knows how to read and write it in PKG-INFO format.
    """

    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "classifiers", "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )

    def __init__(self, path=None):
        """Initialize every field to None, or populate the fields by
        parsing the PKG-INFO file at 'path' if one is given.
        """
        if path is not None:
            # Fix: the file object used to be leaked (bare open() with no
            # matching close).  write_pkg_info() emits UTF-8, so read the
            # file back the same way.
            with open(path, encoding='UTF-8') as pkg_info_file:
                self.read_pkg_file(pkg_info_file)
        else:
            self.name = None
            self.version = None
            self.author = None
            self.author_email = None
            self.maintainer = None
            self.maintainer_email = None
            self.url = None
            self.license = None
            self.description = None
            self.long_description = None
            self.keywords = None
            self.platforms = None
            self.classifiers = None
            self.download_url = None
            # PEP 314
            self.provides = None
            self.requires = None
            self.obsoletes = None

    def read_pkg_file(self, file):
        """Read the metadata values from a PKG-INFO file object."""
        msg = message_from_file(file)

        def _read_field(name):
            # A single header; 'UNKNOWN' is the serialized form of None.
            value = msg[name]
            if value == 'UNKNOWN':
                return None
            return value

        def _read_list(name):
            # A repeated header; an empty result means "not present".
            values = msg.get_all(name, None)
            if values == []:
                return None
            return values

        metadata_version = msg['metadata-version']
        self.name = _read_field('name')
        self.version = _read_field('version')
        self.description = _read_field('summary')
        # we are filling author only.
        self.author = _read_field('author')
        self.maintainer = None
        self.author_email = _read_field('author-email')
        self.maintainer_email = None
        self.url = _read_field('home-page')
        self.license = _read_field('license')

        if 'download-url' in msg:
            self.download_url = _read_field('download-url')
        else:
            self.download_url = None

        self.long_description = _read_field('description')
        # (a redundant second read of 'summary' into self.description
        # used to sit here; it was a no-op and has been removed)

        if 'keywords' in msg:
            self.keywords = _read_field('keywords').split(',')

        self.platforms = _read_list('platform')
        self.classifiers = _read_list('classifier')

        # PEP 314 - these fields only exist in 1.1
        if metadata_version == '1.1':
            self.requires = _read_list('requires')
            self.provides = _read_list('provides')
            self.obsoletes = _read_list('obsoletes')
        else:
            self.requires = None
            self.provides = None
            self.obsoletes = None

    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree."""
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)

    def write_pkg_file(self, file):
        """Write the PKG-INFO format data to a file object."""
        version = '1.0'
        # Any PEP 314 field, classifier or download URL bumps the
        # metadata version to 1.1.
        if (self.provides or self.requires or self.obsoletes or
                self.classifiers or self.download_url):
            version = '1.1'

        file.write('Metadata-Version: %s\n' % version)
        file.write('Name: %s\n' % self.get_name())
        file.write('Version: %s\n' % self.get_version())
        file.write('Summary: %s\n' % self.get_description())
        file.write('Home-page: %s\n' % self.get_url())
        file.write('Author: %s\n' % self.get_contact())
        file.write('Author-email: %s\n' % self.get_contact_email())
        file.write('License: %s\n' % self.get_license())
        if self.download_url:
            file.write('Download-URL: %s\n' % self.download_url)

        long_desc = rfc822_escape(self.get_long_description())
        file.write('Description: %s\n' % long_desc)

        keywords = ','.join(self.get_keywords())
        if keywords:
            file.write('Keywords: %s\n' % keywords)

        self._write_list(file, 'Platform', self.get_platforms())
        self._write_list(file, 'Classifier', self.get_classifiers())

        # PEP 314
        self._write_list(file, 'Requires', self.get_requires())
        self._write_list(file, 'Provides', self.get_provides())
        self._write_list(file, 'Obsoletes', self.get_obsoletes())

    def _write_list(self, file, name, values):
        # Emit one 'Name: value' header line per element.
        for value in values:
            file.write('%s: %s\n' % (name, value))

    # -- Metadata query methods ----------------------------------------
    # Getters return 'UNKNOWN' (or a type-appropriate empty value) when
    # the underlying field is unset, matching PKG-INFO serialization.

    def get_name(self):
        return self.name or "UNKNOWN"

    def get_version(self):
        return self.version or "0.0.0"

    def get_fullname(self):
        return "%s-%s" % (self.get_name(), self.get_version())

    def get_author(self):
        return self.author or "UNKNOWN"

    def get_author_email(self):
        return self.author_email or "UNKNOWN"

    def get_maintainer(self):
        return self.maintainer or "UNKNOWN"

    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"

    def get_contact(self):
        # The maintainer takes precedence over the author when both set.
        return self.maintainer or self.author or "UNKNOWN"

    def get_contact_email(self):
        return self.maintainer_email or self.author_email or "UNKNOWN"

    def get_url(self):
        return self.url or "UNKNOWN"

    def get_license(self):
        return self.license or "UNKNOWN"
    # British-spelling alias, kept for backward compatibility.
    get_licence = get_license

    def get_description(self):
        return self.description or "UNKNOWN"

    def get_long_description(self):
        return self.long_description or "UNKNOWN"

    def get_keywords(self):
        return self.keywords or []

    def set_keywords(self, value):
        self.keywords = _ensure_list(value, 'keywords')

    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]

    def set_platforms(self, value):
        self.platforms = _ensure_list(value, 'platforms')

    def get_classifiers(self):
        return self.classifiers or []

    def set_classifiers(self, value):
        self.classifiers = _ensure_list(value, 'classifiers')

    def get_download_url(self):
        return self.download_url or "UNKNOWN"

    # PEP 314
    def get_requires(self):
        return self.requires or []

    def set_requires(self, value):
        # Validate every requirement string before accepting the list.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.requires = list(value)

    def get_provides(self):
        return self.provides or []

    def set_provides(self, value):
        value = [v.strip() for v in value]
        for v in value:
            import distutils.versionpredicate
            distutils.versionpredicate.split_provision(v)
        self.provides = value

    def get_obsoletes(self):
        return self.obsoletes or []

    def set_obsoletes(self, value):
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.obsoletes = list(value)
|
| 1248 |
+
|
| 1249 |
+
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt.
    """
    # FancyGetopt wants (long-option, short-option, help-text); drop the
    # trailing callable from each entry.
    return [help_tuple[0:3] for help_tuple in options]
|
parrot/lib/python3.10/distutils/errors.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.errors

Provides exceptions used by the Distutils modules.  Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).

This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""


class DistutilsError(Exception):
    """The root of all Distutils evil."""


class DistutilsModuleError(DistutilsError):
    """Unable to load an expected module, or to find an expected class
    within some module (in particular, command modules and classes)."""


class DistutilsClassError(DistutilsError):
    """Some command class (or possibly distribution class, if anyone
    feels a need to subclass Distribution) is found not to be holding
    up its end of the bargain, ie. implementing some part of the
    "command "interface."""


class DistutilsGetoptError(DistutilsError):
    """The option table provided to 'fancy_getopt()' is bogus."""


class DistutilsArgError(DistutilsError):
    """Raised by fancy_getopt in response to getopt.error -- ie. an
    error in the command line usage."""


class DistutilsFileError(DistutilsError):
    """Any problems in the filesystem: expected file not found, etc.
    Typically this is for problems that we detect before OSError
    could be raised."""


class DistutilsOptionError(DistutilsError):
    """Syntactic/semantic errors in command options, such as use of
    mutually conflicting options, or inconsistent options,
    badly-spelled values, etc.  No distinction is made between option
    values originating in the setup script, the command line, config
    files, or what-have-you -- but if we *know* something originated in
    the setup script, we'll raise DistutilsSetupError instead."""


class DistutilsSetupError(DistutilsError):
    """For errors that can be definitely blamed on the setup script,
    such as invalid keyword arguments to 'setup()'."""


class DistutilsPlatformError(DistutilsError):
    """We don't know how to do something on the current platform (but
    we do know how to do it on some platform) -- eg. trying to compile
    C files on a platform not supported by a CCompiler subclass."""


class DistutilsExecError(DistutilsError):
    """Any problems executing an external program (such as the C
    compiler, when compiling C files)."""


class DistutilsInternalError(DistutilsError):
    """Internal inconsistencies or impossibilities (obviously, this
    should never be seen if the code is working!)."""


class DistutilsTemplateError(DistutilsError):
    """Syntax error in a file list template."""


class DistutilsByteCompileError(DistutilsError):
    """Byte compile error."""


# Exception classes used by the CCompiler implementation classes
class CCompilerError(Exception):
    """Some compile/link operation failed."""


class PreprocessError(CCompilerError):
    """Failure to preprocess one or more C/C++ files."""


class CompileError(CCompilerError):
    """Failure to compile one or more C/C++ source files."""


class LibError(CCompilerError):
    """Failure to create a static library from one or more C/C++ object
    files."""


class LinkError(CCompilerError):
    """Failure to link one or more C/C++ object files into an executable
    or shared library file."""


class UnknownFileError(CCompilerError):
    """Attempt to process an unknown file type."""
|
parrot/lib/python3.10/distutils/filelist.py
ADDED
|
@@ -0,0 +1,327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.filelist
|
| 2 |
+
|
| 3 |
+
Provides the FileList class, used for poking about the filesystem
|
| 4 |
+
and building lists of files.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os, re
|
| 8 |
+
import fnmatch
|
| 9 |
+
import functools
|
| 10 |
+
from distutils.util import convert_path
|
| 11 |
+
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
|
| 12 |
+
from distutils import log
|
| 13 |
+
|
| 14 |
+
class FileList:
|
| 15 |
+
"""A list of files built by on exploring the filesystem and filtered by
|
| 16 |
+
applying various patterns to what we find there.
|
| 17 |
+
|
| 18 |
+
Instance attributes:
|
| 19 |
+
dir
|
| 20 |
+
directory from which files will be taken -- only used if
|
| 21 |
+
'allfiles' not supplied to constructor
|
| 22 |
+
files
|
| 23 |
+
list of filenames currently being built/filtered/manipulated
|
| 24 |
+
allfiles
|
| 25 |
+
complete list of files under consideration (ie. without any
|
| 26 |
+
filtering applied)
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, warn=None, debug_print=None):
    """Both arguments are ignored but kept for backward compatibility."""
    # 'allfiles' is filled lazily by findall()/set_allfiles().
    self.allfiles = None
    self.files = []
|
| 34 |
+
|
| 35 |
+
def set_allfiles(self, allfiles):
    """Install a precomputed list of candidate files."""
    self.allfiles = allfiles
|
| 37 |
+
|
| 38 |
+
def findall(self, dir=os.curdir):
    """Rebuild 'allfiles' by walking 'dir' with the module-level
    findall() helper."""
    self.allfiles = findall(dir)
|
| 40 |
+
|
| 41 |
+
def debug_print(self, msg):
    """Print 'msg' to stdout if the global DEBUG (taken from the
    DISTUTILS_DEBUG environment variable) flag is true.
    """
    from distutils.debug import DEBUG
    if DEBUG:
        print(msg)
|
| 48 |
+
|
| 49 |
+
# -- List-like methods ---------------------------------------------
|
| 50 |
+
|
| 51 |
+
def append(self, item):
    """Add one filename to the working list."""
    self.files.append(item)
|
| 53 |
+
|
| 54 |
+
def extend(self, items):
    """Add several filenames to the working list at once."""
    self.files.extend(items)
|
| 56 |
+
|
| 57 |
+
def sort(self):
    """Order 'files' by their path components.

    Note: not a strict lexical sort -- paths are compared as
    (head, tail) pairs from os.path.split().
    """
    decorated = sorted(os.path.split(name) for name in self.files)
    self.files = [os.path.join(*parts) for parts in decorated]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# -- Other miscellaneous utility methods ---------------------------
|
| 66 |
+
|
| 67 |
+
def remove_duplicates(self):
    """Drop adjacent duplicates from 'files'.

    Assumes the list has already been sorted; walks backwards so that
    deletions do not disturb the remaining indices.
    """
    for index in range(len(self.files) - 1, 0, -1):
        if self.files[index] == self.files[index - 1]:
            del self.files[index]
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# -- "File template" methods ---------------------------------------
|
| 75 |
+
|
| 76 |
+
def _parse_template_line(self, line):
    """Split one manifest-template line into its operands.

    Returns (action, patterns, dirname, dir_pattern); which of the last
    three are non-None depends on the action.  Raises
    DistutilsTemplateError for a malformed line or an unknown action.
    """
    words = line.split()
    action = words[0]

    patterns = dirname = dir_pattern = None

    if action in ('include', 'exclude',
                  'global-include', 'global-exclude'):
        if len(words) < 2:
            raise DistutilsTemplateError(
                "'%s' expects <pattern1> <pattern2> ..." % action)
        patterns = [convert_path(word) for word in words[1:]]
    elif action in ('recursive-include', 'recursive-exclude'):
        if len(words) < 3:
            raise DistutilsTemplateError(
                "'%s' expects <dir> <pattern1> <pattern2> ..." % action)
        dirname = convert_path(words[1])
        patterns = [convert_path(word) for word in words[2:]]
    elif action in ('graft', 'prune'):
        if len(words) != 2:
            raise DistutilsTemplateError(
                "'%s' expects a single <dir_pattern>" % action)
        dir_pattern = convert_path(words[1])
    else:
        raise DistutilsTemplateError("unknown action '%s'" % action)

    return (action, patterns, dirname, dir_pattern)
|
| 103 |
+
|
| 104 |
+
def process_template_line(self, line):
    """Execute one line of a MANIFEST.in-style template.

    The line is first parsed into (action, patterns, dirname,
    dir_pattern) by _parse_template_line(); the parser guarantees the
    action is valid and the right operands are present, so only minimal
    checking is needed here.  A warning is logged for any pattern that
    matches nothing.
    """
    (action, patterns, dirname, dir_pattern) = \
        self._parse_template_line(line)

    if action == 'include':
        self.debug_print("include " + ' '.join(patterns))
        for pattern in patterns:
            if not self.include_pattern(pattern, anchor=1):
                log.warn("warning: no files found matching '%s'",
                         pattern)

    elif action == 'exclude':
        self.debug_print("exclude " + ' '.join(patterns))
        for pattern in patterns:
            if not self.exclude_pattern(pattern, anchor=1):
                log.warn(("warning: no previously-included files "
                          "found matching '%s'"), pattern)

    elif action == 'global-include':
        self.debug_print("global-include " + ' '.join(patterns))
        for pattern in patterns:
            if not self.include_pattern(pattern, anchor=0):
                log.warn(("warning: no files found matching '%s' "
                          "anywhere in distribution"), pattern)

    elif action == 'global-exclude':
        self.debug_print("global-exclude " + ' '.join(patterns))
        for pattern in patterns:
            if not self.exclude_pattern(pattern, anchor=0):
                log.warn(("warning: no previously-included files matching "
                          "'%s' found anywhere in distribution"),
                         pattern)

    elif action == 'recursive-include':
        self.debug_print("recursive-include %s %s" %
                         (dirname, ' '.join(patterns)))
        for pattern in patterns:
            if not self.include_pattern(pattern, prefix=dirname):
                log.warn(("warning: no files found matching '%s' "
                          "under directory '%s'"),
                         pattern, dirname)

    elif action == 'recursive-exclude':
        self.debug_print("recursive-exclude %s %s" %
                         (dirname, ' '.join(patterns)))
        for pattern in patterns:
            if not self.exclude_pattern(pattern, prefix=dirname):
                log.warn(("warning: no previously-included files matching "
                          "'%s' found under directory '%s'"),
                         pattern, dirname)

    elif action == 'graft':
        self.debug_print("graft " + dir_pattern)
        if not self.include_pattern(None, prefix=dir_pattern):
            log.warn("warning: no directories found matching '%s'",
                     dir_pattern)

    elif action == 'prune':
        self.debug_print("prune " + dir_pattern)
        if not self.exclude_pattern(None, prefix=dir_pattern):
            log.warn(("no previously-included directories found "
                      "matching '%s'"), dir_pattern)
    else:
        raise DistutilsInternalError(
            "this cannot happen: invalid action '%s'" % action)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
# -- Filtering/selection methods -----------------------------------
|
| 179 |
+
|
| 180 |
+
def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
|
| 181 |
+
"""Select strings (presumably filenames) from 'self.files' that
|
| 182 |
+
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
|
| 183 |
+
are not quite the same as implemented by the 'fnmatch' module: '*'
|
| 184 |
+
and '?' match non-special characters, where "special" is platform-
|
| 185 |
+
dependent: slash on Unix; colon, slash, and backslash on
|
| 186 |
+
DOS/Windows; and colon on Mac OS.
|
| 187 |
+
|
| 188 |
+
If 'anchor' is true (the default), then the pattern match is more
|
| 189 |
+
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
|
| 190 |
+
'anchor' is false, both of these will match.
|
| 191 |
+
|
| 192 |
+
If 'prefix' is supplied, then only filenames starting with 'prefix'
|
| 193 |
+
(itself a pattern) and ending with 'pattern', with anything in between
|
| 194 |
+
them, will match. 'anchor' is ignored in this case.
|
| 195 |
+
|
| 196 |
+
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
|
| 197 |
+
'pattern' is assumed to be either a string containing a regex or a
|
| 198 |
+
regex object -- no translation is done, the regex is just compiled
|
| 199 |
+
and used as-is.
|
| 200 |
+
|
| 201 |
+
Selected strings will be added to self.files.
|
| 202 |
+
|
| 203 |
+
Return True if files are found, False otherwise.
|
| 204 |
+
"""
|
| 205 |
+
# XXX docstring lying about what the special chars are?
|
| 206 |
+
files_found = False
|
| 207 |
+
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
| 208 |
+
self.debug_print("include_pattern: applying regex r'%s'" %
|
| 209 |
+
pattern_re.pattern)
|
| 210 |
+
|
| 211 |
+
# delayed loading of allfiles list
|
| 212 |
+
if self.allfiles is None:
|
| 213 |
+
self.findall()
|
| 214 |
+
|
| 215 |
+
for name in self.allfiles:
|
| 216 |
+
if pattern_re.search(name):
|
| 217 |
+
self.debug_print(" adding " + name)
|
| 218 |
+
self.files.append(name)
|
| 219 |
+
files_found = True
|
| 220 |
+
return files_found
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def exclude_pattern (self, pattern,
|
| 224 |
+
anchor=1, prefix=None, is_regex=0):
|
| 225 |
+
"""Remove strings (presumably filenames) from 'files' that match
|
| 226 |
+
'pattern'. Other parameters are the same as for
|
| 227 |
+
'include_pattern()', above.
|
| 228 |
+
The list 'self.files' is modified in place.
|
| 229 |
+
Return True if files are found, False otherwise.
|
| 230 |
+
"""
|
| 231 |
+
files_found = False
|
| 232 |
+
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
| 233 |
+
self.debug_print("exclude_pattern: applying regex r'%s'" %
|
| 234 |
+
pattern_re.pattern)
|
| 235 |
+
for i in range(len(self.files)-1, -1, -1):
|
| 236 |
+
if pattern_re.search(self.files[i]):
|
| 237 |
+
self.debug_print(" removing " + self.files[i])
|
| 238 |
+
del self.files[i]
|
| 239 |
+
files_found = True
|
| 240 |
+
return files_found
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
# ----------------------------------------------------------------------
|
| 244 |
+
# Utility functions
|
| 245 |
+
|
| 246 |
+
def _find_all_simple(path):
|
| 247 |
+
"""
|
| 248 |
+
Find all files under 'path'
|
| 249 |
+
"""
|
| 250 |
+
results = (
|
| 251 |
+
os.path.join(base, file)
|
| 252 |
+
for base, dirs, files in os.walk(path, followlinks=True)
|
| 253 |
+
for file in files
|
| 254 |
+
)
|
| 255 |
+
return filter(os.path.isfile, results)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def findall(dir=os.curdir):
|
| 259 |
+
"""
|
| 260 |
+
Find all files under 'dir' and return the list of full filenames.
|
| 261 |
+
Unless dir is '.', return full filenames with dir prepended.
|
| 262 |
+
"""
|
| 263 |
+
files = _find_all_simple(dir)
|
| 264 |
+
if dir == os.curdir:
|
| 265 |
+
make_rel = functools.partial(os.path.relpath, start=dir)
|
| 266 |
+
files = map(make_rel, files)
|
| 267 |
+
return list(files)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def glob_to_re(pattern):
|
| 271 |
+
"""Translate a shell-like glob pattern to a regular expression; return
|
| 272 |
+
a string containing the regex. Differs from 'fnmatch.translate()' in
|
| 273 |
+
that '*' does not match "special characters" (which are
|
| 274 |
+
platform-specific).
|
| 275 |
+
"""
|
| 276 |
+
pattern_re = fnmatch.translate(pattern)
|
| 277 |
+
|
| 278 |
+
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
|
| 279 |
+
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
|
| 280 |
+
# and by extension they shouldn't match such "special characters" under
|
| 281 |
+
# any OS. So change all non-escaped dots in the RE to match any
|
| 282 |
+
# character except the special characters (currently: just os.sep).
|
| 283 |
+
sep = os.sep
|
| 284 |
+
if os.sep == '\\':
|
| 285 |
+
# we're using a regex to manipulate a regex, so we need
|
| 286 |
+
# to escape the backslash twice
|
| 287 |
+
sep = r'\\\\'
|
| 288 |
+
escaped = r'\1[^%s]' % sep
|
| 289 |
+
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
|
| 290 |
+
return pattern_re
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
|
| 294 |
+
"""Translate a shell-like wildcard pattern to a compiled regular
|
| 295 |
+
expression. Return the compiled regex. If 'is_regex' true,
|
| 296 |
+
then 'pattern' is directly compiled to a regex (if it's a string)
|
| 297 |
+
or just returned as-is (assumes it's a regex object).
|
| 298 |
+
"""
|
| 299 |
+
if is_regex:
|
| 300 |
+
if isinstance(pattern, str):
|
| 301 |
+
return re.compile(pattern)
|
| 302 |
+
else:
|
| 303 |
+
return pattern
|
| 304 |
+
|
| 305 |
+
# ditch start and end characters
|
| 306 |
+
start, _, end = glob_to_re('_').partition('_')
|
| 307 |
+
|
| 308 |
+
if pattern:
|
| 309 |
+
pattern_re = glob_to_re(pattern)
|
| 310 |
+
assert pattern_re.startswith(start) and pattern_re.endswith(end)
|
| 311 |
+
else:
|
| 312 |
+
pattern_re = ''
|
| 313 |
+
|
| 314 |
+
if prefix is not None:
|
| 315 |
+
prefix_re = glob_to_re(prefix)
|
| 316 |
+
assert prefix_re.startswith(start) and prefix_re.endswith(end)
|
| 317 |
+
prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
|
| 318 |
+
sep = os.sep
|
| 319 |
+
if os.sep == '\\':
|
| 320 |
+
sep = r'\\'
|
| 321 |
+
pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
|
| 322 |
+
pattern_re = r'%s\A%s%s.*%s%s' % (start, prefix_re, sep, pattern_re, end)
|
| 323 |
+
else: # no prefix -- respect anchor flag
|
| 324 |
+
if anchor:
|
| 325 |
+
pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
|
| 326 |
+
|
| 327 |
+
return re.compile(pattern_re)
|
parrot/lib/python3.10/distutils/tests/Setup.sample
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Setup file from the pygame project
|
| 2 |
+
|
| 3 |
+
#--StartConfig
|
| 4 |
+
SDL = -I/usr/include/SDL -D_REENTRANT -lSDL
|
| 5 |
+
FONT = -lSDL_ttf
|
| 6 |
+
IMAGE = -lSDL_image
|
| 7 |
+
MIXER = -lSDL_mixer
|
| 8 |
+
SMPEG = -lsmpeg
|
| 9 |
+
PNG = -lpng
|
| 10 |
+
JPEG = -ljpeg
|
| 11 |
+
SCRAP = -lX11
|
| 12 |
+
PORTMIDI = -lportmidi
|
| 13 |
+
PORTTIME = -lporttime
|
| 14 |
+
#--EndConfig
|
| 15 |
+
|
| 16 |
+
#DEBUG = -C-W -C-Wall
|
| 17 |
+
DEBUG =
|
| 18 |
+
|
| 19 |
+
#the following modules are optional. you will want to compile
|
| 20 |
+
#everything you can, but you can ignore ones you don't have
|
| 21 |
+
#dependencies for, just comment them out
|
| 22 |
+
|
| 23 |
+
imageext src/imageext.c $(SDL) $(IMAGE) $(PNG) $(JPEG) $(DEBUG)
|
| 24 |
+
font src/font.c $(SDL) $(FONT) $(DEBUG)
|
| 25 |
+
mixer src/mixer.c $(SDL) $(MIXER) $(DEBUG)
|
| 26 |
+
mixer_music src/music.c $(SDL) $(MIXER) $(DEBUG)
|
| 27 |
+
_numericsurfarray src/_numericsurfarray.c $(SDL) $(DEBUG)
|
| 28 |
+
_numericsndarray src/_numericsndarray.c $(SDL) $(MIXER) $(DEBUG)
|
| 29 |
+
movie src/movie.c $(SDL) $(SMPEG) $(DEBUG)
|
| 30 |
+
scrap src/scrap.c $(SDL) $(SCRAP) $(DEBUG)
|
| 31 |
+
_camera src/_camera.c src/camera_v4l2.c src/camera_v4l.c $(SDL) $(DEBUG)
|
| 32 |
+
pypm src/pypm.c $(SDL) $(PORTMIDI) $(PORTTIME) $(DEBUG)
|
| 33 |
+
|
| 34 |
+
GFX = src/SDL_gfx/SDL_gfxPrimitives.c
|
| 35 |
+
#GFX = src/SDL_gfx/SDL_gfxBlitFunc.c src/SDL_gfx/SDL_gfxPrimitives.c
|
| 36 |
+
gfxdraw src/gfxdraw.c $(SDL) $(GFX) $(DEBUG)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
#these modules are required for pygame to run. they only require
|
| 41 |
+
#SDL as a dependency. these should not be altered
|
| 42 |
+
|
| 43 |
+
base src/base.c $(SDL) $(DEBUG)
|
| 44 |
+
cdrom src/cdrom.c $(SDL) $(DEBUG)
|
| 45 |
+
color src/color.c $(SDL) $(DEBUG)
|
| 46 |
+
constants src/constants.c $(SDL) $(DEBUG)
|
| 47 |
+
display src/display.c $(SDL) $(DEBUG)
|
| 48 |
+
event src/event.c $(SDL) $(DEBUG)
|
| 49 |
+
fastevent src/fastevent.c src/fastevents.c $(SDL) $(DEBUG)
|
| 50 |
+
key src/key.c $(SDL) $(DEBUG)
|
| 51 |
+
mouse src/mouse.c $(SDL) $(DEBUG)
|
| 52 |
+
rect src/rect.c $(SDL) $(DEBUG)
|
| 53 |
+
rwobject src/rwobject.c $(SDL) $(DEBUG)
|
| 54 |
+
surface src/surface.c src/alphablit.c src/surface_fill.c $(SDL) $(DEBUG)
|
| 55 |
+
surflock src/surflock.c $(SDL) $(DEBUG)
|
| 56 |
+
time src/time.c $(SDL) $(DEBUG)
|
| 57 |
+
joystick src/joystick.c $(SDL) $(DEBUG)
|
| 58 |
+
draw src/draw.c $(SDL) $(DEBUG)
|
| 59 |
+
image src/image.c $(SDL) $(DEBUG)
|
| 60 |
+
overlay src/overlay.c $(SDL) $(DEBUG)
|
| 61 |
+
transform src/transform.c src/rotozoom.c src/scale2x.c src/scale_mmx.c $(SDL) $(DEBUG)
|
| 62 |
+
mask src/mask.c src/bitmask.c $(SDL) $(DEBUG)
|
| 63 |
+
bufferproxy src/bufferproxy.c $(SDL) $(DEBUG)
|
| 64 |
+
pixelarray src/pixelarray.c $(SDL) $(DEBUG)
|
| 65 |
+
_arraysurfarray src/_arraysurfarray.c $(SDL) $(DEBUG)
|
| 66 |
+
|
| 67 |
+
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist.cpython-310.pyc
ADDED
|
Binary file (2.04 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_dumb.cpython-310.pyc
ADDED
|
Binary file (3.12 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_msi.cpython-310.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc
ADDED
|
Binary file (3.62 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_build.cpython-310.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_build_ext.cpython-310.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_build_scripts.cpython-310.pyc
ADDED
|
Binary file (3.73 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_cmd.cpython-310.pyc
ADDED
|
Binary file (4.32 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc
ADDED
|
Binary file (4.74 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_dep_util.cpython-310.pyc
ADDED
|
Binary file (2.55 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_file_util.cpython-310.pyc
ADDED
|
Binary file (4.93 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_install_data.cpython-310.pyc
ADDED
|
Binary file (1.95 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_msvc9compiler.cpython-310.pyc
ADDED
|
Binary file (5.93 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_register.cpython-310.pyc
ADDED
|
Binary file (8.44 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_sdist.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_sysconfig.cpython-310.pyc
ADDED
|
Binary file (8.84 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_unixccompiler.cpython-310.pyc
ADDED
|
Binary file (4.75 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/__pycache__/test_version.cpython-310.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
parrot/lib/python3.10/distutils/tests/support.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Support code for distutils test cases."""
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import shutil
|
| 5 |
+
import tempfile
|
| 6 |
+
import unittest
|
| 7 |
+
import sysconfig
|
| 8 |
+
from copy import deepcopy
|
| 9 |
+
from test.support import os_helper
|
| 10 |
+
|
| 11 |
+
from distutils import log
|
| 12 |
+
from distutils.log import DEBUG, INFO, WARN, ERROR, FATAL
|
| 13 |
+
from distutils.core import Distribution
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class LoggingSilencer(object):
|
| 17 |
+
|
| 18 |
+
def setUp(self):
|
| 19 |
+
super().setUp()
|
| 20 |
+
self.threshold = log.set_threshold(log.FATAL)
|
| 21 |
+
# catching warnings
|
| 22 |
+
# when log will be replaced by logging
|
| 23 |
+
# we won't need such monkey-patch anymore
|
| 24 |
+
self._old_log = log.Log._log
|
| 25 |
+
log.Log._log = self._log
|
| 26 |
+
self.logs = []
|
| 27 |
+
|
| 28 |
+
def tearDown(self):
|
| 29 |
+
log.set_threshold(self.threshold)
|
| 30 |
+
log.Log._log = self._old_log
|
| 31 |
+
super().tearDown()
|
| 32 |
+
|
| 33 |
+
def _log(self, level, msg, args):
|
| 34 |
+
if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
|
| 35 |
+
raise ValueError('%s wrong log level' % str(level))
|
| 36 |
+
if not isinstance(msg, str):
|
| 37 |
+
raise TypeError("msg should be str, not '%.200s'"
|
| 38 |
+
% (type(msg).__name__))
|
| 39 |
+
self.logs.append((level, msg, args))
|
| 40 |
+
|
| 41 |
+
def get_logs(self, *levels):
|
| 42 |
+
return [msg % args for level, msg, args
|
| 43 |
+
in self.logs if level in levels]
|
| 44 |
+
|
| 45 |
+
def clear_logs(self):
|
| 46 |
+
self.logs = []
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class TempdirManager(object):
|
| 50 |
+
"""Mix-in class that handles temporary directories for test cases.
|
| 51 |
+
|
| 52 |
+
This is intended to be used with unittest.TestCase.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
def setUp(self):
|
| 56 |
+
super().setUp()
|
| 57 |
+
self.old_cwd = os.getcwd()
|
| 58 |
+
self.tempdirs = []
|
| 59 |
+
|
| 60 |
+
def tearDown(self):
|
| 61 |
+
# Restore working dir, for Solaris and derivatives, where rmdir()
|
| 62 |
+
# on the current directory fails.
|
| 63 |
+
os.chdir(self.old_cwd)
|
| 64 |
+
super().tearDown()
|
| 65 |
+
while self.tempdirs:
|
| 66 |
+
tmpdir = self.tempdirs.pop()
|
| 67 |
+
os_helper.rmtree(tmpdir)
|
| 68 |
+
|
| 69 |
+
def mkdtemp(self):
|
| 70 |
+
"""Create a temporary directory that will be cleaned up.
|
| 71 |
+
|
| 72 |
+
Returns the path of the directory.
|
| 73 |
+
"""
|
| 74 |
+
d = tempfile.mkdtemp()
|
| 75 |
+
self.tempdirs.append(d)
|
| 76 |
+
return d
|
| 77 |
+
|
| 78 |
+
def write_file(self, path, content='xxx'):
|
| 79 |
+
"""Writes a file in the given path.
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
path can be a string or a sequence.
|
| 83 |
+
"""
|
| 84 |
+
if isinstance(path, (list, tuple)):
|
| 85 |
+
path = os.path.join(*path)
|
| 86 |
+
f = open(path, 'w')
|
| 87 |
+
try:
|
| 88 |
+
f.write(content)
|
| 89 |
+
finally:
|
| 90 |
+
f.close()
|
| 91 |
+
|
| 92 |
+
def create_dist(self, pkg_name='foo', **kw):
|
| 93 |
+
"""Will generate a test environment.
|
| 94 |
+
|
| 95 |
+
This function creates:
|
| 96 |
+
- a Distribution instance using keywords
|
| 97 |
+
- a temporary directory with a package structure
|
| 98 |
+
|
| 99 |
+
It returns the package directory and the distribution
|
| 100 |
+
instance.
|
| 101 |
+
"""
|
| 102 |
+
tmp_dir = self.mkdtemp()
|
| 103 |
+
pkg_dir = os.path.join(tmp_dir, pkg_name)
|
| 104 |
+
os.mkdir(pkg_dir)
|
| 105 |
+
dist = Distribution(attrs=kw)
|
| 106 |
+
|
| 107 |
+
return pkg_dir, dist
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class DummyCommand:
|
| 111 |
+
"""Class to store options for retrieval via set_undefined_options()."""
|
| 112 |
+
|
| 113 |
+
def __init__(self, **kwargs):
|
| 114 |
+
for kw, val in kwargs.items():
|
| 115 |
+
setattr(self, kw, val)
|
| 116 |
+
|
| 117 |
+
def ensure_finalized(self):
|
| 118 |
+
pass
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class EnvironGuard(object):
|
| 122 |
+
|
| 123 |
+
def setUp(self):
|
| 124 |
+
super(EnvironGuard, self).setUp()
|
| 125 |
+
self.old_environ = deepcopy(os.environ)
|
| 126 |
+
|
| 127 |
+
def tearDown(self):
|
| 128 |
+
for key, value in self.old_environ.items():
|
| 129 |
+
if os.environ.get(key) != value:
|
| 130 |
+
os.environ[key] = value
|
| 131 |
+
|
| 132 |
+
for key in tuple(os.environ.keys()):
|
| 133 |
+
if key not in self.old_environ:
|
| 134 |
+
del os.environ[key]
|
| 135 |
+
|
| 136 |
+
super(EnvironGuard, self).tearDown()
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def copy_xxmodule_c(directory):
|
| 140 |
+
"""Helper for tests that need the xxmodule.c source file.
|
| 141 |
+
|
| 142 |
+
Example use:
|
| 143 |
+
|
| 144 |
+
def test_compile(self):
|
| 145 |
+
copy_xxmodule_c(self.tmpdir)
|
| 146 |
+
self.assertIn('xxmodule.c', os.listdir(self.tmpdir))
|
| 147 |
+
|
| 148 |
+
If the source file can be found, it will be copied to *directory*. If not,
|
| 149 |
+
the test will be skipped. Errors during copy are not caught.
|
| 150 |
+
"""
|
| 151 |
+
filename = _get_xxmodule_path()
|
| 152 |
+
if filename is None:
|
| 153 |
+
raise unittest.SkipTest('cannot find xxmodule.c (test must run in '
|
| 154 |
+
'the python build dir)')
|
| 155 |
+
shutil.copy(filename, directory)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _get_xxmodule_path():
|
| 159 |
+
srcdir = sysconfig.get_config_var('srcdir')
|
| 160 |
+
candidates = [
|
| 161 |
+
# use installed copy if available
|
| 162 |
+
os.path.join(os.path.dirname(__file__), 'xxmodule.c'),
|
| 163 |
+
# otherwise try using copy from build directory
|
| 164 |
+
os.path.join(srcdir, 'Modules', 'xxmodule.c'),
|
| 165 |
+
# srcdir mysteriously can be $srcdir/Lib/distutils/tests when
|
| 166 |
+
# this file is run from its parent directory, so walk up the
|
| 167 |
+
# tree to find the real srcdir
|
| 168 |
+
os.path.join(srcdir, '..', '..', '..', 'Modules', 'xxmodule.c'),
|
| 169 |
+
]
|
| 170 |
+
for path in candidates:
|
| 171 |
+
if os.path.exists(path):
|
| 172 |
+
return path
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def fixup_build_ext(cmd):
|
| 176 |
+
"""Function needed to make build_ext tests pass.
|
| 177 |
+
|
| 178 |
+
When Python was built with --enable-shared on Unix, -L. is not enough to
|
| 179 |
+
find libpython<blah>.so, because regrtest runs in a tempdir, not in the
|
| 180 |
+
source directory where the .so lives.
|
| 181 |
+
|
| 182 |
+
When Python was built with in debug mode on Windows, build_ext commands
|
| 183 |
+
need their debug attribute set, and it is not done automatically for
|
| 184 |
+
some reason.
|
| 185 |
+
|
| 186 |
+
This function handles both of these things. Example use:
|
| 187 |
+
|
| 188 |
+
cmd = build_ext(dist)
|
| 189 |
+
support.fixup_build_ext(cmd)
|
| 190 |
+
cmd.ensure_finalized()
|
| 191 |
+
|
| 192 |
+
Unlike most other Unix platforms, Mac OS X embeds absolute paths
|
| 193 |
+
to shared libraries into executables, so the fixup is not needed there.
|
| 194 |
+
"""
|
| 195 |
+
if os.name == 'nt':
|
| 196 |
+
cmd.debug = sys.executable.endswith('_d.exe')
|
| 197 |
+
elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
|
| 198 |
+
# To further add to the shared builds fun on Unix, we can't just add
|
| 199 |
+
# library_dirs to the Extension() instance because that doesn't get
|
| 200 |
+
# plumbed through to the final compiler command.
|
| 201 |
+
runshared = sysconfig.get_config_var('RUNSHARED')
|
| 202 |
+
if runshared is None:
|
| 203 |
+
cmd.library_dirs = ['.']
|
| 204 |
+
else:
|
| 205 |
+
if sys.platform == 'darwin':
|
| 206 |
+
cmd.library_dirs = []
|
| 207 |
+
else:
|
| 208 |
+
name, equals, value = runshared.partition('=')
|
| 209 |
+
cmd.library_dirs = [d for d in value.split(os.pathsep) if d]
|
parrot/lib/python3.10/distutils/tests/test_archive_util.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Tests for distutils.archive_util."""
|
| 3 |
+
import unittest
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import tarfile
|
| 7 |
+
from os.path import splitdrive
|
| 8 |
+
import warnings
|
| 9 |
+
|
| 10 |
+
from distutils import archive_util
|
| 11 |
+
from distutils.archive_util import (check_archive_formats, make_tarball,
|
| 12 |
+
make_zipfile, make_archive,
|
| 13 |
+
ARCHIVE_FORMATS)
|
| 14 |
+
from distutils.spawn import find_executable, spawn
|
| 15 |
+
from distutils.tests import support
|
| 16 |
+
from test.support import run_unittest, patch
|
| 17 |
+
from test.support.os_helper import change_cwd
|
| 18 |
+
from test.support.warnings_helper import check_warnings
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
import grp
|
| 22 |
+
import pwd
|
| 23 |
+
UID_GID_SUPPORT = True
|
| 24 |
+
except ImportError:
|
| 25 |
+
UID_GID_SUPPORT = False
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import zipfile
|
| 29 |
+
ZIP_SUPPORT = True
|
| 30 |
+
except ImportError:
|
| 31 |
+
ZIP_SUPPORT = find_executable('zip')
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
import zlib
|
| 35 |
+
ZLIB_SUPPORT = True
|
| 36 |
+
except ImportError:
|
| 37 |
+
ZLIB_SUPPORT = False
|
| 38 |
+
|
| 39 |
+
try:
|
| 40 |
+
import bz2
|
| 41 |
+
except ImportError:
|
| 42 |
+
bz2 = None
|
| 43 |
+
|
| 44 |
+
try:
|
| 45 |
+
import lzma
|
| 46 |
+
except ImportError:
|
| 47 |
+
lzma = None
|
| 48 |
+
|
| 49 |
+
def can_fs_encode(filename):
|
| 50 |
+
"""
|
| 51 |
+
Return True if the filename can be saved in the file system.
|
| 52 |
+
"""
|
| 53 |
+
if os.path.supports_unicode_filenames:
|
| 54 |
+
return True
|
| 55 |
+
try:
|
| 56 |
+
filename.encode(sys.getfilesystemencoding())
|
| 57 |
+
except UnicodeEncodeError:
|
| 58 |
+
return False
|
| 59 |
+
return True
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class ArchiveUtilTestCase(support.TempdirManager,
|
| 63 |
+
support.LoggingSilencer,
|
| 64 |
+
unittest.TestCase):
|
| 65 |
+
|
| 66 |
+
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
|
| 67 |
+
def test_make_tarball(self, name='archive'):
|
| 68 |
+
# creating something to tar
|
| 69 |
+
tmpdir = self._create_files()
|
| 70 |
+
self._make_tarball(tmpdir, name, '.tar.gz')
|
| 71 |
+
# trying an uncompressed one
|
| 72 |
+
self._make_tarball(tmpdir, name, '.tar', compress=None)
|
| 73 |
+
|
| 74 |
+
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
|
| 75 |
+
def test_make_tarball_gzip(self):
|
| 76 |
+
tmpdir = self._create_files()
|
| 77 |
+
self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip')
|
| 78 |
+
|
| 79 |
+
@unittest.skipUnless(bz2, 'Need bz2 support to run')
|
| 80 |
+
def test_make_tarball_bzip2(self):
|
| 81 |
+
tmpdir = self._create_files()
|
| 82 |
+
self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2')
|
| 83 |
+
|
| 84 |
+
@unittest.skipUnless(lzma, 'Need lzma support to run')
|
| 85 |
+
def test_make_tarball_xz(self):
|
| 86 |
+
tmpdir = self._create_files()
|
| 87 |
+
self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz')
|
| 88 |
+
|
| 89 |
+
@unittest.skipUnless(can_fs_encode('årchiv'),
|
| 90 |
+
'File system cannot handle this filename')
|
| 91 |
+
def test_make_tarball_latin1(self):
|
| 92 |
+
"""
|
| 93 |
+
Mirror test_make_tarball, except filename contains latin characters.
|
| 94 |
+
"""
|
| 95 |
+
self.test_make_tarball('årchiv') # note this isn't a real word
|
| 96 |
+
|
| 97 |
+
@unittest.skipUnless(can_fs_encode('のアーカイブ'),
|
| 98 |
+
'File system cannot handle this filename')
|
| 99 |
+
def test_make_tarball_extended(self):
|
| 100 |
+
"""
|
| 101 |
+
Mirror test_make_tarball, except filename contains extended
|
| 102 |
+
characters outside the latin charset.
|
| 103 |
+
"""
|
| 104 |
+
self.test_make_tarball('のアーカイブ') # japanese for archive
|
| 105 |
+
|
| 106 |
+
def _make_tarball(self, tmpdir, target_name, suffix, **kwargs):
|
| 107 |
+
tmpdir2 = self.mkdtemp()
|
| 108 |
+
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
|
| 109 |
+
"source and target should be on same drive")
|
| 110 |
+
|
| 111 |
+
base_name = os.path.join(tmpdir2, target_name)
|
| 112 |
+
|
| 113 |
+
# working with relative paths to avoid tar warnings
|
| 114 |
+
with change_cwd(tmpdir):
|
| 115 |
+
make_tarball(splitdrive(base_name)[1], 'dist', **kwargs)
|
| 116 |
+
|
| 117 |
+
# check if the compressed tarball was created
|
| 118 |
+
tarball = base_name + suffix
|
| 119 |
+
self.assertTrue(os.path.exists(tarball))
|
| 120 |
+
self.assertEqual(self._tarinfo(tarball), self._created_files)
|
| 121 |
+
|
| 122 |
+
def _tarinfo(self, path):
|
| 123 |
+
tar = tarfile.open(path)
|
| 124 |
+
try:
|
| 125 |
+
names = tar.getnames()
|
| 126 |
+
names.sort()
|
| 127 |
+
return names
|
| 128 |
+
finally:
|
| 129 |
+
tar.close()
|
| 130 |
+
|
| 131 |
+
_zip_created_files = ['dist/', 'dist/file1', 'dist/file2',
|
| 132 |
+
'dist/sub/', 'dist/sub/file3', 'dist/sub2/']
|
| 133 |
+
_created_files = [p.rstrip('/') for p in _zip_created_files]
|
| 134 |
+
|
| 135 |
+
def _create_files(self):
|
| 136 |
+
# creating something to tar
|
| 137 |
+
tmpdir = self.mkdtemp()
|
| 138 |
+
dist = os.path.join(tmpdir, 'dist')
|
| 139 |
+
os.mkdir(dist)
|
| 140 |
+
self.write_file([dist, 'file1'], 'xxx')
|
| 141 |
+
self.write_file([dist, 'file2'], 'xxx')
|
| 142 |
+
os.mkdir(os.path.join(dist, 'sub'))
|
| 143 |
+
self.write_file([dist, 'sub', 'file3'], 'xxx')
|
| 144 |
+
os.mkdir(os.path.join(dist, 'sub2'))
|
| 145 |
+
return tmpdir
|
| 146 |
+
|
| 147 |
+
@unittest.skipUnless(find_executable('tar') and find_executable('gzip')
|
| 148 |
+
and ZLIB_SUPPORT,
|
| 149 |
+
'Need the tar, gzip and zlib command to run')
|
| 150 |
+
def test_tarfile_vs_tar(self):
|
| 151 |
+
tmpdir = self._create_files()
|
| 152 |
+
tmpdir2 = self.mkdtemp()
|
| 153 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 154 |
+
old_dir = os.getcwd()
|
| 155 |
+
os.chdir(tmpdir)
|
| 156 |
+
try:
|
| 157 |
+
make_tarball(base_name, 'dist')
|
| 158 |
+
finally:
|
| 159 |
+
os.chdir(old_dir)
|
| 160 |
+
|
| 161 |
+
# check if the compressed tarball was created
|
| 162 |
+
tarball = base_name + '.tar.gz'
|
| 163 |
+
self.assertTrue(os.path.exists(tarball))
|
| 164 |
+
|
| 165 |
+
# now create another tarball using `tar`
|
| 166 |
+
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
|
| 167 |
+
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
|
| 168 |
+
gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar']
|
| 169 |
+
old_dir = os.getcwd()
|
| 170 |
+
os.chdir(tmpdir)
|
| 171 |
+
try:
|
| 172 |
+
spawn(tar_cmd)
|
| 173 |
+
spawn(gzip_cmd)
|
| 174 |
+
finally:
|
| 175 |
+
os.chdir(old_dir)
|
| 176 |
+
|
| 177 |
+
self.assertTrue(os.path.exists(tarball2))
|
| 178 |
+
# let's compare both tarballs
|
| 179 |
+
self.assertEqual(self._tarinfo(tarball), self._created_files)
|
| 180 |
+
self.assertEqual(self._tarinfo(tarball2), self._created_files)
|
| 181 |
+
|
| 182 |
+
# trying an uncompressed one
|
| 183 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 184 |
+
old_dir = os.getcwd()
|
| 185 |
+
os.chdir(tmpdir)
|
| 186 |
+
try:
|
| 187 |
+
make_tarball(base_name, 'dist', compress=None)
|
| 188 |
+
finally:
|
| 189 |
+
os.chdir(old_dir)
|
| 190 |
+
tarball = base_name + '.tar'
|
| 191 |
+
self.assertTrue(os.path.exists(tarball))
|
| 192 |
+
|
| 193 |
+
# now for a dry_run
|
| 194 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 195 |
+
old_dir = os.getcwd()
|
| 196 |
+
os.chdir(tmpdir)
|
| 197 |
+
try:
|
| 198 |
+
make_tarball(base_name, 'dist', compress=None, dry_run=True)
|
| 199 |
+
finally:
|
| 200 |
+
os.chdir(old_dir)
|
| 201 |
+
tarball = base_name + '.tar'
|
| 202 |
+
self.assertTrue(os.path.exists(tarball))
|
| 203 |
+
|
| 204 |
+
    @unittest.skipUnless(find_executable('compress'),
                         'The compress program is required')
    def test_compress_deprecated(self):
        """make_tarball(compress='compress') still works but warns.

        The legacy Unix ``compress`` format must emit exactly one
        warning, both on a real run (which creates the ``.tar.Z`` file)
        and on a dry run (which must create nothing).
        """
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')

        # using compress and testing the PendingDeprecationWarning
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with check_warnings() as w:
                warnings.simplefilter("always")
                make_tarball(base_name, 'dist', compress='compress')
        finally:
            # always restore the cwd, even if make_tarball raised
            os.chdir(old_dir)
        tarball = base_name + '.tar.Z'
        self.assertTrue(os.path.exists(tarball))
        self.assertEqual(len(w.warnings), 1)

        # same test with dry_run: no file is written, but the single
        # warning is still emitted
        os.remove(tarball)
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with check_warnings() as w:
                warnings.simplefilter("always")
                make_tarball(base_name, 'dist', compress='compress',
                             dry_run=True)
        finally:
            os.chdir(old_dir)
        self.assertFalse(os.path.exists(tarball))
        self.assertEqual(len(w.warnings), 1)
|
| 236 |
+
|
| 237 |
+
    @unittest.skipUnless(ZIP_SUPPORT and ZLIB_SUPPORT,
                         'Need zip and zlib support to run')
    def test_make_zipfile(self):
        """make_zipfile() archives a tree into a .zip with the expected names."""
        # creating something to archive
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        with change_cwd(tmpdir):
            make_zipfile(base_name, 'dist')

        # check if the compressed archive was created
        tarball = base_name + '.zip'
        self.assertTrue(os.path.exists(tarball))
        # member names must match the fixture list exactly
        with zipfile.ZipFile(tarball) as zf:
            self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
|
| 251 |
+
|
| 252 |
+
    @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
    def test_make_zipfile_no_zlib(self):
        """Without zlib, make_zipfile() falls back to ZIP_STORED.

        ``zlib`` is patched out of archive_util's ``zipfile`` module and
        ``ZipFile`` is wrapped so we can record the compression mode the
        command actually requested.
        """
        patch(self, archive_util.zipfile, 'zlib', None)  # force zlib ImportError

        called = []
        zipfile_class = zipfile.ZipFile
        def fake_zipfile(*a, **kw):
            # record only uncompressed (ZIP_STORED) openings
            if kw.get('compression', None) == zipfile.ZIP_STORED:
                called.append((a, kw))
            return zipfile_class(*a, **kw)

        patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile)

        # create something to archive
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        with change_cwd(tmpdir):
            make_zipfile(base_name, 'dist')

        tarball = base_name + '.zip'
        # exactly one archive was opened, and it was opened uncompressed
        self.assertEqual(called,
                         [((tarball, "w"), {'compression': zipfile.ZIP_STORED})])
        self.assertTrue(os.path.exists(tarball))
        with zipfile.ZipFile(tarball) as zf:
            self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
|
| 277 |
+
|
| 278 |
+
def test_check_archive_formats(self):
|
| 279 |
+
self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']),
|
| 280 |
+
'xxx')
|
| 281 |
+
self.assertIsNone(check_archive_formats(['gztar', 'bztar', 'xztar',
|
| 282 |
+
'ztar', 'tar', 'zip']))
|
| 283 |
+
|
| 284 |
+
def test_make_archive(self):
|
| 285 |
+
tmpdir = self.mkdtemp()
|
| 286 |
+
base_name = os.path.join(tmpdir, 'archive')
|
| 287 |
+
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
|
| 288 |
+
|
| 289 |
+
def test_make_archive_cwd(self):
|
| 290 |
+
current_dir = os.getcwd()
|
| 291 |
+
def _breaks(*args, **kw):
|
| 292 |
+
raise RuntimeError()
|
| 293 |
+
ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
|
| 294 |
+
try:
|
| 295 |
+
try:
|
| 296 |
+
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
|
| 297 |
+
except:
|
| 298 |
+
pass
|
| 299 |
+
self.assertEqual(os.getcwd(), current_dir)
|
| 300 |
+
finally:
|
| 301 |
+
del ARCHIVE_FORMATS['xxx']
|
| 302 |
+
|
| 303 |
+
    def test_make_archive_tar(self):
        """make_archive(..., 'tar') builds an uncompressed .tar archive."""
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'tar', base_dir, 'dist')
        self.assertTrue(os.path.exists(res))
        self.assertEqual(os.path.basename(res), 'archive.tar')
        # the archive must hold exactly the fixture files
        self.assertEqual(self._tarinfo(res), self._created_files)
|
| 310 |
+
|
| 311 |
+
    @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
    def test_make_archive_gztar(self):
        """make_archive(..., 'gztar') builds a gzip-compressed tarball."""
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'gztar', base_dir, 'dist')
        self.assertTrue(os.path.exists(res))
        self.assertEqual(os.path.basename(res), 'archive.tar.gz')
        # the archive must hold exactly the fixture files
        self.assertEqual(self._tarinfo(res), self._created_files)
|
| 319 |
+
|
| 320 |
+
    @unittest.skipUnless(bz2, 'Need bz2 support to run')
    def test_make_archive_bztar(self):
        """make_archive(..., 'bztar') builds a bzip2-compressed tarball."""
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'bztar', base_dir, 'dist')
        self.assertTrue(os.path.exists(res))
        self.assertEqual(os.path.basename(res), 'archive.tar.bz2')
        # the archive must hold exactly the fixture files
        self.assertEqual(self._tarinfo(res), self._created_files)
|
| 328 |
+
|
| 329 |
+
    @unittest.skipUnless(lzma, 'Need xz support to run')
    def test_make_archive_xztar(self):
        """make_archive(..., 'xztar') builds an xz-compressed tarball."""
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'xztar', base_dir, 'dist')
        self.assertTrue(os.path.exists(res))
        self.assertEqual(os.path.basename(res), 'archive.tar.xz')
        # the archive must hold exactly the fixture files
        self.assertEqual(self._tarinfo(res), self._created_files)
|
| 337 |
+
|
| 338 |
+
    def test_make_archive_owner_group(self):
        """make_archive() accepts owner/group arguments for zip and tar.

        Every combination below must simply succeed — even unknown
        owner/group names must not make the call fail.
        """
        # testing make_archive with owner and group, with various combinations
        # this works even if there's not gid/uid support
        if UID_GID_SUPPORT:
            group = grp.getgrgid(0)[0]
            owner = pwd.getpwuid(0)[0]
        else:
            # no grp/pwd on this platform: fall back to literal names
            group = owner = 'root'

        base_dir = self._create_files()
        root_dir = self.mkdtemp()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
                           group=group)
        self.assertTrue(os.path.exists(res))

        # owner/group are optional for zip
        res = make_archive(base_name, 'zip', root_dir, base_dir)
        self.assertTrue(os.path.exists(res))

        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner=owner, group=group)
        self.assertTrue(os.path.exists(res))

        # unknown owner/group names must not break archive creation
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner='kjhkjhkjg', group='oihohoh')
        self.assertTrue(os.path.exists(res))
|
| 364 |
+
|
| 365 |
+
    @unittest.skipUnless(ZLIB_SUPPORT, "Requires zlib")
    @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
    def test_tarfile_root_owner(self):
        """Passing root's owner/group names stores uid/gid 0 in the tar."""
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        # names of uid/gid 0 on this system (usually 'root')
        group = grp.getgrgid(0)[0]
        owner = pwd.getpwuid(0)[0]
        try:
            archive_name = make_tarball(base_name, 'dist', compress=None,
                                        owner=owner, group=group)
        finally:
            # always restore the cwd, even if make_tarball raised
            os.chdir(old_dir)

        # check if the uncompressed tarball was created
        self.assertTrue(os.path.exists(archive_name))

        # every member must be recorded as owned by uid 0 / gid 0
        archive = tarfile.open(archive_name)
        try:
            for member in archive.getmembers():
                self.assertEqual(member.uid, 0)
                self.assertEqual(member.gid, 0)
        finally:
            archive.close()
|
| 391 |
+
|
| 392 |
+
def test_suite():
    """Return the suite of archive_util tests.

    Uses TestLoader instead of ``unittest.makeSuite`` (deprecated in
    Python 3.11, removed in 3.13); the resulting suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(ArchiveUtilTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_bdist_dumb.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.bdist_dumb."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import zipfile
|
| 6 |
+
import unittest
|
| 7 |
+
from test.support import run_unittest
|
| 8 |
+
|
| 9 |
+
from distutils.core import Distribution
|
| 10 |
+
from distutils.command.bdist_dumb import bdist_dumb
|
| 11 |
+
from distutils.tests import support
|
| 12 |
+
|
| 13 |
+
# Minimal setup.py written into the temporary package built by the tests.
# NOTE: this is a runtime string (written to disk and executed by the
# distutils machinery), so its contents must stay byte-for-byte intact.
SETUP_PY = """\
from distutils.core import setup
import foo

setup(name='foo', version='0.1', py_modules=['foo'],
      url='xxx', author='xxx', author_email='xxx')

"""

# zlib is an optional interpreter feature; bdist_dumb's zip format needs
# it, so the tests below are skipped when it is unavailable.
try:
    import zlib
    ZLIB_SUPPORT = True
except ImportError:
    ZLIB_SUPPORT = False
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class BuildDumbTestCase(support.TempdirManager,
                        support.LoggingSilencer,
                        support.EnvironGuard,
                        unittest.TestCase):
    """End-to-end test of the ``bdist_dumb`` command."""

    def setUp(self):
        """Remember the cwd and sys.argv so tearDown can restore them."""
        super(BuildDumbTestCase, self).setUp()
        self.old_location = os.getcwd()
        # keep both the list object and a copy of its contents
        self.old_sys_argv = sys.argv, sys.argv[:]

    def tearDown(self):
        """Restore cwd and sys.argv mutated by the test."""
        os.chdir(self.old_location)
        sys.argv = self.old_sys_argv[0]
        sys.argv[:] = self.old_sys_argv[1]
        super(BuildDumbTestCase, self).tearDown()

    @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
    def test_simple_built(self):
        """bdist_dumb on a one-module package produces the expected zip."""

        # let's create a simple package
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')

        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)

        sys.argv = ['setup.py']
        cmd = bdist_dumb(dist)

        # force the zip format so the output is the same no matter
        # what the platform is
        cmd.format = 'zip'

        cmd.ensure_finalized()
        cmd.run()

        # the dist/ directory must contain exactly one archive
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)

        self.assertEqual(dist_created, [base])

        # now let's check what we have in the zip file
        fp = zipfile.ZipFile(os.path.join('dist', base))
        try:
            contents = fp.namelist()
        finally:
            fp.close()

        # compare basenames only: the archive nests files under the
        # install prefix
        contents = sorted(filter(None, map(os.path.basename, contents)))
        wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
        if not sys.dont_write_bytecode:
            # a .pyc is only expected when bytecode writing is enabled
            wanted.append('foo.%s.pyc' % sys.implementation.cache_tag)
        self.assertEqual(contents, sorted(wanted))
|
| 92 |
+
|
| 93 |
+
def test_suite():
    """Return the suite of bdist_dumb tests.

    Uses TestLoader instead of ``unittest.makeSuite`` (deprecated in
    Python 3.11, removed in 3.13); the resulting suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildDumbTestCase)

if __name__ == '__main__':
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_bdist_rpm.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.bdist_rpm."""
|
| 2 |
+
|
| 3 |
+
import unittest
|
| 4 |
+
import sys
|
| 5 |
+
import os
|
| 6 |
+
from test.support import run_unittest, requires_zlib
|
| 7 |
+
|
| 8 |
+
from distutils.core import Distribution
|
| 9 |
+
from distutils.command.bdist_rpm import bdist_rpm
|
| 10 |
+
from distutils.tests import support
|
| 11 |
+
from distutils.spawn import find_executable
|
| 12 |
+
|
| 13 |
+
SETUP_PY = """\
|
| 14 |
+
from distutils.core import setup
|
| 15 |
+
import foo
|
| 16 |
+
|
| 17 |
+
setup(name='foo', version='0.1', py_modules=['foo'],
|
| 18 |
+
url='xxx', author='xxx', author_email='xxx')
|
| 19 |
+
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
class BuildRpmTestCase(support.TempdirManager,
                       support.EnvironGuard,
                       support.LoggingSilencer,
                       unittest.TestCase):
    """End-to-end tests of the ``bdist_rpm`` command (Linux-only, needs
    the external ``rpm`` and ``rpmbuild`` tools)."""

    def setUp(self):
        """Skip early if rpmbuild cannot handle the interpreter path,
        then remember the cwd and sys.argv for tearDown."""
        try:
            # rpmbuild passes sys.executable through UTF-8 encoded files
            sys.executable.encode("UTF-8")
        except UnicodeEncodeError:
            raise unittest.SkipTest("sys.executable is not encodable to UTF-8")

        super(BuildRpmTestCase, self).setUp()
        self.old_location = os.getcwd()
        # keep both the list object and a copy of its contents
        self.old_sys_argv = sys.argv, sys.argv[:]

    def tearDown(self):
        """Restore cwd and sys.argv mutated by the test."""
        os.chdir(self.old_location)
        sys.argv = self.old_sys_argv[0]
        sys.argv[:] = self.old_sys_argv[1]
        super(BuildRpmTestCase, self).tearDown()

    # XXX I am unable yet to make this test work without
    # spurious sdtout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious sdtout/stderr output under Mac OS X')
    @requires_zlib()
    @unittest.skipIf(find_executable('rpm') is None,
                     'the rpm command is not found')
    @unittest.skipIf(find_executable('rpmbuild') is None,
                     'the rpmbuild command is not found')
    def test_quiet(self):
        """bdist_rpm in quiet mode builds the rpm and records dist_files."""
        # let's create a package
        tmp_dir = self.mkdtemp()
        os.environ['HOME'] = tmp_dir  # to confine dir '.rpmdb' creation
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')

        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)

        sys.argv = ['setup.py']
        cmd = bdist_rpm(dist)
        cmd.fix_python = True

        # running in quiet mode
        cmd.quiet = 1
        cmd.ensure_finalized()
        cmd.run()

        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        self.assertIn('foo-0.1-1.noarch.rpm', dist_created)

        # bug #2945: upload ignores bdist_rpm files
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)

    # XXX I am unable yet to make this test work without
    # spurious sdtout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious sdtout/stderr output under Mac OS X')
    @requires_zlib()
    # http://bugs.python.org/issue1533164
    @unittest.skipIf(find_executable('rpm') is None,
                     'the rpm command is not found')
    @unittest.skipIf(find_executable('rpmbuild') is None,
                     'the rpmbuild command is not found')
    def test_no_optimize_flag(self):
        """Regression test for issue 1533164 (see URL above): the build
        must still succeed and register its dist_files."""
        # let's create a package that breaks bdist_rpm
        tmp_dir = self.mkdtemp()
        os.environ['HOME'] = tmp_dir  # to confine dir '.rpmdb' creation
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')

        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)

        sys.argv = ['setup.py']
        cmd = bdist_rpm(dist)
        cmd.fix_python = True

        cmd.quiet = 1
        cmd.ensure_finalized()
        cmd.run()

        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        self.assertIn('foo-0.1-1.noarch.rpm', dist_created)

        # bug #2945: upload ignores bdist_rpm files
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)

        os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
|
| 130 |
+
|
| 131 |
+
def test_suite():
    """Return the suite of bdist_rpm tests.

    Uses TestLoader instead of ``unittest.makeSuite`` (deprecated in
    Python 3.11, removed in 3.13); the resulting suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildRpmTestCase)

if __name__ == '__main__':
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_build.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.build."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
from test.support import run_unittest
|
| 6 |
+
|
| 7 |
+
from distutils.command.build import build
|
| 8 |
+
from distutils.tests import support
|
| 9 |
+
from sysconfig import get_platform
|
| 10 |
+
|
| 11 |
+
class BuildTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Checks the default option values computed by build.finalize_options()."""

    def test_finalize_options(self):
        """finalize_options() fills in platform-derived default paths."""
        pkg_dir, dist = self.create_dist()
        cmd = build(dist)
        cmd.finalize_options()

        # plat_name defaults to the running platform
        self.assertEqual(cmd.plat_name, get_platform())

        # build_purelib defaults to <build_base>/lib
        self.assertEqual(cmd.build_purelib,
                         os.path.join(cmd.build_base, 'lib'))

        # build_platlib is '<build_base>/lib.<platform>-<maj>.<min>[-pydebug]',
        # e.g. build/lib.macosx-10.3-i386-2.7
        version = sys.version_info
        platform_suffix = f'.{cmd.plat_name}-{version[0]}.{version[1]}'
        if hasattr(sys, 'gettotalrefcount'):
            # debug builds carry a '-pydebug' marker
            self.assertTrue(cmd.build_platlib.endswith('-pydebug'))
            platform_suffix += '-pydebug'
        self.assertEqual(cmd.build_platlib,
                         os.path.join(cmd.build_base, 'lib' + platform_suffix))

        # by default, build_lib follows build_purelib
        self.assertEqual(cmd.build_lib, cmd.build_purelib)

        # build_temp is '<build_base>/temp<platform suffix>'
        self.assertEqual(cmd.build_temp,
                         os.path.join(cmd.build_base, 'temp' + platform_suffix))

        # build_scripts is '<build_base>/scripts-<maj>.<min>'
        expected_scripts = os.path.join(
            cmd.build_base, f'scripts-{version[0]}.{version[1]}')
        self.assertEqual(cmd.build_scripts, expected_scripts)

        # executable is the normalized path of the running interpreter
        self.assertEqual(cmd.executable, os.path.normpath(sys.executable))
|
| 51 |
+
|
| 52 |
+
def test_suite():
    """Return the suite of build-command tests.

    Uses TestLoader instead of ``unittest.makeSuite`` (deprecated in
    Python 3.11, removed in 3.13); the resulting suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_build_clib.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.build_clib."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import sysconfig
|
| 6 |
+
|
| 7 |
+
from test.support import run_unittest, missing_compiler_executable
|
| 8 |
+
|
| 9 |
+
from distutils.command.build_clib import build_clib
|
| 10 |
+
from distutils.errors import DistutilsSetupError
|
| 11 |
+
from distutils.tests import support
|
| 12 |
+
|
| 13 |
+
class BuildCLibTestCase(support.TempdirManager,
                        support.LoggingSilencer,
                        unittest.TestCase):
    """Tests for the ``build_clib`` command (static C libraries)."""

    def setUp(self):
        """Snapshot sysconfig's config-var cache so tests can't leak changes."""
        super().setUp()
        self._backup_CONFIG_VARS = dict(sysconfig._CONFIG_VARS)

    def tearDown(self):
        """Restore sysconfig's config-var cache from the snapshot."""
        super().tearDown()
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(self._backup_CONFIG_VARS)

    def test_check_library_dist(self):
        """check_library_list() validates the shape of 'libraries'."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # 'libraries' option must be a list
        self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo')

        # each element of 'libraries' must be a 2-tuple
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          ['foo1', 'foo2'])

        # first element of each tuple in 'libraries'
        # must be a string (the library name)
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [(1, 'foo1'), ('name', 'foo2')])

        # library name may not contain directory separators
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [('name', 'foo1'),
                           ('another/name', 'foo2')])

        # second element of each tuple must be a dictionary (build info)
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [('name', {}),
                           ('another', 'foo2')])

        # well-formed lists pass silently
        libs = [('name', {}), ('name', {'ok': 'good'})]
        cmd.check_library_list(libs)

    def test_get_source_files(self):
        """get_source_files() requires 'sources' to be a list/tuple of names."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # in 'libraries' option, 'sources' must be present and must be
        # a list of source filenames
        cmd.libraries = [('name', {})]
        self.assertRaises(DistutilsSetupError, cmd.get_source_files)

        cmd.libraries = [('name', {'sources': 1})]
        self.assertRaises(DistutilsSetupError, cmd.get_source_files)

        cmd.libraries = [('name', {'sources': ['a', 'b']})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b'])

        # a tuple of sources is accepted too
        cmd.libraries = [('name', {'sources': ('a', 'b')})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b'])

        # sources from several libraries are concatenated in order
        cmd.libraries = [('name', {'sources': ('a', 'b')}),
                         ('name2', {'sources': ['c', 'd']})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd'])

    def test_build_libraries(self):
        """build_libraries() type-checks 'sources' before compiling."""

        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)
        class FakeCompiler:
            # no-op stand-in: compiling is not under test here
            def compile(*args, **kw):
                pass
            create_static_lib = compile

        cmd.compiler = FakeCompiler()

        # build_libraries is also doing a bit of typo checking
        lib = [('name', {'sources': 'notvalid'})]
        self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)

        # empty list/tuple of sources is valid (builds nothing)
        lib = [('name', {'sources': list()})]
        cmd.build_libraries(lib)

        lib = [('name', {'sources': tuple()})]
        cmd.build_libraries(lib)

    def test_finalize_options(self):
        """finalize_options() normalizes include_dirs and validates libraries."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # a string include_dirs is wrapped into a one-element list
        cmd.include_dirs = 'one-dir'
        cmd.finalize_options()
        self.assertEqual(cmd.include_dirs, ['one-dir'])

        # None becomes an empty list
        cmd.include_dirs = None
        cmd.finalize_options()
        self.assertEqual(cmd.include_dirs, [])

        # a malformed 'libraries' option is rejected at finalize time
        cmd.distribution.libraries = 'WONTWORK'
        self.assertRaises(DistutilsSetupError, cmd.finalize_options)

    @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
    def test_run(self):
        """An actual build of a one-file static library produces libfoo.a."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        foo_c = os.path.join(pkg_dir, 'foo.c')
        self.write_file(foo_c, 'int main(void) { return 1;}\n')
        cmd.libraries = [('foo', {'sources': [foo_c]})]

        build_temp = os.path.join(pkg_dir, 'build')
        os.mkdir(build_temp)
        cmd.build_temp = build_temp
        cmd.build_clib = build_temp

        # Before we run the command, we want to make sure
        # all compiler executables are present on the system.
        ccmd = missing_compiler_executable()
        if ccmd is not None:
            self.skipTest('The %r command is not found' % ccmd)

        # this should work
        cmd.run()

        # let's check the result
        self.assertIn('libfoo.a', os.listdir(build_temp))
|
| 139 |
+
|
| 140 |
+
def test_suite():
    """Return the suite of build_clib tests.

    Uses TestLoader instead of ``unittest.makeSuite`` (deprecated in
    Python 3.11, removed in 3.13); the resulting suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildCLibTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_build_scripts.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.build_scripts."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import unittest
|
| 5 |
+
|
| 6 |
+
from distutils.command.build_scripts import build_scripts
|
| 7 |
+
from distutils.core import Distribution
|
| 8 |
+
from distutils import sysconfig
|
| 9 |
+
|
| 10 |
+
from distutils.tests import support
|
| 11 |
+
from test.support import run_unittest
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BuildScriptsTestCase(support.TempdirManager,
                           support.LoggingSilencer,
                           unittest.TestCase):
    """Tests for the ``build_scripts`` command (shebang adjustment/copy)."""

    def test_default_settings(self):
        """finalize_options() inherits force/build_dir from the build command."""
        cmd = self.get_build_scripts_cmd("/foo/bar", [])
        # nothing set before finalization
        self.assertFalse(cmd.force)
        self.assertIsNone(cmd.build_dir)

        cmd.finalize_options()

        # values come from the (dummy) 'build' command object
        self.assertTrue(cmd.force)
        self.assertEqual(cmd.build_dir, "/foo/bar")

    def test_build(self):
        """run() copies every sample script into the target directory."""
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)

        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()
        cmd.run()

        built = os.listdir(target)
        for name in expected:
            self.assertIn(name, built)

    def get_build_scripts_cmd(self, target, scripts):
        """Build a build_scripts command wired to a dummy 'build' command."""
        import sys
        dist = Distribution()
        dist.scripts = scripts
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts=target,
            force=1,
            executable=sys.executable
            )
        return build_scripts(dist)

    def write_sample_scripts(self, dir):
        """Create two Python scripts and one shell script; return their names."""
        expected = []
        expected.append("script1.py")
        self.write_script(dir, "script1.py",
                          ("#! /usr/bin/env python2.3\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("script2.py")
        self.write_script(dir, "script2.py",
                          ("#!/usr/bin/python\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("shell.sh")
        self.write_script(dir, "shell.sh",
                          ("#!/bin/sh\n"
                           "# bogus shell script w/ sh-bang\n"
                           "exit 0\n"))
        return expected

    def write_script(self, dir, name, text):
        """Write *text* to dir/name, closing the file even on error."""
        f = open(os.path.join(dir, name), "w")
        try:
            f.write(text)
        finally:
            f.close()

    def test_version_int(self):
        """run() must cope with a non-string sysconfig VERSION value."""
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)


        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()

        # http://bugs.python.org/issue4524
        #
        # On linux-g++-32 with command line `./configure --enable-ipv6
        # --with-suffix=3`, python is compiled okay but the build scripts
        # failed when writing the name of the executable
        old = sysconfig.get_config_vars().get('VERSION')
        sysconfig._config_vars['VERSION'] = 4
        try:
            cmd.run()
        finally:
            # restore the real VERSION for the rest of the test run
            if old is not None:
                sysconfig._config_vars['VERSION'] = old

        built = os.listdir(target)
        for name in expected:
            self.assertIn(name, built)
|
| 107 |
+
|
| 108 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(BuildScriptsTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_check.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.check."""
|
| 2 |
+
import os
|
| 3 |
+
import textwrap
|
| 4 |
+
import unittest
|
| 5 |
+
from test.support import run_unittest
|
| 6 |
+
|
| 7 |
+
from distutils.command.check import check, HAS_DOCUTILS
|
| 8 |
+
from distutils.tests import support
|
| 9 |
+
from distutils.errors import DistutilsSetupError
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
import pygments
|
| 13 |
+
except ImportError:
|
| 14 |
+
pygments = None
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
HERE = os.path.dirname(__file__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Exercise the ``check`` command: metadata completeness warnings
    and reStructuredText validation of the long description."""

    def _run(self, metadata=None, cwd=None, **options):
        """Build, configure and run a ``check`` command.

        *metadata* feeds ``create_dist``; *options* become command
        attributes (e.g. ``strict=1``).  If *cwd* is given, the command
        runs from that directory; the previous working directory is
        restored in a ``finally`` so it is not leaked when the command
        raises (which is expected behavior under ``strict=1``).
        """
        if metadata is None:
            metadata = {}
        if cwd is not None:
            old_dir = os.getcwd()
            os.chdir(cwd)
        try:
            pkg_info, dist = self.create_dist(**metadata)
            cmd = check(dist)
            cmd.initialize_options()
            for name, value in options.items():
                setattr(cmd, name, value)
            cmd.ensure_finalized()
            cmd.run()
        finally:
            if cwd is not None:
                os.chdir(old_dir)
        return cmd

    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)

        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})

        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)

        # now a test with non-ASCII characters
        metadata = {'url': 'xxx', 'author': '\u00c9ric',
                    'author_email': 'xxx', 'name': 'xxx',
                    'version': 'xxx',
                    'description': 'Something about esszet \u00df',
                    'long_description': 'More things about esszet \u00df'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_document(self):
        pkg_info, dist = self.create_dist()
        cmd = check(dist)

        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)

        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext(self):
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)

        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})

        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = 'title\n=====\n\ntest \u00df'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

        # check that includes work to test #31292
        metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
        cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext_with_syntax_highlight(self):
        # Don't fail if there is a `code` or `code-block` directive

        example_rst_docs = []
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code:: python

                def foo():
                    pass
            """))
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code-block:: python

                def foo():
                    pass
            """))

        for rest_with_code in example_rst_docs:
            pkg_info, dist = self.create_dist(long_description=rest_with_code)
            cmd = check(dist)
            cmd.check_restructuredtext()
            msgs = cmd._check_rst_data(rest_with_code)
            # With pygments installed the directive is analyzable;
            # without it docutils reports exactly one warning.
            if pygments is not None:
                self.assertEqual(len(msgs), 0)
            else:
                self.assertEqual(len(msgs), 1)
                self.assertEqual(
                    str(msgs[0][1]),
                    'Cannot analyze code. Pygments package not found.'
                )

    def test_check_all(self):
        # strict + restructuredtext with empty metadata must raise.
        # (An unused local `metadata` dict was removed here; the run
        # below has always been invoked with an empty dict.)
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
|
| 158 |
+
|
| 159 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(CheckTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_config.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.pypirc.pypirc."""
|
| 2 |
+
import os
|
| 3 |
+
import unittest
|
| 4 |
+
|
| 5 |
+
from distutils.core import PyPIRCCommand
|
| 6 |
+
from distutils.core import Distribution
|
| 7 |
+
from distutils.log import set_threshold
|
| 8 |
+
from distutils.log import WARN
|
| 9 |
+
|
| 10 |
+
from distutils.tests import support
|
| 11 |
+
from test.support import run_unittest
|
| 12 |
+
|
| 13 |
+
# Sample .pypirc in the current multi-server format.  The server names
# under ``index-servers`` must stay indented: configparser treats
# indented lines as continuations of the preceding value.
PYPIRC = """\
[distutils]

index-servers =
    server1
    server2
    server3

[server1]
username:me
password:secret

[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/

[server3]
username:cbiggles
password:yh^%#rest-of-my-password
"""

# Sample .pypirc in the legacy single-server ([server-login]) format.
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""

# Exact file content expected from PyPIRCCommand._store_pypirc().
WANTED = """\
[distutils]
index-servers =
    pypi

[pypi]
username:tarek
password:xxx
"""
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class BasePyPIRCCommandTestCase(support.TempdirManager,
                                support.LoggingSilencer,
                                support.EnvironGuard,
                                unittest.TestCase):
    """Shared fixture: points HOME/USERPROFILE at a temporary directory
    so the .pypirc under test never touches the real user config."""

    def setUp(self):
        """Patches the environment."""
        super(BasePyPIRCCommandTestCase, self).setUp()
        self.tmp_dir = self.mkdtemp()
        # PyPIRCCommand._get_rc_file() resolves .pypirc relative to the
        # user's home directory (USERPROFILE on Windows).
        os.environ['HOME'] = self.tmp_dir
        os.environ['USERPROFILE'] = self.tmp_dir
        self.rc = os.path.join(self.tmp_dir, '.pypirc')
        self.dist = Distribution()

        # Minimal concrete subclass: PyPIRCCommand's option hooks are
        # abstract, so stub them out.
        class command(PyPIRCCommand):
            def __init__(self, dist):
                PyPIRCCommand.__init__(self, dist)
            def initialize_options(self):
                pass
            finalize_options = initialize_options

        self._cmd = command
        # Silence distutils logging below WARN for the duration.
        self.old_threshold = set_threshold(WARN)

    def tearDown(self):
        """Removes the patch."""
        set_threshold(self.old_threshold)
        super(BasePyPIRCCommandTestCase, self).tearDown()
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class PyPIRCCommandTestCase(BasePyPIRCCommandTestCase):
    """Tests for reading, writing and interpolating .pypirc files."""

    def test_server_registration(self):
        # This test makes sure PyPIRCCommand knows how to:
        # 1. handle several sections in .pypirc
        # 2. handle the old format

        # new format
        self.write_file(self.rc, PYPIRC)
        cmd = self._cmd(self.dist)
        config = cmd._read_pypirc()

        config = list(sorted(config.items()))
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server1'), ('username', 'me')]
        self.assertEqual(config, waited)

        # old format
        self.write_file(self.rc, PYPIRC_OLD)
        config = cmd._read_pypirc()
        config = list(sorted(config.items()))
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server-login'), ('username', 'tarek')]
        self.assertEqual(config, waited)

    def test_server_empty_registration(self):
        # With no .pypirc present, _store_pypirc() must create one
        # with exactly the WANTED content.
        cmd = self._cmd(self.dist)
        rc = cmd._get_rc_file()
        self.assertFalse(os.path.exists(rc))
        cmd._store_pypirc('tarek', 'xxx')
        self.assertTrue(os.path.exists(rc))
        f = open(rc)
        try:
            content = f.read()
            self.assertEqual(content, WANTED)
        finally:
            f.close()

    def test_config_interpolation(self):
        # using the % character in .pypirc should not raise an error (#20120)
        self.write_file(self.rc, PYPIRC)
        cmd = self._cmd(self.dist)
        cmd.repository = 'server3'
        config = cmd._read_pypirc()

        config = list(sorted(config.items()))
        waited = [('password', 'yh^%#rest-of-my-password'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server3'), ('username', 'cbiggles')]
        self.assertEqual(config, waited)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(PyPIRCCommandTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_cygwinccompiler.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.cygwinccompiler."""
|
| 2 |
+
import unittest
|
| 3 |
+
import sys
|
| 4 |
+
import os
|
| 5 |
+
from io import BytesIO
|
| 6 |
+
from test.support import run_unittest
|
| 7 |
+
|
| 8 |
+
from distutils import cygwinccompiler
|
| 9 |
+
from distutils.cygwinccompiler import (check_config_h,
|
| 10 |
+
CONFIG_H_OK, CONFIG_H_NOTOK,
|
| 11 |
+
CONFIG_H_UNCERTAIN, get_versions,
|
| 12 |
+
get_msvcr)
|
| 13 |
+
from distutils.tests import support
|
| 14 |
+
|
| 15 |
+
class FakePopen(object):
    """Stand-in for Popen used to fake compiler-version queries: known
    commands get canned bytes output from the test case's _exes map;
    anything else falls through to a real os.popen() call."""

    # Set by the test case's setUp so instances can reach its _exes dict.
    test_class = None

    def __init__(self, cmd, shell, stdout):
        # Only the program name (first token) selects the fake output.
        self.cmd = cmd.split()[0]
        exes = self.test_class._exes
        if self.cmd in exes:
            # issue #6438 in Python 3.x, Popen returns bytes
            self.stdout = BytesIO(exes[self.cmd])
        else:
            self.stdout = os.popen(cmd, 'r')
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class CygwinCCompilerTestCase(support.TempdirManager,
                              unittest.TestCase):
    """Tests for check_config_h(), get_versions() and get_msvcr(), run
    with sys.version, sysconfig and Popen monkey-patched."""

    def setUp(self):
        super(CygwinCCompilerTestCase, self).setUp()
        # Save sys.version: several tests overwrite it.
        self.version = sys.version
        self.python_h = os.path.join(self.mkdtemp(), 'python.h')
        from distutils import sysconfig
        # Redirect config-header lookup to our temporary python.h.
        self.old_get_config_h_filename = sysconfig.get_config_h_filename
        sysconfig.get_config_h_filename = self._get_config_h_filename
        # Pretend only the executables registered in self._exes exist.
        self.old_find_executable = cygwinccompiler.find_executable
        cygwinccompiler.find_executable = self._find_executable
        self._exes = {}
        # Replace Popen so version queries return canned output.
        self.old_popen = cygwinccompiler.Popen
        FakePopen.test_class = self
        cygwinccompiler.Popen = FakePopen

    def tearDown(self):
        # Undo every patch applied in setUp.
        sys.version = self.version
        from distutils import sysconfig
        sysconfig.get_config_h_filename = self.old_get_config_h_filename
        cygwinccompiler.find_executable = self.old_find_executable
        cygwinccompiler.Popen = self.old_popen
        super(CygwinCCompilerTestCase, self).tearDown()

    def _get_config_h_filename(self):
        # Replacement for sysconfig.get_config_h_filename().
        return self.python_h

    def _find_executable(self, name):
        # Replacement for find_executable(): only names registered in
        # self._exes are "found".
        if name in self._exes:
            return name
        return None

    def test_check_config_h(self):

        # check_config_h looks for "GCC" in sys.version first
        # returns CONFIG_H_OK if found
        sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) \n[GCC '
                       '4.0.1 (Apple Computer, Inc. build 5370)]')

        self.assertEqual(check_config_h()[0], CONFIG_H_OK)

        # then it tries to see if it can find "__GNUC__" in pyconfig.h
        sys.version = 'something without the *CC word'

        # if the file doesn't exist it returns CONFIG_H_UNCERTAIN
        self.assertEqual(check_config_h()[0], CONFIG_H_UNCERTAIN)

        # if it exists but does not contain __GNUC__, it returns CONFIG_H_NOTOK
        self.write_file(self.python_h, 'xxx')
        self.assertEqual(check_config_h()[0], CONFIG_H_NOTOK)

        # and CONFIG_H_OK if __GNUC__ is found
        self.write_file(self.python_h, 'xxx __GNUC__ xxx')
        self.assertEqual(check_config_h()[0], CONFIG_H_OK)

    def test_get_versions(self):

        # get_versions calls distutils.spawn.find_executable on
        # 'gcc', 'ld' and 'dllwrap'
        self.assertEqual(get_versions(), (None, None, None))

        # Let's fake we have 'gcc' and it returns '3.4.5'
        self._exes['gcc'] = b'gcc (GCC) 3.4.5 (mingw special)\nFSF'
        res = get_versions()
        self.assertEqual(str(res[0]), '3.4.5')

        # and let's see what happens when the version
        # doesn't match the regular expression
        # (\d+\.\d+(\.\d+)*)
        self._exes['gcc'] = b'very strange output'
        res = get_versions()
        self.assertEqual(res[0], None)

        # same thing for ld
        self._exes['ld'] = b'GNU ld version 2.17.50 20060824'
        res = get_versions()
        self.assertEqual(str(res[1]), '2.17.50')
        self._exes['ld'] = b'@(#)PROGRAM:ld PROJECT:ld64-77'
        res = get_versions()
        self.assertEqual(res[1], None)

        # and dllwrap
        self._exes['dllwrap'] = b'GNU dllwrap 2.17.50 20060824\nFSF'
        res = get_versions()
        self.assertEqual(str(res[2]), '2.17.50')
        self._exes['dllwrap'] = b'Cheese Wrap'
        res = get_versions()
        self.assertEqual(res[2], None)

    def test_get_msvcr(self):
        # get_msvcr() maps the MSC version embedded in sys.version to
        # the matching msvcr runtime library name.

        # none
        sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) '
                       '\n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]')
        self.assertEqual(get_msvcr(), None)

        # MSVC 7.0
        sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
                       '[MSC v.1300 32 bits (Intel)]')
        self.assertEqual(get_msvcr(), ['msvcr70'])

        # MSVC 7.1
        sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
                       '[MSC v.1310 32 bits (Intel)]')
        self.assertEqual(get_msvcr(), ['msvcr71'])

        # VS2005 / MSVC 8.0
        sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
                       '[MSC v.1400 32 bits (Intel)]')
        self.assertEqual(get_msvcr(), ['msvcr80'])

        # VS2008 / MSVC 9.0
        sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
                       '[MSC v.1500 32 bits (Intel)]')
        self.assertEqual(get_msvcr(), ['msvcr90'])

        # unknown
        sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
                       '[MSC v.1999 32 bits (Intel)]')
        self.assertRaises(ValueError, get_msvcr)
|
| 149 |
+
|
| 150 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(CygwinCCompilerTestCase)

if __name__ == '__main__':
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_dir_util.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.dir_util."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import stat
|
| 5 |
+
import sys
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
from distutils import dir_util, errors
|
| 9 |
+
from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree,
|
| 10 |
+
ensure_relative)
|
| 11 |
+
|
| 12 |
+
from distutils import log
|
| 13 |
+
from distutils.tests import support
|
| 14 |
+
from test.support import run_unittest
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
    """Tests for mkpath/remove_tree/create_tree/copy_tree and
    ensure_relative, capturing log.info output to check verbosity."""

    def _log(self, msg, *args):
        # Replacement for log.info: record the interpolated message.
        if len(args) > 0:
            self._logs.append(msg % args)
        else:
            self._logs.append(msg)

    def setUp(self):
        super(DirUtilTestCase, self).setUp()
        self._logs = []
        tmp_dir = self.mkdtemp()
        self.root_target = os.path.join(tmp_dir, 'deep')
        self.target = os.path.join(self.root_target, 'here')
        self.target2 = os.path.join(tmp_dir, 'deep2')
        # Capture distutils logging into self._logs.
        self.old_log = log.info
        log.info = self._log

    def tearDown(self):
        log.info = self.old_log
        super(DirUtilTestCase, self).tearDown()

    def test_mkpath_remove_tree_verbosity(self):
        # verbose=0 must produce no log output.
        mkpath(self.target, verbose=0)
        wanted = []
        self.assertEqual(self._logs, wanted)
        remove_tree(self.root_target, verbose=0)

        # verbose=1 logs one 'creating' line per directory made.
        mkpath(self.target, verbose=1)
        wanted = ['creating %s' % self.root_target,
                  'creating %s' % self.target]
        self.assertEqual(self._logs, wanted)
        self._logs = []

        remove_tree(self.root_target, verbose=1)
        wanted = ["removing '%s' (and everything under it)" % self.root_target]
        self.assertEqual(self._logs, wanted)

    @unittest.skipIf(sys.platform.startswith('win'),
                     "This test is only appropriate for POSIX-like systems.")
    def test_mkpath_with_custom_mode(self):
        # Get and set the current umask value for testing mode bits.
        umask = os.umask(0o002)
        os.umask(umask)
        mkpath(self.target, 0o700)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask)
        mkpath(self.target2, 0o555)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask)

    def test_create_tree_verbosity(self):
        # verbose=0: silent.
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
        self.assertEqual(self._logs, [])
        remove_tree(self.root_target, verbose=0)

        # verbose=1: only the base directory creation is logged.
        wanted = ['creating %s' % self.root_target]
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
        self.assertEqual(self._logs, wanted)

        remove_tree(self.root_target, verbose=0)

    def test_copy_tree_verbosity(self):
        # Copying an empty tree with verbose=0 logs nothing.
        mkpath(self.target, verbose=0)

        copy_tree(self.target, self.target2, verbose=0)
        self.assertEqual(self._logs, [])

        remove_tree(self.root_target, verbose=0)

        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        with open(a_file, 'w') as f:
            f.write('some content')

        # verbose=1 logs one 'copying' line per file copied.
        wanted = ['copying %s -> %s' % (a_file, self.target2)]
        copy_tree(self.target, self.target2, verbose=1)
        self.assertEqual(self._logs, wanted)

        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)

    def test_copy_tree_skips_nfs_temp_files(self):
        mkpath(self.target, verbose=0)

        # .nfs* files are NFS client artifacts and must not be copied.
        a_file = os.path.join(self.target, 'ok.txt')
        nfs_file = os.path.join(self.target, '.nfs123abc')
        for f in a_file, nfs_file:
            with open(f, 'w') as fh:
                fh.write('some content')

        copy_tree(self.target, self.target2)
        self.assertEqual(os.listdir(self.target2), ['ok.txt'])

        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)

    def test_ensure_relative(self):
        # ensure_relative strips the leading separator (or drive root).
        if os.sep == '/':
            self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
            self.assertEqual(ensure_relative('some/path'), 'some/path')
        else:   # \\
            self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo')
            self.assertEqual(ensure_relative('home\\foo'), 'home\\foo')

    def test_copy_tree_exception_in_listdir(self):
        """
        An exception in listdir should raise a DistutilsFileError
        """
        with patch("os.listdir", side_effect=OSError()), \
             self.assertRaises(errors.DistutilsFileError):
            src = self.tempdirs[-1]
            dir_util.copy_tree(src, None)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(DirUtilTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_extension.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.extension."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import warnings
|
| 5 |
+
|
| 6 |
+
from test.support import run_unittest
|
| 7 |
+
from test.support.warnings_helper import check_warnings
|
| 8 |
+
from distutils.extension import read_setup_file, Extension
|
| 9 |
+
|
| 10 |
+
class ExtensionTestCase(unittest.TestCase):
    """Tests for read_setup_file() and the Extension constructor."""

    def test_read_setup_file(self):
        # trying to read a Setup file
        # (sample extracted from the PyGame project)
        setup = os.path.join(os.path.dirname(__file__), 'Setup.sample')

        exts = read_setup_file(setup)
        names = [ext.name for ext in exts]
        names.sort()

        # here are the extensions read_setup_file should have created
        # out of the file
        wanted = ['_arraysurfarray', '_camera', '_numericsndarray',
                  '_numericsurfarray', 'base', 'bufferproxy', 'cdrom',
                  'color', 'constants', 'display', 'draw', 'event',
                  'fastevent', 'font', 'gfxdraw', 'image', 'imageext',
                  'joystick', 'key', 'mask', 'mixer', 'mixer_music',
                  'mouse', 'movie', 'overlay', 'pixelarray', 'pypm',
                  'rect', 'rwobject', 'scrap', 'surface', 'surflock',
                  'time', 'transform']

        self.assertEqual(names, wanted)

    def test_extension_init(self):
        # the first argument, which is the name, must be a string
        self.assertRaises(AssertionError, Extension, 1, [])
        ext = Extension('name', [])
        self.assertEqual(ext.name, 'name')

        # the second argument, which is the list of files, must
        # be a list of strings
        self.assertRaises(AssertionError, Extension, 'name', 'file')
        self.assertRaises(AssertionError, Extension, 'name', ['file', 1])
        ext = Extension('name', ['file1', 'file2'])
        self.assertEqual(ext.sources, ['file1', 'file2'])

        # others arguments have defaults
        for attr in ('include_dirs', 'define_macros', 'undef_macros',
                     'library_dirs', 'libraries', 'runtime_library_dirs',
                     'extra_objects', 'extra_compile_args', 'extra_link_args',
                     'export_symbols', 'swig_opts', 'depends'):
            self.assertEqual(getattr(ext, attr), [])

        self.assertEqual(ext.language, None)
        self.assertEqual(ext.optional, None)

        # if there are unknown keyword options, warn about them
        with check_warnings() as w:
            warnings.simplefilter('always')
            ext = Extension('name', ['file1', 'file2'], chic=True)

        self.assertEqual(len(w.warnings), 1)
        self.assertEqual(str(w.warnings[0].message),
                         "Unknown Extension options: 'chic'")
|
| 65 |
+
|
| 66 |
+
def test_suite():
    """Return the suite of all tests in this module."""
    return unittest.makeSuite(ExtensionTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_file_util.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.file_util."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import errno
|
| 5 |
+
from unittest.mock import patch
|
| 6 |
+
|
| 7 |
+
from distutils.file_util import move_file, copy_file
|
| 8 |
+
from distutils import log
|
| 9 |
+
from distutils.tests import support
|
| 10 |
+
from distutils.errors import DistutilsFileError
|
| 11 |
+
from test.support import run_unittest
|
| 12 |
+
from test.support.os_helper import unlink
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class FileUtilTestCase(support.TempdirManager, unittest.TestCase):
|
| 16 |
+
|
| 17 |
+
def _log(self, msg, *args):
|
| 18 |
+
if len(args) > 0:
|
| 19 |
+
self._logs.append(msg % args)
|
| 20 |
+
else:
|
| 21 |
+
self._logs.append(msg)
|
| 22 |
+
|
| 23 |
+
def setUp(self):
|
| 24 |
+
super(FileUtilTestCase, self).setUp()
|
| 25 |
+
self._logs = []
|
| 26 |
+
self.old_log = log.info
|
| 27 |
+
log.info = self._log
|
| 28 |
+
tmp_dir = self.mkdtemp()
|
| 29 |
+
self.source = os.path.join(tmp_dir, 'f1')
|
| 30 |
+
self.target = os.path.join(tmp_dir, 'f2')
|
| 31 |
+
self.target_dir = os.path.join(tmp_dir, 'd1')
|
| 32 |
+
|
| 33 |
+
def tearDown(self):
|
| 34 |
+
log.info = self.old_log
|
| 35 |
+
super(FileUtilTestCase, self).tearDown()
|
| 36 |
+
|
| 37 |
+
def test_move_file_verbosity(self):
|
| 38 |
+
f = open(self.source, 'w')
|
| 39 |
+
try:
|
| 40 |
+
f.write('some content')
|
| 41 |
+
finally:
|
| 42 |
+
f.close()
|
| 43 |
+
|
| 44 |
+
move_file(self.source, self.target, verbose=0)
|
| 45 |
+
wanted = []
|
| 46 |
+
self.assertEqual(self._logs, wanted)
|
| 47 |
+
|
| 48 |
+
# back to original state
|
| 49 |
+
move_file(self.target, self.source, verbose=0)
|
| 50 |
+
|
| 51 |
+
move_file(self.source, self.target, verbose=1)
|
| 52 |
+
wanted = ['moving %s -> %s' % (self.source, self.target)]
|
| 53 |
+
self.assertEqual(self._logs, wanted)
|
| 54 |
+
|
| 55 |
+
# back to original state
|
| 56 |
+
move_file(self.target, self.source, verbose=0)
|
| 57 |
+
|
| 58 |
+
self._logs = []
|
| 59 |
+
# now the target is a dir
|
| 60 |
+
os.mkdir(self.target_dir)
|
| 61 |
+
move_file(self.source, self.target_dir, verbose=1)
|
| 62 |
+
wanted = ['moving %s -> %s' % (self.source, self.target_dir)]
|
| 63 |
+
self.assertEqual(self._logs, wanted)
|
| 64 |
+
|
| 65 |
+
def test_move_file_exception_unpacking_rename(self):
|
| 66 |
+
# see issue 22182
|
| 67 |
+
with patch("os.rename", side_effect=OSError("wrong", 1)), \
|
| 68 |
+
self.assertRaises(DistutilsFileError):
|
| 69 |
+
with open(self.source, 'w') as fobj:
|
| 70 |
+
fobj.write('spam eggs')
|
| 71 |
+
move_file(self.source, self.target, verbose=0)
|
| 72 |
+
|
| 73 |
+
def test_move_file_exception_unpacking_unlink(self):
|
| 74 |
+
# see issue 22182
|
| 75 |
+
with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \
|
| 76 |
+
patch("os.unlink", side_effect=OSError("wrong", 1)), \
|
| 77 |
+
self.assertRaises(DistutilsFileError):
|
| 78 |
+
with open(self.source, 'w') as fobj:
|
| 79 |
+
fobj.write('spam eggs')
|
| 80 |
+
move_file(self.source, self.target, verbose=0)
|
| 81 |
+
|
| 82 |
+
def test_copy_file_hard_link(self):
|
| 83 |
+
with open(self.source, 'w') as f:
|
| 84 |
+
f.write('some content')
|
| 85 |
+
# Check first that copy_file() will not fall back on copying the file
|
| 86 |
+
# instead of creating the hard link.
|
| 87 |
+
try:
|
| 88 |
+
os.link(self.source, self.target)
|
| 89 |
+
except OSError as e:
|
| 90 |
+
self.skipTest('os.link: %s' % e)
|
| 91 |
+
else:
|
| 92 |
+
unlink(self.target)
|
| 93 |
+
st = os.stat(self.source)
|
| 94 |
+
copy_file(self.source, self.target, link='hard')
|
| 95 |
+
st2 = os.stat(self.source)
|
| 96 |
+
st3 = os.stat(self.target)
|
| 97 |
+
self.assertTrue(os.path.samestat(st, st2), (st, st2))
|
| 98 |
+
self.assertTrue(os.path.samestat(st2, st3), (st2, st3))
|
| 99 |
+
with open(self.source, 'r') as f:
|
| 100 |
+
self.assertEqual(f.read(), 'some content')
|
| 101 |
+
|
| 102 |
+
def test_copy_file_hard_link_failure(self):
|
| 103 |
+
# If hard linking fails, copy_file() falls back on copying file
|
| 104 |
+
# (some special filesystems don't support hard linking even under
|
| 105 |
+
# Unix, see issue #8876).
|
| 106 |
+
with open(self.source, 'w') as f:
|
| 107 |
+
f.write('some content')
|
| 108 |
+
st = os.stat(self.source)
|
| 109 |
+
with patch("os.link", side_effect=OSError(0, "linking unsupported")):
|
| 110 |
+
copy_file(self.source, self.target, link='hard')
|
| 111 |
+
st2 = os.stat(self.source)
|
| 112 |
+
st3 = os.stat(self.target)
|
| 113 |
+
self.assertTrue(os.path.samestat(st, st2), (st, st2))
|
| 114 |
+
self.assertFalse(os.path.samestat(st2, st3), (st2, st3))
|
| 115 |
+
for fn in (self.source, self.target):
|
| 116 |
+
with open(fn, 'r') as f:
|
| 117 |
+
self.assertEqual(f.read(), 'some content')
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def test_suite():
|
| 121 |
+
return unittest.makeSuite(FileUtilTestCase)
|
| 122 |
+
|
| 123 |
+
if __name__ == "__main__":
|
| 124 |
+
run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_install.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.install."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import unittest
|
| 6 |
+
import site
|
| 7 |
+
|
| 8 |
+
from test.support import captured_stdout, run_unittest
|
| 9 |
+
|
| 10 |
+
from distutils import sysconfig
|
| 11 |
+
from distutils.command.install import install, HAS_USER_SITE
|
| 12 |
+
from distutils.command import install as install_module
|
| 13 |
+
from distutils.command.build_ext import build_ext
|
| 14 |
+
from distutils.command.install import INSTALL_SCHEMES
|
| 15 |
+
from distutils.core import Distribution
|
| 16 |
+
from distutils.errors import DistutilsOptionError
|
| 17 |
+
from distutils.extension import Extension
|
| 18 |
+
|
| 19 |
+
from distutils.tests import support
|
| 20 |
+
from test import support as test_support
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _make_ext_name(modname):
|
| 24 |
+
return modname + sysconfig.get_config_var('EXT_SUFFIX')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class InstallTestCase(support.TempdirManager,
|
| 28 |
+
support.EnvironGuard,
|
| 29 |
+
support.LoggingSilencer,
|
| 30 |
+
unittest.TestCase):
|
| 31 |
+
|
| 32 |
+
def setUp(self):
|
| 33 |
+
super().setUp()
|
| 34 |
+
self._backup_config_vars = dict(sysconfig._config_vars)
|
| 35 |
+
|
| 36 |
+
def tearDown(self):
|
| 37 |
+
super().tearDown()
|
| 38 |
+
sysconfig._config_vars.clear()
|
| 39 |
+
sysconfig._config_vars.update(self._backup_config_vars)
|
| 40 |
+
|
| 41 |
+
def test_home_installation_scheme(self):
|
| 42 |
+
# This ensure two things:
|
| 43 |
+
# - that --home generates the desired set of directory names
|
| 44 |
+
# - test --home is supported on all platforms
|
| 45 |
+
builddir = self.mkdtemp()
|
| 46 |
+
destination = os.path.join(builddir, "installation")
|
| 47 |
+
|
| 48 |
+
dist = Distribution({"name": "foopkg"})
|
| 49 |
+
# script_name need not exist, it just need to be initialized
|
| 50 |
+
dist.script_name = os.path.join(builddir, "setup.py")
|
| 51 |
+
dist.command_obj["build"] = support.DummyCommand(
|
| 52 |
+
build_base=builddir,
|
| 53 |
+
build_lib=os.path.join(builddir, "lib"),
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
cmd = install(dist)
|
| 57 |
+
cmd.home = destination
|
| 58 |
+
cmd.ensure_finalized()
|
| 59 |
+
|
| 60 |
+
self.assertEqual(cmd.install_base, destination)
|
| 61 |
+
self.assertEqual(cmd.install_platbase, destination)
|
| 62 |
+
|
| 63 |
+
def check_path(got, expected):
|
| 64 |
+
got = os.path.normpath(got)
|
| 65 |
+
expected = os.path.normpath(expected)
|
| 66 |
+
self.assertEqual(got, expected)
|
| 67 |
+
|
| 68 |
+
libdir = os.path.join(destination, "lib", "python")
|
| 69 |
+
check_path(cmd.install_lib, libdir)
|
| 70 |
+
platlibdir = os.path.join(destination, sys.platlibdir, "python")
|
| 71 |
+
check_path(cmd.install_platlib, platlibdir)
|
| 72 |
+
check_path(cmd.install_purelib, libdir)
|
| 73 |
+
check_path(cmd.install_headers,
|
| 74 |
+
os.path.join(destination, "include", "python", "foopkg"))
|
| 75 |
+
check_path(cmd.install_scripts, os.path.join(destination, "bin"))
|
| 76 |
+
check_path(cmd.install_data, destination)
|
| 77 |
+
|
| 78 |
+
@unittest.skipUnless(HAS_USER_SITE, 'need user site')
|
| 79 |
+
def test_user_site(self):
|
| 80 |
+
# test install with --user
|
| 81 |
+
# preparing the environment for the test
|
| 82 |
+
self.old_user_base = site.USER_BASE
|
| 83 |
+
self.old_user_site = site.USER_SITE
|
| 84 |
+
self.tmpdir = self.mkdtemp()
|
| 85 |
+
self.user_base = os.path.join(self.tmpdir, 'B')
|
| 86 |
+
self.user_site = os.path.join(self.tmpdir, 'S')
|
| 87 |
+
site.USER_BASE = self.user_base
|
| 88 |
+
site.USER_SITE = self.user_site
|
| 89 |
+
install_module.USER_BASE = self.user_base
|
| 90 |
+
install_module.USER_SITE = self.user_site
|
| 91 |
+
|
| 92 |
+
def _expanduser(path):
|
| 93 |
+
return self.tmpdir
|
| 94 |
+
self.old_expand = os.path.expanduser
|
| 95 |
+
os.path.expanduser = _expanduser
|
| 96 |
+
|
| 97 |
+
def cleanup():
|
| 98 |
+
site.USER_BASE = self.old_user_base
|
| 99 |
+
site.USER_SITE = self.old_user_site
|
| 100 |
+
install_module.USER_BASE = self.old_user_base
|
| 101 |
+
install_module.USER_SITE = self.old_user_site
|
| 102 |
+
os.path.expanduser = self.old_expand
|
| 103 |
+
|
| 104 |
+
self.addCleanup(cleanup)
|
| 105 |
+
|
| 106 |
+
if HAS_USER_SITE:
|
| 107 |
+
for key in ('nt_user', 'unix_user'):
|
| 108 |
+
self.assertIn(key, INSTALL_SCHEMES)
|
| 109 |
+
|
| 110 |
+
dist = Distribution({'name': 'xx'})
|
| 111 |
+
cmd = install(dist)
|
| 112 |
+
|
| 113 |
+
# making sure the user option is there
|
| 114 |
+
options = [name for name, short, lable in
|
| 115 |
+
cmd.user_options]
|
| 116 |
+
self.assertIn('user', options)
|
| 117 |
+
|
| 118 |
+
# setting a value
|
| 119 |
+
cmd.user = 1
|
| 120 |
+
|
| 121 |
+
# user base and site shouldn't be created yet
|
| 122 |
+
self.assertFalse(os.path.exists(self.user_base))
|
| 123 |
+
self.assertFalse(os.path.exists(self.user_site))
|
| 124 |
+
|
| 125 |
+
# let's run finalize
|
| 126 |
+
cmd.ensure_finalized()
|
| 127 |
+
|
| 128 |
+
# now they should
|
| 129 |
+
self.assertTrue(os.path.exists(self.user_base))
|
| 130 |
+
self.assertTrue(os.path.exists(self.user_site))
|
| 131 |
+
|
| 132 |
+
self.assertIn('userbase', cmd.config_vars)
|
| 133 |
+
self.assertIn('usersite', cmd.config_vars)
|
| 134 |
+
|
| 135 |
+
def test_handle_extra_path(self):
|
| 136 |
+
dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
|
| 137 |
+
cmd = install(dist)
|
| 138 |
+
|
| 139 |
+
# two elements
|
| 140 |
+
cmd.handle_extra_path()
|
| 141 |
+
self.assertEqual(cmd.extra_path, ['path', 'dirs'])
|
| 142 |
+
self.assertEqual(cmd.extra_dirs, 'dirs')
|
| 143 |
+
self.assertEqual(cmd.path_file, 'path')
|
| 144 |
+
|
| 145 |
+
# one element
|
| 146 |
+
cmd.extra_path = ['path']
|
| 147 |
+
cmd.handle_extra_path()
|
| 148 |
+
self.assertEqual(cmd.extra_path, ['path'])
|
| 149 |
+
self.assertEqual(cmd.extra_dirs, 'path')
|
| 150 |
+
self.assertEqual(cmd.path_file, 'path')
|
| 151 |
+
|
| 152 |
+
# none
|
| 153 |
+
dist.extra_path = cmd.extra_path = None
|
| 154 |
+
cmd.handle_extra_path()
|
| 155 |
+
self.assertEqual(cmd.extra_path, None)
|
| 156 |
+
self.assertEqual(cmd.extra_dirs, '')
|
| 157 |
+
self.assertEqual(cmd.path_file, None)
|
| 158 |
+
|
| 159 |
+
# three elements (no way !)
|
| 160 |
+
cmd.extra_path = 'path,dirs,again'
|
| 161 |
+
self.assertRaises(DistutilsOptionError, cmd.handle_extra_path)
|
| 162 |
+
|
| 163 |
+
def test_finalize_options(self):
|
| 164 |
+
dist = Distribution({'name': 'xx'})
|
| 165 |
+
cmd = install(dist)
|
| 166 |
+
|
| 167 |
+
# must supply either prefix/exec-prefix/home or
|
| 168 |
+
# install-base/install-platbase -- not both
|
| 169 |
+
cmd.prefix = 'prefix'
|
| 170 |
+
cmd.install_base = 'base'
|
| 171 |
+
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
|
| 172 |
+
|
| 173 |
+
# must supply either home or prefix/exec-prefix -- not both
|
| 174 |
+
cmd.install_base = None
|
| 175 |
+
cmd.home = 'home'
|
| 176 |
+
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
|
| 177 |
+
|
| 178 |
+
# can't combine user with prefix/exec_prefix/home or
|
| 179 |
+
# install_(plat)base
|
| 180 |
+
cmd.prefix = None
|
| 181 |
+
cmd.user = 'user'
|
| 182 |
+
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
|
| 183 |
+
|
| 184 |
+
def test_record(self):
|
| 185 |
+
install_dir = self.mkdtemp()
|
| 186 |
+
project_dir, dist = self.create_dist(py_modules=['hello'],
|
| 187 |
+
scripts=['sayhi'])
|
| 188 |
+
os.chdir(project_dir)
|
| 189 |
+
self.write_file('hello.py', "def main(): print('o hai')")
|
| 190 |
+
self.write_file('sayhi', 'from hello import main; main()')
|
| 191 |
+
|
| 192 |
+
cmd = install(dist)
|
| 193 |
+
dist.command_obj['install'] = cmd
|
| 194 |
+
cmd.root = install_dir
|
| 195 |
+
cmd.record = os.path.join(project_dir, 'filelist')
|
| 196 |
+
cmd.ensure_finalized()
|
| 197 |
+
cmd.run()
|
| 198 |
+
|
| 199 |
+
f = open(cmd.record)
|
| 200 |
+
try:
|
| 201 |
+
content = f.read()
|
| 202 |
+
finally:
|
| 203 |
+
f.close()
|
| 204 |
+
|
| 205 |
+
found = [os.path.basename(line) for line in content.splitlines()]
|
| 206 |
+
expected = ['hello.py', 'hello.%s.pyc' % sys.implementation.cache_tag,
|
| 207 |
+
'sayhi',
|
| 208 |
+
'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
|
| 209 |
+
self.assertEqual(found, expected)
|
| 210 |
+
|
| 211 |
+
def test_record_extensions(self):
|
| 212 |
+
cmd = test_support.missing_compiler_executable()
|
| 213 |
+
if cmd is not None:
|
| 214 |
+
self.skipTest('The %r command is not found' % cmd)
|
| 215 |
+
install_dir = self.mkdtemp()
|
| 216 |
+
project_dir, dist = self.create_dist(ext_modules=[
|
| 217 |
+
Extension('xx', ['xxmodule.c'])])
|
| 218 |
+
os.chdir(project_dir)
|
| 219 |
+
support.copy_xxmodule_c(project_dir)
|
| 220 |
+
|
| 221 |
+
buildextcmd = build_ext(dist)
|
| 222 |
+
support.fixup_build_ext(buildextcmd)
|
| 223 |
+
buildextcmd.ensure_finalized()
|
| 224 |
+
|
| 225 |
+
cmd = install(dist)
|
| 226 |
+
dist.command_obj['install'] = cmd
|
| 227 |
+
dist.command_obj['build_ext'] = buildextcmd
|
| 228 |
+
cmd.root = install_dir
|
| 229 |
+
cmd.record = os.path.join(project_dir, 'filelist')
|
| 230 |
+
cmd.ensure_finalized()
|
| 231 |
+
cmd.run()
|
| 232 |
+
|
| 233 |
+
f = open(cmd.record)
|
| 234 |
+
try:
|
| 235 |
+
content = f.read()
|
| 236 |
+
finally:
|
| 237 |
+
f.close()
|
| 238 |
+
|
| 239 |
+
found = [os.path.basename(line) for line in content.splitlines()]
|
| 240 |
+
expected = [_make_ext_name('xx'),
|
| 241 |
+
'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
|
| 242 |
+
self.assertEqual(found, expected)
|
| 243 |
+
|
| 244 |
+
def test_debug_mode(self):
|
| 245 |
+
# this covers the code called when DEBUG is set
|
| 246 |
+
old_logs_len = len(self.logs)
|
| 247 |
+
install_module.DEBUG = True
|
| 248 |
+
try:
|
| 249 |
+
with captured_stdout():
|
| 250 |
+
self.test_record()
|
| 251 |
+
finally:
|
| 252 |
+
install_module.DEBUG = False
|
| 253 |
+
self.assertGreater(len(self.logs), old_logs_len)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def test_suite():
|
| 257 |
+
return unittest.makeSuite(InstallTestCase)
|
| 258 |
+
|
| 259 |
+
if __name__ == "__main__":
|
| 260 |
+
run_unittest(test_suite())
|
parrot/lib/python3.10/distutils/tests/test_install_headers.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.install_headers."""
|
| 2 |
+
import os
|
| 3 |
+
import unittest
|
| 4 |
+
|
| 5 |
+
from distutils.command.install_headers import install_headers
|
| 6 |
+
from distutils.tests import support
|
| 7 |
+
from test.support import run_unittest
|
| 8 |
+
|
| 9 |
+
class InstallHeadersTestCase(support.TempdirManager,
|
| 10 |
+
support.LoggingSilencer,
|
| 11 |
+
support.EnvironGuard,
|
| 12 |
+
unittest.TestCase):
|
| 13 |
+
|
| 14 |
+
def test_simple_run(self):
|
| 15 |
+
# we have two headers
|
| 16 |
+
header_list = self.mkdtemp()
|
| 17 |
+
header1 = os.path.join(header_list, 'header1')
|
| 18 |
+
header2 = os.path.join(header_list, 'header2')
|
| 19 |
+
self.write_file(header1)
|
| 20 |
+
self.write_file(header2)
|
| 21 |
+
headers = [header1, header2]
|
| 22 |
+
|
| 23 |
+
pkg_dir, dist = self.create_dist(headers=headers)
|
| 24 |
+
cmd = install_headers(dist)
|
| 25 |
+
self.assertEqual(cmd.get_inputs(), headers)
|
| 26 |
+
|
| 27 |
+
# let's run the command
|
| 28 |
+
cmd.install_dir = os.path.join(pkg_dir, 'inst')
|
| 29 |
+
cmd.ensure_finalized()
|
| 30 |
+
cmd.run()
|
| 31 |
+
|
| 32 |
+
# let's check the results
|
| 33 |
+
self.assertEqual(len(cmd.get_outputs()), 2)
|
| 34 |
+
|
| 35 |
+
def test_suite():
|
| 36 |
+
return unittest.makeSuite(InstallHeadersTestCase)
|
| 37 |
+
|
| 38 |
+
if __name__ == "__main__":
|
| 39 |
+
run_unittest(test_suite())
|