Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4 +3 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/AUTHORS +48 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/INSTALLER +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/LICENSE +30 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/METADATA +33 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/RECORD +83 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/REQUESTED +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/WHEEL +5 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/top_level.txt +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/accelerate-0.11.0.dist-info/INSTALLER +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/accelerate-0.11.0.dist-info/LICENSE +201 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/AUTHORS.md +24 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/INSTALLER +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/LICENSE +29 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/METADATA +42 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/RECORD +33 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/REQUESTED +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/WHEEL +5 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/direct_url.json +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/entry_points.txt +2 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/top_level.txt +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/__init__.py +52 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/filesize.py +76 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/i18n.py +174 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/number.py +516 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/py.typed +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/time.py +580 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/__init__.py +40 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/_cext.pyi +234 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/py.typed +0 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/libpasteurize/__init__.py +1 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/libpasteurize/main.py +204 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/__main__.py +151 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/__meta__.py +49 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/blockparser.py +125 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/blockprocessors.py +623 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/core.py +407 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/inlinepatterns.py +892 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/postprocessors.py +134 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/preprocessors.py +82 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/serializers.py +189 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/test_tools.py +220 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/treeprocessors.py +436 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/__init__.py +7 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/py3builder.py +38 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/sexts.py +289 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/testers.py +533 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/oauthlib-3.2.0.dist-info/METADATA +181 -0
- my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/oauthlib-3.2.0.dist-info/RECORD +143 -0
.gitattributes
CHANGED
|
@@ -180,3 +180,6 @@ my_container_sandbox/workspace/anaconda3/pkgs/certifi-2024.2.2-pyhd8ed1ab_0.cond
|
|
| 180 |
my_container_sandbox/workspace/anaconda3/pkgs/sqlite-3.41.2-h5eee18b_0.conda filter=lfs diff=lfs merge=lfs -text
|
| 181 |
my_container_sandbox/workspace/anaconda3/pkgs/brotlipy-0.7.0-py39h27cfd23_1003.conda filter=lfs diff=lfs merge=lfs -text
|
| 182 |
my_container_sandbox/workspace/anaconda3/pkgs/xz-5.2.5-h7b6447c_0.conda filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
my_container_sandbox/workspace/anaconda3/pkgs/sqlite-3.41.2-h5eee18b_0.conda filter=lfs diff=lfs merge=lfs -text
|
| 181 |
my_container_sandbox/workspace/anaconda3/pkgs/brotlipy-0.7.0-py39h27cfd23_1003.conda filter=lfs diff=lfs merge=lfs -text
|
| 182 |
my_container_sandbox/workspace/anaconda3/pkgs/xz-5.2.5-h7b6447c_0.conda filter=lfs diff=lfs merge=lfs -text
|
| 183 |
+
my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4 filter=lfs diff=lfs merge=lfs -text
|
| 184 |
+
my_container_sandbox/workspace/anaconda3/pkgs/xz-5.4.6-h5eee18b_0.conda filter=lfs diff=lfs merge=lfs -text
|
| 185 |
+
my_container_sandbox/workspace/anaconda3/pkgs/libgomp-9.3.0-h5101ec6_17.conda filter=lfs diff=lfs merge=lfs -text
|
my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0c95d249f3d60d67dbd191174fe1d8b9f393b1887ae1a54b4988823c7e42a31
|
| 3 |
+
size 26650200
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/AUTHORS
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
GitPython was originally written by Michael Trier.
|
| 2 |
+
GitPython 0.2 was partially (re)written by Sebastian Thiel, based on 0.1.6 and git-dulwich.
|
| 3 |
+
|
| 4 |
+
Contributors are:
|
| 5 |
+
|
| 6 |
+
-Michael Trier <mtrier _at_ gmail.com>
|
| 7 |
+
-Alan Briolat
|
| 8 |
+
-Florian Apolloner <florian _at_ apolloner.eu>
|
| 9 |
+
-David Aguilar <davvid _at_ gmail.com>
|
| 10 |
+
-Jelmer Vernooij <jelmer _at_ samba.org>
|
| 11 |
+
-Steve Frécinaux <code _at_ istique.net>
|
| 12 |
+
-Kai Lautaportti <kai _at_ lautaportti.fi>
|
| 13 |
+
-Paul Sowden <paul _at_ idontsmoke.co.uk>
|
| 14 |
+
-Sebastian Thiel <byronimo _at_ gmail.com>
|
| 15 |
+
-Jonathan Chu <jonathan.chu _at_ me.com>
|
| 16 |
+
-Vincent Driessen <me _at_ nvie.com>
|
| 17 |
+
-Phil Elson <pelson _dot_ pub _at_ gmail.com>
|
| 18 |
+
-Bernard `Guyzmo` Pratz <guyzmo+gitpython+pub@m0g.net>
|
| 19 |
+
-Timothy B. Hartman <tbhartman _at_ gmail.com>
|
| 20 |
+
-Konstantin Popov <konstantin.popov.89 _at_ yandex.ru>
|
| 21 |
+
-Peter Jones <pjones _at_ redhat.com>
|
| 22 |
+
-Anson Mansfield <anson.mansfield _at_ gmail.com>
|
| 23 |
+
-Ken Odegard <ken.odegard _at_ gmail.com>
|
| 24 |
+
-Alexis Horgix Chotard
|
| 25 |
+
-Piotr Babij <piotr.babij _at_ gmail.com>
|
| 26 |
+
-Mikuláš Poul <mikulaspoul _at_ gmail.com>
|
| 27 |
+
-Charles Bouchard-Légaré <cblegare.atl _at_ ntis.ca>
|
| 28 |
+
-Yaroslav Halchenko <debian _at_ onerussian.com>
|
| 29 |
+
-Tim Swast <swast _at_ google.com>
|
| 30 |
+
-William Luc Ritchie
|
| 31 |
+
-David Host <hostdm _at_ outlook.com>
|
| 32 |
+
-A. Jesse Jiryu Davis <jesse _at_ emptysquare.net>
|
| 33 |
+
-Steven Whitman <ninloot _at_ gmail.com>
|
| 34 |
+
-Stefan Stancu <stefan.stancu _at_ gmail.com>
|
| 35 |
+
-César Izurieta <cesar _at_ caih.org>
|
| 36 |
+
-Arthur Milchior <arthur _at_ milchior.fr>
|
| 37 |
+
-Anil Khatri <anil.soccer.khatri _at_ gmail.com>
|
| 38 |
+
-JJ Graham <thetwoj _at_ gmail.com>
|
| 39 |
+
-Ben Thayer <ben _at_ benthayer.com>
|
| 40 |
+
-Dries Kennes <admin _at_ dries007.net>
|
| 41 |
+
-Pratik Anurag <panurag247365 _at_ gmail.com>
|
| 42 |
+
-Harmon <harmon.public _at_ gmail.com>
|
| 43 |
+
-Liam Beguin <liambeguin _at_ gmail.com>
|
| 44 |
+
-Ram Rachum <ram _at_ rachum.com>
|
| 45 |
+
-Alba Mendez <me _at_ alba.sh>
|
| 46 |
+
-Robert Westman <robert _at_ byteflux.io>
|
| 47 |
+
-Hugo van Kemenade
|
| 48 |
+
Portions derived from other open source works and are clearly marked.
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (C) 2008, 2009 Michael Trier and contributors
|
| 2 |
+
All rights reserved.
|
| 3 |
+
|
| 4 |
+
Redistribution and use in source and binary forms, with or without
|
| 5 |
+
modification, are permitted provided that the following conditions
|
| 6 |
+
are met:
|
| 7 |
+
|
| 8 |
+
* Redistributions of source code must retain the above copyright
|
| 9 |
+
notice, this list of conditions and the following disclaimer.
|
| 10 |
+
|
| 11 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 12 |
+
notice, this list of conditions and the following disclaimer in the
|
| 13 |
+
documentation and/or other materials provided with the distribution.
|
| 14 |
+
|
| 15 |
+
* Neither the name of the GitPython project nor the names of
|
| 16 |
+
its contributors may be used to endorse or promote products derived
|
| 17 |
+
from this software without specific prior written permission.
|
| 18 |
+
|
| 19 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 20 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 21 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 22 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 23 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 24 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
| 25 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
| 26 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
| 27 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/METADATA
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: GitPython
|
| 3 |
+
Version: 3.1.27
|
| 4 |
+
Summary: GitPython is a python library used to interact with Git repositories
|
| 5 |
+
Home-page: https://github.com/gitpython-developers/GitPython
|
| 6 |
+
Author: Sebastian Thiel, Michael Trier
|
| 7 |
+
Author-email: byronimo@gmail.com, mtrier@gmail.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Platform: UNKNOWN
|
| 10 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 11 |
+
Classifier: Environment :: Console
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 14 |
+
Classifier: Operating System :: OS Independent
|
| 15 |
+
Classifier: Operating System :: POSIX
|
| 16 |
+
Classifier: Operating System :: Microsoft :: Windows
|
| 17 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
| 18 |
+
Classifier: Typing :: Typed
|
| 19 |
+
Classifier: Programming Language :: Python
|
| 20 |
+
Classifier: Programming Language :: Python :: 3
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 25 |
+
Requires-Python: >=3.7
|
| 26 |
+
Description-Content-Type: text/markdown
|
| 27 |
+
License-File: LICENSE
|
| 28 |
+
License-File: AUTHORS
|
| 29 |
+
Requires-Dist: gitdb (<5,>=4.0.1)
|
| 30 |
+
Requires-Dist: typing-extensions (>=3.7.4.3) ; python_version < "3.8"
|
| 31 |
+
|
| 32 |
+
GitPython is a python library used to interact with Git repositories
|
| 33 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/RECORD
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
GitPython-3.1.27.dist-info/AUTHORS,sha256=vjnd09wZL3p1v8gB5lsk4nj-2nDyHcZEzY_MKrZQyco,1936
|
| 2 |
+
GitPython-3.1.27.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
GitPython-3.1.27.dist-info/LICENSE,sha256=_WV__CzvY9JceMq3gI1BTdA6KC5jiTSR_RHDL5i-Z_s,1521
|
| 4 |
+
GitPython-3.1.27.dist-info/METADATA,sha256=h9Z-ZVEP5r7UES4v0wOH-dyhekIRG0qBBg_7VX5zAFc,1289
|
| 5 |
+
GitPython-3.1.27.dist-info/RECORD,,
|
| 6 |
+
GitPython-3.1.27.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
GitPython-3.1.27.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
|
| 8 |
+
GitPython-3.1.27.dist-info/top_level.txt,sha256=0hzDuIp8obv624V3GmbqsagBWkk8ohtGU-Bc1PmTT0o,4
|
| 9 |
+
git/__init__.py,sha256=LIqmpO0UYXoia_6ymWif0fdDpVN1apyh-yuzD1Qmkog,2500
|
| 10 |
+
git/__pycache__/__init__.cpython-38.pyc,,
|
| 11 |
+
git/__pycache__/cmd.cpython-38.pyc,,
|
| 12 |
+
git/__pycache__/compat.cpython-38.pyc,,
|
| 13 |
+
git/__pycache__/config.cpython-38.pyc,,
|
| 14 |
+
git/__pycache__/db.cpython-38.pyc,,
|
| 15 |
+
git/__pycache__/diff.cpython-38.pyc,,
|
| 16 |
+
git/__pycache__/exc.cpython-38.pyc,,
|
| 17 |
+
git/__pycache__/remote.cpython-38.pyc,,
|
| 18 |
+
git/__pycache__/types.cpython-38.pyc,,
|
| 19 |
+
git/__pycache__/util.cpython-38.pyc,,
|
| 20 |
+
git/cmd.py,sha256=8QI5KptzHzAEGzfMg2FtQrPaggMAzv_YKwF4-rGhJRI,51897
|
| 21 |
+
git/compat.py,sha256=A__j0NDexK9vm-IP59CveY7V_Epef9Km4wH94nykvGs,2244
|
| 22 |
+
git/config.py,sha256=ABfTzzFy82NkwTKHreMbJF78JC43dAR385LbrUhnc84,34506
|
| 23 |
+
git/db.py,sha256=Ji8Zrdq5Gvo3Hm54gBB7DXFYJE0SgPmoHyAp_5RI3s0,2255
|
| 24 |
+
git/diff.py,sha256=nzQ2ZtSFrvcpgTvKBKlheLLZGdLgmW3xbwjaZQtaJBQ,22587
|
| 25 |
+
git/exc.py,sha256=rtiF2l2ZONIih_yF3DZ_WuDXAocdq0inhjQzGJq5T7o,6079
|
| 26 |
+
git/index/__init__.py,sha256=43ovvVNocVRNiQd4fLqvUMuGGmwhBQ9SsiQ46vkvk1E,89
|
| 27 |
+
git/index/__pycache__/__init__.cpython-38.pyc,,
|
| 28 |
+
git/index/__pycache__/base.cpython-38.pyc,,
|
| 29 |
+
git/index/__pycache__/fun.cpython-38.pyc,,
|
| 30 |
+
git/index/__pycache__/typ.cpython-38.pyc,,
|
| 31 |
+
git/index/__pycache__/util.cpython-38.pyc,,
|
| 32 |
+
git/index/base.py,sha256=-MV0W-PyJwCYxFb-Klbooaay3uDm-sr8xHAiK_4lXaU,57001
|
| 33 |
+
git/index/fun.py,sha256=Ac9zLk2JAnayb4Gl9QpJ8S2CzNqWDfxFhZT2S9bhWX8,16431
|
| 34 |
+
git/index/typ.py,sha256=8-yL3QhdHXkVaDHfUuk4Kmks1Comrq547Kg48m6H2gA,5516
|
| 35 |
+
git/index/util.py,sha256=t3llCo90s1L_OgPYIqah5AuVU6043XKTaQrmJtUeYjU,3454
|
| 36 |
+
git/objects/__init__.py,sha256=1uMoWicK_mgiQIaikCMsX7uiRWc9US4XUXCouSmH4Dk,703
|
| 37 |
+
git/objects/__pycache__/__init__.cpython-38.pyc,,
|
| 38 |
+
git/objects/__pycache__/base.cpython-38.pyc,,
|
| 39 |
+
git/objects/__pycache__/blob.cpython-38.pyc,,
|
| 40 |
+
git/objects/__pycache__/commit.cpython-38.pyc,,
|
| 41 |
+
git/objects/__pycache__/fun.cpython-38.pyc,,
|
| 42 |
+
git/objects/__pycache__/tag.cpython-38.pyc,,
|
| 43 |
+
git/objects/__pycache__/tree.cpython-38.pyc,,
|
| 44 |
+
git/objects/__pycache__/util.cpython-38.pyc,,
|
| 45 |
+
git/objects/base.py,sha256=qLgh-OStkOke3yKgwq2ZNFTC45Qsl1UTSI1-fdjR8-w,7759
|
| 46 |
+
git/objects/blob.py,sha256=nXCRt885vuNjI6VRw_fXOZSgQfD9PjXPg3XZIRZkIfM,987
|
| 47 |
+
git/objects/commit.py,sha256=-dXHQvop5HXIqQjuMYIFMNLL2AY9y0a7Gw3ayG8OAHY,25869
|
| 48 |
+
git/objects/fun.py,sha256=vSmm8p4_6ZMOh3Vtwbi65gP1vIFczXF2hXtgHclP-EY,8542
|
| 49 |
+
git/objects/submodule/__init__.py,sha256=OsMeiex7cG6ev2f35IaJ5csH-eXchSoNKCt4HXUG5Ws,93
|
| 50 |
+
git/objects/submodule/__pycache__/__init__.cpython-38.pyc,,
|
| 51 |
+
git/objects/submodule/__pycache__/base.cpython-38.pyc,,
|
| 52 |
+
git/objects/submodule/__pycache__/root.cpython-38.pyc,,
|
| 53 |
+
git/objects/submodule/__pycache__/util.cpython-38.pyc,,
|
| 54 |
+
git/objects/submodule/base.py,sha256=QOZfeU4mQVyGFLKGWS8YTRD8DtN_lVjhtPsw6N9Upqs,58774
|
| 55 |
+
git/objects/submodule/root.py,sha256=cy7wRBLJwqNXGm6bK0tgEucYX7lx-KNQS-WTTlEXvig,18288
|
| 56 |
+
git/objects/submodule/util.py,sha256=iX1EYGDhVrr1PG8729zQPm2GL47FkE9MPqPYC8C_h-o,3358
|
| 57 |
+
git/objects/tag.py,sha256=mqlDG5UyScqHEnwDXRBPwbMcPrTZnhMiezpDM7DkEss,3764
|
| 58 |
+
git/objects/tree.py,sha256=ly7fgePvItjB-B69AvdlFXrekH8MixOMauO31sCpg7E,14292
|
| 59 |
+
git/objects/util.py,sha256=gx1jzp1oiqdlADrWu6oorIF3FwXJZtaprzxPlyY9u8I,22466
|
| 60 |
+
git/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 61 |
+
git/refs/__init__.py,sha256=PMF97jMUcivbCCEJnl2zTs-YtECNFp8rL8GHK8AitXU,203
|
| 62 |
+
git/refs/__pycache__/__init__.cpython-38.pyc,,
|
| 63 |
+
git/refs/__pycache__/head.cpython-38.pyc,,
|
| 64 |
+
git/refs/__pycache__/log.cpython-38.pyc,,
|
| 65 |
+
git/refs/__pycache__/reference.cpython-38.pyc,,
|
| 66 |
+
git/refs/__pycache__/remote.cpython-38.pyc,,
|
| 67 |
+
git/refs/__pycache__/symbolic.cpython-38.pyc,,
|
| 68 |
+
git/refs/__pycache__/tag.cpython-38.pyc,,
|
| 69 |
+
git/refs/head.py,sha256=UbIuWCb9WI87DZY6puX92YaHLDnh-D6DkONwDjyxcvM,9626
|
| 70 |
+
git/refs/log.py,sha256=8ZTiE7KV4SVAlx14xR7xVG1uh_jFgsSNZ_6QSMh4vpA,12061
|
| 71 |
+
git/refs/reference.py,sha256=28aB_lnYLKIXx2wvy_y87P0EDIuNiCsGLgBmV-29qP0,5405
|
| 72 |
+
git/refs/remote.py,sha256=3iSjMHPlQCUA3QacJ-CHK60yihCmGeyVnuTh-zTq7qo,2556
|
| 73 |
+
git/refs/symbolic.py,sha256=nBmwXAK48w-vmFq-tAl40XlemKsPxiu7aDnyctlWA5o,29739
|
| 74 |
+
git/refs/tag.py,sha256=xboM_oFCFXakpZvvb-bn4GgLcsddPvNIuEK9E3gNuNs,4273
|
| 75 |
+
git/remote.py,sha256=VohJ7s27AC0RCP8EgG-_oMjTE1nxOqnQTHSulhqglnY,41704
|
| 76 |
+
git/repo/__init__.py,sha256=XMpdeowJRtTEd80jAcrKSQfMu2JZGMfPlpuIYHG2ZCk,80
|
| 77 |
+
git/repo/__pycache__/__init__.cpython-38.pyc,,
|
| 78 |
+
git/repo/__pycache__/base.cpython-38.pyc,,
|
| 79 |
+
git/repo/__pycache__/fun.cpython-38.pyc,,
|
| 80 |
+
git/repo/base.py,sha256=mF3lmjtg81KQfst2RZad290fp-bwsEG28dEh7NYkmf0,51847
|
| 81 |
+
git/repo/fun.py,sha256=yjsY_sna6XaUzB0ZANiIMwu97QMxWAWpWtoj4pynqg4,12713
|
| 82 |
+
git/types.py,sha256=L9yBmFn6XUdV9BJgnKSwXPcYEPj9mACl7VuIi7FcNQ8,3072
|
| 83 |
+
git/util.py,sha256=AAebYTIADbA46Iu5rsUqCvCPAM2w-PlbMp2-sm6Wdok,39531
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/REQUESTED
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.37.1)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/GitPython-3.1.27.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
git
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/accelerate-0.11.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/accelerate-0.11.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/AUTHORS.md
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
All of the people who have made at least one contribution to conda-package-handling.
|
| 2 |
+
Authors are sorted alphabetically.
|
| 3 |
+
|
| 4 |
+
* Alan Du
|
| 5 |
+
* Cheng H. Lee
|
| 6 |
+
* Chris Burr
|
| 7 |
+
* Christopher Barber
|
| 8 |
+
* Conda Bot
|
| 9 |
+
* Daniel Bast
|
| 10 |
+
* Daniel Holth
|
| 11 |
+
* Eli Uriegas
|
| 12 |
+
* Jannis Leidel
|
| 13 |
+
* John Lee
|
| 14 |
+
* Jonathan J. Helmus
|
| 15 |
+
* Ken Odegard
|
| 16 |
+
* Marius van Niekerk
|
| 17 |
+
* Matthew R. Becker
|
| 18 |
+
* Michael Sarahan
|
| 19 |
+
* Nehal J Wani
|
| 20 |
+
* Pure Software
|
| 21 |
+
* Ray Donnelly
|
| 22 |
+
* Tobias "Tobi" Koch
|
| 23 |
+
* Vadim Zayakin
|
| 24 |
+
* pre-commit-ci[bot]
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
conda
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BSD 3-Clause License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019, Conda
|
| 4 |
+
All rights reserved.
|
| 5 |
+
|
| 6 |
+
Redistribution and use in source and binary forms, with or without
|
| 7 |
+
modification, are permitted provided that the following conditions are met:
|
| 8 |
+
|
| 9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 10 |
+
list of conditions and the following disclaimer.
|
| 11 |
+
|
| 12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 13 |
+
this list of conditions and the following disclaimer in the documentation
|
| 14 |
+
and/or other materials provided with the distribution.
|
| 15 |
+
|
| 16 |
+
* Neither the name of the copyright holder nor the names of its
|
| 17 |
+
contributors may be used to endorse or promote products derived from
|
| 18 |
+
this software without specific prior written permission.
|
| 19 |
+
|
| 20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: conda-package-handling
|
| 3 |
+
Version: 2.2.0
|
| 4 |
+
Summary: Create and extract conda packages of various formats.
|
| 5 |
+
Home-page: https://github.com/conda/conda-package-handling
|
| 6 |
+
Author: Anaconda, Inc.
|
| 7 |
+
Author-email: conda@anaconda.com
|
| 8 |
+
Keywords: conda-package-handling
|
| 9 |
+
Classifier: Programming Language :: Python :: 3
|
| 10 |
+
Requires-Python: >=3.7
|
| 11 |
+
Description-Content-Type: text/markdown
|
| 12 |
+
License-File: LICENSE
|
| 13 |
+
License-File: AUTHORS.md
|
| 14 |
+
Requires-Dist: conda-package-streaming (>=0.9.0)
|
| 15 |
+
Provides-Extra: docs
|
| 16 |
+
Requires-Dist: furo ; extra == 'docs'
|
| 17 |
+
Requires-Dist: sphinx ; extra == 'docs'
|
| 18 |
+
Requires-Dist: sphinx-argparse ; extra == 'docs'
|
| 19 |
+
Requires-Dist: myst-parser ; extra == 'docs'
|
| 20 |
+
Requires-Dist: mdit-py-plugins (>=0.3.0) ; extra == 'docs'
|
| 21 |
+
Provides-Extra: test
|
| 22 |
+
Requires-Dist: mock ; extra == 'test'
|
| 23 |
+
Requires-Dist: pytest ; extra == 'test'
|
| 24 |
+
Requires-Dist: pytest-cov ; extra == 'test'
|
| 25 |
+
Requires-Dist: pytest-mock ; extra == 'test'
|
| 26 |
+
|
| 27 |
+
# conda-package-handling
|
| 28 |
+
|
| 29 |
+
[](https://results.pre-commit.ci/latest/github/conda/conda-package-handling/main)
|
| 30 |
+
|
| 31 |
+
Create and extract conda packages of various formats.
|
| 32 |
+
|
| 33 |
+
`conda` and `conda-build` use `conda_package_handling.api` to create and extract
|
| 34 |
+
conda packages. This package also provides the `cph` command line tool to
|
| 35 |
+
extract, create, and convert between formats.
|
| 36 |
+
|
| 37 |
+
See also
|
| 38 |
+
[conda-package-streaming](https://conda.github.io/conda-package-streaming), an
|
| 39 |
+
efficient library to read from new and old format .conda and .tar.bz2 conda
|
| 40 |
+
packages.
|
| 41 |
+
|
| 42 |
+
Full documentation at [https://conda.github.io/conda-package-handling/](https://conda.github.io/conda-package-handling/)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
../../../bin/cph,sha256=amSbVeIpmd6emwl52Plg8237KxV03oaiIyrlJiUprq4,508
|
| 2 |
+
conda_package_handling-2.2.0.dist-info/AUTHORS.md,sha256=e1PalFDU00a-gtCZU_etkSjXHnlxkpBDiSJjUCCWcW0,460
|
| 3 |
+
conda_package_handling-2.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 4 |
+
conda_package_handling-2.2.0.dist-info/LICENSE,sha256=t_7OW5ytYcamhC7Wd2bBOAaL6tOaKVupR0m8szIrXnU,1505
|
| 5 |
+
conda_package_handling-2.2.0.dist-info/METADATA,sha256=a--HjXzY6tdey9RQ6WQryFRW2AItZKrXnEU_eSwsliU,1692
|
| 6 |
+
conda_package_handling-2.2.0.dist-info/RECORD,,
|
| 7 |
+
conda_package_handling-2.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 8 |
+
conda_package_handling-2.2.0.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92
|
| 9 |
+
conda_package_handling-2.2.0.dist-info/direct_url.json,sha256=Nvd-PiTlaGzGjht--TbZ0TmVoP4PCheazKelTLI465I,118
|
| 10 |
+
conda_package_handling-2.2.0.dist-info/entry_points.txt,sha256=4gBd7ZOv9v6P3VIiI6veVwgXzKCu_fUiOqwqVb0VTno,56
|
| 11 |
+
conda_package_handling-2.2.0.dist-info/top_level.txt,sha256=3290W3g8MbIyhMS6inC3oTzSzIweY5A2UKk__AZrnSY,23
|
| 12 |
+
conda_package_handling/__init__.py,sha256=DKk-1b-rZsJFxFi1JoJ7TmEvIEQ0rf-C9HAZWwvjuM0,22
|
| 13 |
+
conda_package_handling/__main__.py,sha256=a8OMmLES3Fl3A097Gn8VgCl6qR0b0zL6PON76sjnik8,60
|
| 14 |
+
conda_package_handling/__pycache__/__init__.cpython-311.pyc,,
|
| 15 |
+
conda_package_handling/__pycache__/__main__.cpython-311.pyc,,
|
| 16 |
+
conda_package_handling/__pycache__/api.cpython-311.pyc,,
|
| 17 |
+
conda_package_handling/__pycache__/cli.cpython-311.pyc,,
|
| 18 |
+
conda_package_handling/__pycache__/conda_fmt.cpython-311.pyc,,
|
| 19 |
+
conda_package_handling/__pycache__/exceptions.cpython-311.pyc,,
|
| 20 |
+
conda_package_handling/__pycache__/interface.cpython-311.pyc,,
|
| 21 |
+
conda_package_handling/__pycache__/streaming.cpython-311.pyc,,
|
| 22 |
+
conda_package_handling/__pycache__/tarball.cpython-311.pyc,,
|
| 23 |
+
conda_package_handling/__pycache__/utils.cpython-311.pyc,,
|
| 24 |
+
conda_package_handling/__pycache__/validate.cpython-311.pyc,,
|
| 25 |
+
conda_package_handling/api.py,sha256=NzZji1JAN8s7L9Mjc5dzqkGmlLmrwMS5NtoaPyNvFuA,7772
|
| 26 |
+
conda_package_handling/cli.py,sha256=Q0cQOjRRXlpZiexJ_E6o_hQuDa7Pc_6x4o3BzglwRts,5083
|
| 27 |
+
conda_package_handling/conda_fmt.py,sha256=av1fRa8QDa5rCQjMCC089odh-OFMnPBtRC0yoH3SQPU,5187
|
| 28 |
+
conda_package_handling/exceptions.py,sha256=Eg2_GsE2oJqpeHvg98YYO6tfHP8Wv0MrV7CS55xRau0,1893
|
| 29 |
+
conda_package_handling/interface.py,sha256=X8nhmM-orw50Fw08OxVIkHOjk7GNI9LFZLVQmcHYo18,613
|
| 30 |
+
conda_package_handling/streaming.py,sha256=RBD9-9FoLGyljQKHxY-KdPsJ8CpIOl1nic-pbd3d8YA,1412
|
| 31 |
+
conda_package_handling/tarball.py,sha256=k2M_KQpOHvnIPT1RhV-cixciIJQbICA8nnGPKkz4HW4,2895
|
| 32 |
+
conda_package_handling/utils.py,sha256=Mg3o4itddHZ-h_q-NerL3_Viw65Jj2_OyWKPf8iV4Ok,16707
|
| 33 |
+
conda_package_handling/validate.py,sha256=48gkidkuUm1JbCxLTh2jbJ4TgMu9tAasltb-UO2R62k,3652
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/REQUESTED
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.41.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/direct_url.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"dir_info": {}, "url": "file:///home/conda/feedstock_root/build_artifacts/conda-package-handling_1691048088238/work"}
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/entry_points.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[console_scripts]
|
| 2 |
+
cph = conda_package_handling.cli:main
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/conda_package_handling-2.2.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
conda_package_handling
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/__init__.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Main package for humanize."""
|
| 2 |
+
|
| 3 |
+
from humanize.filesize import naturalsize
|
| 4 |
+
from humanize.i18n import activate, deactivate, thousands_separator
|
| 5 |
+
from humanize.number import (
|
| 6 |
+
apnumber,
|
| 7 |
+
clamp,
|
| 8 |
+
fractional,
|
| 9 |
+
intcomma,
|
| 10 |
+
intword,
|
| 11 |
+
metric,
|
| 12 |
+
ordinal,
|
| 13 |
+
scientific,
|
| 14 |
+
)
|
| 15 |
+
from humanize.time import (
|
| 16 |
+
naturaldate,
|
| 17 |
+
naturalday,
|
| 18 |
+
naturaldelta,
|
| 19 |
+
naturaltime,
|
| 20 |
+
precisedelta,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
# Python 3.8+
|
| 25 |
+
import importlib.metadata as importlib_metadata
|
| 26 |
+
except ImportError:
|
| 27 |
+
# <Python 3.7 and lower
|
| 28 |
+
import importlib_metadata # type: ignore
|
| 29 |
+
|
| 30 |
+
__version__ = importlib_metadata.version(__name__)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
__all__ = [
|
| 34 |
+
"__version__",
|
| 35 |
+
"activate",
|
| 36 |
+
"apnumber",
|
| 37 |
+
"clamp",
|
| 38 |
+
"deactivate",
|
| 39 |
+
"fractional",
|
| 40 |
+
"intcomma",
|
| 41 |
+
"intword",
|
| 42 |
+
"metric",
|
| 43 |
+
"naturaldate",
|
| 44 |
+
"naturalday",
|
| 45 |
+
"naturaldelta",
|
| 46 |
+
"naturalsize",
|
| 47 |
+
"naturaltime",
|
| 48 |
+
"ordinal",
|
| 49 |
+
"precisedelta",
|
| 50 |
+
"scientific",
|
| 51 |
+
"thousands_separator",
|
| 52 |
+
]
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/filesize.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
"""Bits and bytes related humanization."""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
suffixes = {
|
| 7 |
+
"decimal": ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
|
| 8 |
+
"binary": ("KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"),
|
| 9 |
+
"gnu": "KMGTPEZY",
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def naturalsize(
|
| 14 |
+
value: float | str,
|
| 15 |
+
binary: bool = False,
|
| 16 |
+
gnu: bool = False,
|
| 17 |
+
format: str = "%.1f",
|
| 18 |
+
) -> str:
|
| 19 |
+
"""Format a number of bytes like a human readable filesize (e.g. 10 kB).
|
| 20 |
+
|
| 21 |
+
By default, decimal suffixes (kB, MB) are used.
|
| 22 |
+
|
| 23 |
+
Non-GNU modes are compatible with jinja2's `filesizeformat` filter.
|
| 24 |
+
|
| 25 |
+
Examples:
|
| 26 |
+
```pycon
|
| 27 |
+
>>> naturalsize(3000000)
|
| 28 |
+
'3.0 MB'
|
| 29 |
+
>>> naturalsize(300, False, True)
|
| 30 |
+
'300B'
|
| 31 |
+
>>> naturalsize(3000, False, True)
|
| 32 |
+
'2.9K'
|
| 33 |
+
>>> naturalsize(3000, False, True, "%.3f")
|
| 34 |
+
'2.930K'
|
| 35 |
+
>>> naturalsize(3000, True)
|
| 36 |
+
'2.9 KiB'
|
| 37 |
+
|
| 38 |
+
```
|
| 39 |
+
Args:
|
| 40 |
+
value (int, float, str): Integer to convert.
|
| 41 |
+
binary (bool): If `True`, uses binary suffixes (KiB, MiB) with base
|
| 42 |
+
2<sup>10</sup> instead of 10<sup>3</sup>.
|
| 43 |
+
gnu (bool): If `True`, the binary argument is ignored and GNU-style
|
| 44 |
+
(`ls -sh` style) prefixes are used (K, M) with the 2**10 definition.
|
| 45 |
+
format (str): Custom formatter.
|
| 46 |
+
|
| 47 |
+
Returns:
|
| 48 |
+
str: Human readable representation of a filesize.
|
| 49 |
+
"""
|
| 50 |
+
if gnu:
|
| 51 |
+
suffix = suffixes["gnu"]
|
| 52 |
+
elif binary:
|
| 53 |
+
suffix = suffixes["binary"]
|
| 54 |
+
else:
|
| 55 |
+
suffix = suffixes["decimal"]
|
| 56 |
+
|
| 57 |
+
base = 1024 if (gnu or binary) else 1000
|
| 58 |
+
bytes_ = float(value)
|
| 59 |
+
abs_bytes = abs(bytes_)
|
| 60 |
+
|
| 61 |
+
if abs_bytes == 1 and not gnu:
|
| 62 |
+
return "%d Byte" % bytes_
|
| 63 |
+
elif abs_bytes < base and not gnu:
|
| 64 |
+
return "%d Bytes" % bytes_
|
| 65 |
+
elif abs_bytes < base and gnu:
|
| 66 |
+
return "%dB" % bytes_
|
| 67 |
+
|
| 68 |
+
for i, s in enumerate(suffix):
|
| 69 |
+
unit = base ** (i + 2)
|
| 70 |
+
if abs_bytes < unit and not gnu:
|
| 71 |
+
return (format + " %s") % ((base * bytes_ / unit), s)
|
| 72 |
+
elif abs_bytes < unit and gnu:
|
| 73 |
+
return (format + "%s") % ((base * bytes_ / unit), s)
|
| 74 |
+
if gnu:
|
| 75 |
+
return (format + "%s") % ((base * bytes_ / unit), s)
|
| 76 |
+
return (format + " %s") % ((base * bytes_ / unit), s)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/i18n.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Activate, get and deactivate translations."""
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import gettext as gettext_module
|
| 5 |
+
import os.path
|
| 6 |
+
from threading import local
|
| 7 |
+
|
| 8 |
+
__all__ = ["activate", "deactivate", "thousands_separator"]
|
| 9 |
+
|
| 10 |
+
_TRANSLATIONS: dict[str | None, gettext_module.NullTranslations] = {
|
| 11 |
+
None: gettext_module.NullTranslations()
|
| 12 |
+
}
|
| 13 |
+
_CURRENT = local()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Mapping of locale to thousands separator
|
| 17 |
+
_THOUSANDS_SEPARATOR = {
|
| 18 |
+
"fr_FR": " ",
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _get_default_locale_path() -> str | None:
|
| 23 |
+
try:
|
| 24 |
+
if __file__ is None:
|
| 25 |
+
return None
|
| 26 |
+
return os.path.join(os.path.dirname(__file__), "locale")
|
| 27 |
+
except NameError:
|
| 28 |
+
return None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_translation() -> gettext_module.NullTranslations:
|
| 32 |
+
try:
|
| 33 |
+
return _TRANSLATIONS[_CURRENT.locale]
|
| 34 |
+
except (AttributeError, KeyError):
|
| 35 |
+
return _TRANSLATIONS[None]
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def activate(locale: str, path: str | None = None) -> gettext_module.NullTranslations:
|
| 39 |
+
"""Activate internationalisation.
|
| 40 |
+
|
| 41 |
+
Set `locale` as current locale. Search for locale in directory `path`.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
locale (str): Language name, e.g. `en_GB`.
|
| 45 |
+
path (str): Path to search for locales.
|
| 46 |
+
|
| 47 |
+
Returns:
|
| 48 |
+
dict: Translations.
|
| 49 |
+
|
| 50 |
+
Raises:
|
| 51 |
+
Exception: If humanize cannot find the locale folder.
|
| 52 |
+
"""
|
| 53 |
+
if path is None:
|
| 54 |
+
path = _get_default_locale_path()
|
| 55 |
+
|
| 56 |
+
if path is None:
|
| 57 |
+
raise Exception(
|
| 58 |
+
"Humanize cannot determinate the default location of the 'locale' folder. "
|
| 59 |
+
"You need to pass the path explicitly."
|
| 60 |
+
)
|
| 61 |
+
if locale not in _TRANSLATIONS:
|
| 62 |
+
translation = gettext_module.translation("humanize", path, [locale])
|
| 63 |
+
_TRANSLATIONS[locale] = translation
|
| 64 |
+
_CURRENT.locale = locale
|
| 65 |
+
return _TRANSLATIONS[locale]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def deactivate() -> None:
|
| 69 |
+
"""Deactivate internationalisation."""
|
| 70 |
+
_CURRENT.locale = None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _gettext(message: str) -> str:
|
| 74 |
+
"""Get translation.
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
message (str): Text to translate.
|
| 78 |
+
|
| 79 |
+
Returns:
|
| 80 |
+
str: Translated text.
|
| 81 |
+
"""
|
| 82 |
+
return get_translation().gettext(message)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _pgettext(msgctxt: str, message: str) -> str:
|
| 86 |
+
"""Fetches a particular translation.
|
| 87 |
+
|
| 88 |
+
It works with `msgctxt` .po modifiers and allows duplicate keys with different
|
| 89 |
+
translations.
|
| 90 |
+
|
| 91 |
+
Args:
|
| 92 |
+
msgctxt (str): Context of the translation.
|
| 93 |
+
message (str): Text to translate.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
str: Translated text.
|
| 97 |
+
"""
|
| 98 |
+
# This GNU gettext function was added in Python 3.8, so for older versions we
|
| 99 |
+
# reimplement it. It works by joining `msgctx` and `message` by '4' byte.
|
| 100 |
+
try:
|
| 101 |
+
# Python 3.8+
|
| 102 |
+
return get_translation().pgettext(msgctxt, message)
|
| 103 |
+
except AttributeError:
|
| 104 |
+
# Python 3.7 and older
|
| 105 |
+
key = msgctxt + "\x04" + message
|
| 106 |
+
translation = get_translation().gettext(key)
|
| 107 |
+
return message if translation == key else translation
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _ngettext(message: str, plural: str, num: int) -> str:
|
| 111 |
+
"""Plural version of _gettext.
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
message (str): Singular text to translate.
|
| 115 |
+
plural (str): Plural text to translate.
|
| 116 |
+
num (int): The number (e.g. item count) to determine translation for the
|
| 117 |
+
respective grammatical number.
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
str: Translated text.
|
| 121 |
+
"""
|
| 122 |
+
return get_translation().ngettext(message, plural, num)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _gettext_noop(message: str) -> str:
|
| 126 |
+
"""Mark a string as a translation string without translating it.
|
| 127 |
+
|
| 128 |
+
Example usage:
|
| 129 |
+
```python
|
| 130 |
+
CONSTANTS = [_gettext_noop('first'), _gettext_noop('second')]
|
| 131 |
+
def num_name(n):
|
| 132 |
+
return _gettext(CONSTANTS[n])
|
| 133 |
+
```
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
message (str): Text to translate in the future.
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
str: Original text, unchanged.
|
| 140 |
+
"""
|
| 141 |
+
return message
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def _ngettext_noop(singular: str, plural: str) -> tuple[str, str]:
|
| 145 |
+
"""Mark two strings as pluralized translations without translating them.
|
| 146 |
+
|
| 147 |
+
Example usage:
|
| 148 |
+
```python
|
| 149 |
+
CONSTANTS = [ngettext_noop('first', 'firsts'), ngettext_noop('second', 'seconds')]
|
| 150 |
+
def num_name(n):
|
| 151 |
+
return _ngettext(*CONSTANTS[n])
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
Args:
|
| 155 |
+
singular (str): Singular text to translate in the future.
|
| 156 |
+
plural (str): Plural text to translate in the future.
|
| 157 |
+
|
| 158 |
+
Returns:
|
| 159 |
+
tuple: Original text, unchanged.
|
| 160 |
+
"""
|
| 161 |
+
return singular, plural
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def thousands_separator() -> str:
|
| 165 |
+
"""Return the thousands separator for a locale, default to comma.
|
| 166 |
+
|
| 167 |
+
Returns:
|
| 168 |
+
str: Thousands separator.
|
| 169 |
+
"""
|
| 170 |
+
try:
|
| 171 |
+
sep = _THOUSANDS_SEPARATOR[_CURRENT.locale]
|
| 172 |
+
except (AttributeError, KeyError):
|
| 173 |
+
sep = ","
|
| 174 |
+
return sep
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/number.py
ADDED
|
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
"""Humanizing functions for numbers."""
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import math
|
| 7 |
+
import re
|
| 8 |
+
import sys
|
| 9 |
+
from fractions import Fraction
|
| 10 |
+
from typing import TYPE_CHECKING
|
| 11 |
+
|
| 12 |
+
from .i18n import _gettext as _
|
| 13 |
+
from .i18n import _ngettext
|
| 14 |
+
from .i18n import _ngettext_noop as NS_
|
| 15 |
+
from .i18n import _pgettext as P_
|
| 16 |
+
from .i18n import thousands_separator
|
| 17 |
+
|
| 18 |
+
if TYPE_CHECKING:
|
| 19 |
+
if sys.version_info >= (3, 10):
|
| 20 |
+
from typing import TypeAlias
|
| 21 |
+
else:
|
| 22 |
+
from typing_extensions import TypeAlias
|
| 23 |
+
|
| 24 |
+
# This type can be better defined by typing.SupportsInt, typing.SupportsFloat
|
| 25 |
+
# but that's a Python 3.8 only typing option.
|
| 26 |
+
NumberOrString: TypeAlias = "float | str"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def ordinal(value: NumberOrString, gender: str = "male") -> str:
|
| 30 |
+
"""Converts an integer to its ordinal as a string.
|
| 31 |
+
|
| 32 |
+
For example, 1 is "1st", 2 is "2nd", 3 is "3rd", etc. Works for any integer or
|
| 33 |
+
anything `int()` will turn into an integer. Anything else will return the output
|
| 34 |
+
of str(value).
|
| 35 |
+
|
| 36 |
+
Examples:
|
| 37 |
+
```pycon
|
| 38 |
+
>>> ordinal(1)
|
| 39 |
+
'1st'
|
| 40 |
+
>>> ordinal(1002)
|
| 41 |
+
'1002nd'
|
| 42 |
+
>>> ordinal(103)
|
| 43 |
+
'103rd'
|
| 44 |
+
>>> ordinal(4)
|
| 45 |
+
'4th'
|
| 46 |
+
>>> ordinal(12)
|
| 47 |
+
'12th'
|
| 48 |
+
>>> ordinal(101)
|
| 49 |
+
'101st'
|
| 50 |
+
>>> ordinal(111)
|
| 51 |
+
'111th'
|
| 52 |
+
>>> ordinal("something else")
|
| 53 |
+
'something else'
|
| 54 |
+
>>> ordinal([1, 2, 3]) == "[1, 2, 3]"
|
| 55 |
+
True
|
| 56 |
+
|
| 57 |
+
```
|
| 58 |
+
Args:
|
| 59 |
+
value (int, str, float): Integer to convert.
|
| 60 |
+
gender (str): Gender for translations. Accepts either "male" or "female".
|
| 61 |
+
|
| 62 |
+
Returns:
|
| 63 |
+
str: Ordinal string.
|
| 64 |
+
"""
|
| 65 |
+
try:
|
| 66 |
+
value = int(value)
|
| 67 |
+
except (TypeError, ValueError):
|
| 68 |
+
return str(value)
|
| 69 |
+
if gender == "male":
|
| 70 |
+
t = (
|
| 71 |
+
P_("0 (male)", "th"),
|
| 72 |
+
P_("1 (male)", "st"),
|
| 73 |
+
P_("2 (male)", "nd"),
|
| 74 |
+
P_("3 (male)", "rd"),
|
| 75 |
+
P_("4 (male)", "th"),
|
| 76 |
+
P_("5 (male)", "th"),
|
| 77 |
+
P_("6 (male)", "th"),
|
| 78 |
+
P_("7 (male)", "th"),
|
| 79 |
+
P_("8 (male)", "th"),
|
| 80 |
+
P_("9 (male)", "th"),
|
| 81 |
+
)
|
| 82 |
+
else:
|
| 83 |
+
t = (
|
| 84 |
+
P_("0 (female)", "th"),
|
| 85 |
+
P_("1 (female)", "st"),
|
| 86 |
+
P_("2 (female)", "nd"),
|
| 87 |
+
P_("3 (female)", "rd"),
|
| 88 |
+
P_("4 (female)", "th"),
|
| 89 |
+
P_("5 (female)", "th"),
|
| 90 |
+
P_("6 (female)", "th"),
|
| 91 |
+
P_("7 (female)", "th"),
|
| 92 |
+
P_("8 (female)", "th"),
|
| 93 |
+
P_("9 (female)", "th"),
|
| 94 |
+
)
|
| 95 |
+
if value % 100 in (11, 12, 13): # special case
|
| 96 |
+
return f"{value}{t[0]}"
|
| 97 |
+
return f"{value}{t[value % 10]}"
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def intcomma(value: NumberOrString, ndigits: int | None = None) -> str:
|
| 101 |
+
"""Converts an integer to a string containing commas every three digits.
|
| 102 |
+
|
| 103 |
+
For example, 3000 becomes "3,000" and 45000 becomes "45,000". To maintain some
|
| 104 |
+
compatibility with Django's `intcomma`, this function also accepts floats.
|
| 105 |
+
|
| 106 |
+
Examples:
|
| 107 |
+
```pycon
|
| 108 |
+
>>> intcomma(100)
|
| 109 |
+
'100'
|
| 110 |
+
>>> intcomma("1000")
|
| 111 |
+
'1,000'
|
| 112 |
+
>>> intcomma(1_000_000)
|
| 113 |
+
'1,000,000'
|
| 114 |
+
>>> intcomma(1_234_567.25)
|
| 115 |
+
'1,234,567.25'
|
| 116 |
+
>>> intcomma(1234.5454545, 2)
|
| 117 |
+
'1,234.55'
|
| 118 |
+
>>> intcomma(14308.40, 1)
|
| 119 |
+
'14,308.4'
|
| 120 |
+
>>> intcomma(None)
|
| 121 |
+
'None'
|
| 122 |
+
|
| 123 |
+
```
|
| 124 |
+
Args:
|
| 125 |
+
value (int, float, str): Integer or float to convert.
|
| 126 |
+
ndigits (int, None): Digits of precision for rounding after the decimal point.
|
| 127 |
+
|
| 128 |
+
Returns:
|
| 129 |
+
str: String containing commas every three digits.
|
| 130 |
+
"""
|
| 131 |
+
sep = thousands_separator()
|
| 132 |
+
try:
|
| 133 |
+
if isinstance(value, str):
|
| 134 |
+
float(value.replace(sep, ""))
|
| 135 |
+
else:
|
| 136 |
+
float(value)
|
| 137 |
+
except (TypeError, ValueError):
|
| 138 |
+
return str(value)
|
| 139 |
+
|
| 140 |
+
if ndigits is not None:
|
| 141 |
+
orig = "{0:.{1}f}".format(value, ndigits)
|
| 142 |
+
else:
|
| 143 |
+
orig = str(value)
|
| 144 |
+
|
| 145 |
+
new = re.sub(r"^(-?\d+)(\d{3})", rf"\g<1>{sep}\g<2>", orig)
|
| 146 |
+
if orig == new:
|
| 147 |
+
return new
|
| 148 |
+
else:
|
| 149 |
+
return intcomma(new)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
powers = [10**x for x in (3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]
|
| 153 |
+
human_powers = (
|
| 154 |
+
NS_("thousand", "thousand"),
|
| 155 |
+
NS_("million", "million"),
|
| 156 |
+
NS_("billion", "billion"),
|
| 157 |
+
NS_("trillion", "trillion"),
|
| 158 |
+
NS_("quadrillion", "quadrillion"),
|
| 159 |
+
NS_("quintillion", "quintillion"),
|
| 160 |
+
NS_("sextillion", "sextillion"),
|
| 161 |
+
NS_("septillion", "septillion"),
|
| 162 |
+
NS_("octillion", "octillion"),
|
| 163 |
+
NS_("nonillion", "nonillion"),
|
| 164 |
+
NS_("decillion", "decillion"),
|
| 165 |
+
NS_("googol", "googol"),
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def intword(value: NumberOrString, format: str = "%.1f") -> str:
    """Converts a large integer to a friendly text representation.

    Works best for numbers over 1 million. For example, 1_000_000 becomes "1.0 million",
    1200000 becomes "1.2 million" and "1_200_000_000" becomes "1.2 billion". Supports up
    to decillion (33 digits) and googol (100 digits).

    Examples:
        ```pycon
        >>> intword("100")
        '100'
        >>> intword("12400")
        '12.4 thousand'
        >>> intword("1000000")
        '1.0 million'
        >>> intword(1_200_000_000)
        '1.2 billion'
        >>> intword(8100000000000000000000000000000000)
        '8.1 decillion'
        >>> intword(None)
        'None'
        >>> intword("1234000", "%0.3f")
        '1.234 million'

        ```
    Args:
        value (int, float, str): Integer to convert.
        format (str): To change the number of decimal or general format of the number
            portion.

    Returns:
        str: Friendly text representation as a string, unless the value passed could not
            be coaxed into an `int`.
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Not convertible to int (e.g. None, "foo"): echo it back unchanged.
        return str(value)

    if value < powers[0]:
        # Below one thousand there is no named power to use.
        return str(value)
    for ordinal, power in enumerate(powers[1:], 1):
        if value < power:
            # Scale down to the power bracket just below `value`.
            chopped = value / float(powers[ordinal - 1])
            if float(format % chopped) == float(10**3):
                # The formatted value rounded up to exactly 1000 (e.g.
                # 999_999 with "%.1f"): promote to the next power so we emit
                # "1.0 million" rather than "1000.0 thousand".
                chopped = value / float(powers[ordinal])
                singular, plural = human_powers[ordinal]
                return (
                    " ".join([format, _ngettext(singular, plural, math.ceil(chopped))])
                ) % chopped
            else:
                singular, plural = human_powers[ordinal - 1]
                return (
                    " ".join([format, _ngettext(singular, plural, math.ceil(chopped))])
                ) % chopped
    # Larger than the biggest named power (googol and beyond).
    return str(value)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def apnumber(value: NumberOrString) -> str:
    """Convert an integer to Associated Press style.

    Numbers 0 through 9 are spelled out ("zero" ... "nine"); any other
    value — including anything that cannot be interpreted as an integer —
    comes back as its string form.

    Examples:
        ```pycon
        >>> apnumber(0)
        'zero'
        >>> apnumber(5)
        'five'
        >>> apnumber(10)
        '10'
        >>> apnumber("7")
        'seven'
        >>> apnumber("foo")
        'foo'
        >>> apnumber(None)
        'None'

        ```
    Args:
        value (int, float, str): Integer to convert.

    Returns:
        str: For numbers 0-9, the number spelled out. Otherwise, the number. This always
            returns a string unless the value was not `int`-able, then `str(value)`
            is returned.
    """
    try:
        number = int(value)
    except (TypeError, ValueError):
        # Not an integer at all: hand back the original, stringified.
        return str(value)

    if number < 0 or number > 9:
        return str(number)

    words = (
        _("zero"),
        _("one"),
        _("two"),
        _("three"),
        _("four"),
        _("five"),
        _("six"),
        _("seven"),
        _("eight"),
        _("nine"),
    )
    return words[number]
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def fractional(value: NumberOrString) -> str:
    """Convert to fractional number.

    Render a float (or anything float-able) as a human-readable fraction or
    mixed fraction instead of ugly decimal places, limiting the denominator
    to 1000.

    Given a string, a number or a float, this returns:

    * a string representation of a fraction
    * or a whole number
    * or a mixed fraction
    * or the str output of the value, if it could not be converted

    Examples:
        ```pycon
        >>> fractional(0.3)
        '3/10'
        >>> fractional(1.3)
        '1 3/10'
        >>> fractional(float(1/3))
        '1/3'
        >>> fractional(1)
        '1'
        >>> fractional("ten")
        'ten'
        >>> fractional(None)
        'None'

        ```
    Args:
        value (int, float, str): Integer to convert.

    Returns:
        str: Fractional number as a string.
    """
    try:
        as_float = float(value)
    except (TypeError, ValueError):
        # Not numeric: echo the input back.
        return str(value)

    integer_part = int(as_float)
    remainder = Fraction(as_float - integer_part).limit_denominator(1000)
    num = remainder.numerator
    den = remainder.denominator

    if integer_part and num == 0 and den == 1:
        # An integer (or an integer-valued float like 1.0000) was passed in.
        return f"{integer_part:.0f}"
    if not integer_part:
        # Pure fraction, no whole part.
        return f"{num:.0f}/{den:.0f}"
    # Mixed fraction: whole part plus fractional remainder.
    return f"{integer_part:.0f} {num:.0f}/{den:.0f}"
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def scientific(value: NumberOrString, precision: int = 2) -> str:
    """Return number in string scientific notation z.wq x 10ⁿ.

    Examples:
        ```pycon
        >>> scientific(float(0.3))
        '3.00 x 10⁻¹'
        >>> scientific(int(500))
        '5.00 x 10²'
        >>> scientific(-1000)
        '-1.00 x 10³'
        >>> scientific(1000, 1)
        '1.0 x 10³'
        >>> scientific(1000, 3)
        '1.000 x 10³'
        >>> scientific("99")
        '9.90 x 10¹'
        >>> scientific("foo")
        'foo'
        >>> scientific(None)
        'None'

        ```

    Args:
        value (int, float, str): Input number.
        precision (int): Number of decimal for first part of the number.

    Returns:
        str: Number in scientific notation z.wq x 10ⁿ.
    """
    superscripts = {
        "0": "⁰",
        "1": "¹",
        "2": "²",
        "3": "³",
        "4": "⁴",
        "5": "⁵",
        "6": "⁶",
        "7": "⁷",
        "8": "⁸",
        "9": "⁹",
        "-": "⁻",
    }
    try:
        number = float(value)
    except (TypeError, ValueError):
        # Not numeric: echo the input back.
        return str(value)

    # Python's 'e' presentation gives e.g. '5.00e+02'.
    rendered = f"{{:.{int(precision)}e}}".format(number)
    mantissa, _sep, exponent = rendered.partition("e")
    # Remove redundant leading '+' or '0's (preserving the last '0' for 10⁰).
    exponent = re.sub(r"^\+?(\-?)0*(.+)$", r"\1\2", exponent)

    raised = "".join(superscripts[ch] for ch in exponent)
    return f"{mantissa} x 10{raised}"
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def clamp(
    value: float,
    format: str = "{:}",
    floor: float | None = None,
    ceil: float | None = None,
    floor_token: str = "<",
    ceil_token: str = ">",
) -> str | None:
    """Returns number with the specified format, clamped between floor and ceil.

    If the number is larger than ceil or smaller than floor, then the respective limit
    will be returned, formatted and prepended with a token specifying as such.

    Examples:
        ```pycon
        >>> clamp(123.456)
        '123.456'
        >>> clamp(0.0001, floor=0.01)
        '<0.01'
        >>> clamp(0.99, format="{:.0%}", ceil=0.99)
        '99%'
        >>> clamp(0.999, format="{:.0%}", ceil=0.99)
        '>99%'
        >>> clamp(1, format=intword, floor=1e6, floor_token="under ")
        'under 1.0 million'
        >>> clamp(None) is None
        True

        ```

    Args:
        value (int, float): Input number.
        format (str OR callable): Can either be a formatting string, or a callable
            function that receives value and returns a string.
        floor (int, float): Smallest value before clamping.
        ceil (int, float): Largest value before clamping.
        floor_token (str): If value is smaller than floor, token will be prepended
            to output.
        ceil_token (str): If value is larger than ceil, token will be prepended
            to output.

    Returns:
        str or None: Formatted number. The output is clamped between the indicated
        floor and ceil. If the number is larger than ceil or smaller than floor, the
        output will be prepended with a token indicating as such. `None` is passed
        through unchanged.

    Raises:
        ValueError: If `format` is neither a string nor a callable.
    """
    # NOTE: the previous annotation claimed `-> str`, but `None` input has
    # always been (and still is) passed through as `None`.
    if value is None:
        return None

    if floor is not None and value < floor:
        value = floor
        token = floor_token
    elif ceil is not None and value > ceil:
        value = ceil
        token = ceil_token
    else:
        token = ""

    if isinstance(format, str):
        return token + format.format(value)
    elif callable(format):
        return token + format(value)
    else:
        raise ValueError(
            "Invalid format. Must be either a valid formatting string, or a function "
            "that accepts value and returns a string."
        )
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def metric(value: float, unit: str = "", precision: int = 3) -> str:
    """Return a value with a metric SI unit-prefix appended.

    Examples:
        ```pycon
        >>> metric(1500, "V")
        '1.50 kV'
        >>> metric(2e8, "W")
        '200 MW'
        >>> metric(220e-6, "F")
        '220 μF'
        >>> metric(1e-14, precision=4)
        '10.00 f'

        ```

    The unit prefix is always chosen so that non-significant zero digits are required.
    i.e. `123,000` will become `123k` instead of `0.123M` and `1,230,000` will become
    `1.23M` instead of `1230K`. For numbers that are either too huge or too tiny to
    represent without resorting to either leading or trailing zeroes, it falls back to
    `scientific()`.
    ```pycon
    >>> metric(1e40)
    '1.00 x 10⁴⁰'

    ```

    Args:
        value (int, float): Input number.
        unit (str): Optional base unit.
        precision (int): The number of digits the output should contain.

    Returns:
        str:
    """
    exponent = int(math.floor(math.log10(abs(value))))

    # Beyond yotta (10**24..) or below yocto: no SI prefix exists, fall back.
    if not (-24 <= exponent < 27):
        return scientific(value, precision - 1) + unit

    # Scale into the band covered by the chosen prefix (1 <= |scaled| < 1000).
    scaled = value / 10 ** (exponent // 3 * 3)
    if exponent >= 3:
        prefix = "kMGTPEZY"[exponent // 3 - 1]
    elif exponent < 0:
        prefix = "mμnpfazy"[(-exponent - 1) // 3]
    else:
        prefix = ""

    # Spend the remaining precision after the decimal point.
    digits_after_point = precision - (exponent % 3) - 1
    rendered = format(scaled, f".{digits_after_point}f")

    # Angular units (and the bare-number case) attach without a space.
    if not (unit or prefix) or unit in ("°", "′", "″"):
        separator = ""
    else:
        separator = " "

    return f"{rendered}{separator}{prefix}{unit}"
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/py.typed
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize/time.py
ADDED
|
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
"""Time humanizing functions.
|
| 4 |
+
|
| 5 |
+
These are largely borrowed from Django's `contrib.humanize`.
|
| 6 |
+
"""
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import collections.abc
|
| 10 |
+
import datetime as dt
|
| 11 |
+
import math
|
| 12 |
+
import typing
|
| 13 |
+
from enum import Enum
|
| 14 |
+
from functools import total_ordering
|
| 15 |
+
|
| 16 |
+
from .i18n import _gettext as _
|
| 17 |
+
from .i18n import _ngettext
|
| 18 |
+
from .number import intcomma
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
"naturaldelta",
|
| 22 |
+
"naturaltime",
|
| 23 |
+
"naturalday",
|
| 24 |
+
"naturaldate",
|
| 25 |
+
"precisedelta",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@total_ordering
class Unit(Enum):
    """Time units, ordered from finest (microseconds) to coarsest (years).

    `total_ordering` derives the remaining comparison operators from
    `__lt__` and the `Enum`-provided `__eq__`, so units can be compared
    directly (e.g. `Unit.SECONDS < Unit.DAYS`).
    """

    MICROSECONDS = 0
    MILLISECONDS = 1
    SECONDS = 2
    MINUTES = 3
    HOURS = 4
    DAYS = 5
    MONTHS = 6
    YEARS = 7

    def __lt__(self, other: typing.Any) -> typing.Any:
        # Only compare against other Unit members; defer to the other
        # operand for anything else.
        if type(other) is type(self):
            return self.value < other.value
        return NotImplemented
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _now() -> dt.datetime:
|
| 47 |
+
return dt.datetime.now()
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _abs_timedelta(delta: dt.timedelta) -> dt.timedelta:
|
| 51 |
+
"""Return an "absolute" value for a timedelta, always representing a time distance.
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
delta (datetime.timedelta): Input timedelta.
|
| 55 |
+
|
| 56 |
+
Returns:
|
| 57 |
+
datetime.timedelta: Absolute timedelta.
|
| 58 |
+
"""
|
| 59 |
+
if delta.days < 0:
|
| 60 |
+
now = _now()
|
| 61 |
+
return now - (now + delta)
|
| 62 |
+
return delta
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _date_and_delta(
    value: typing.Any, *, now: dt.datetime | None = None
) -> tuple[typing.Any, typing.Any]:
    """Turn a value into a date and a timedelta which represents how long ago it was.

    If that's not possible, return `(None, value)`.
    """
    if not now:
        now = _now()
    if isinstance(value, dt.datetime):
        # A point in time: its distance is measured from `now`.
        date = value
        delta = now - value
    elif isinstance(value, dt.timedelta):
        # A duration: anchor it `value` before `now`.
        date = now - value
        delta = value
    else:
        try:
            # Anything int-able is treated as a number of seconds ago.
            value = int(value)
            delta = dt.timedelta(seconds=value)
            date = now - delta
        except (ValueError, TypeError):
            # Not interpretable: signal failure with (None, original value).
            return None, value
    # The delta is normalized to a non-negative distance.
    return date, _abs_timedelta(delta)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def naturaldelta(
    value: dt.timedelta | float,
    months: bool = True,
    minimum_unit: str = "seconds",
) -> str:
    """Return a natural representation of a timedelta or number of seconds.

    This is similar to `naturaltime`, but does not add tense to the result.

    Args:
        value (datetime.timedelta, int or float): A timedelta or a number of seconds.
        months (bool): If `True`, then a number of months (based on 30.5 days) will be
            used for fuzziness between years.
        minimum_unit (str): The lowest unit that can be used. One of "seconds",
            "milliseconds" or "microseconds".

    Returns:
        str (str or `value`): A natural representation of the amount of time
            elapsed unless `value` is not datetime.timedelta or cannot be
            converted to int. In that case, a `value` is returned unchanged.

    Raises:
        OverflowError: If `value` is too large to convert to datetime.timedelta.
        ValueError: If `minimum_unit` is not one of the supported sub-minute units.

    Examples
        Compare two timestamps in a custom local timezone::

        import datetime as dt
        from dateutil.tz import gettz

        berlin = gettz("Europe/Berlin")
        now = dt.datetime.now(tz=berlin)
        later = now + dt.timedelta(minutes=30)

        assert naturaldelta(later - now) == "30 minutes"
    """
    # Only sub-minute minimum units are meaningful here; coarser units are
    # rejected up front.
    tmp = Unit[minimum_unit.upper()]
    if tmp not in (Unit.SECONDS, Unit.MILLISECONDS, Unit.MICROSECONDS):
        raise ValueError(f"Minimum unit '{minimum_unit}' not supported")
    min_unit = tmp

    if isinstance(value, dt.timedelta):
        delta = value
    else:
        try:
            # Numbers are interpreted as a count of seconds.
            value = int(value)
            delta = dt.timedelta(seconds=value)
        except (ValueError, TypeError):
            # Not interpretable: echo the input back.
            return str(value)

    use_months = months

    # NOTE(review): `abs()` is applied to `days` and `seconds` separately,
    # which for negative timedeltas differs from abs(delta) — this mirrors
    # the historical behavior; confirm before changing.
    seconds = abs(delta.seconds)
    days = abs(delta.days)
    years = days // 365
    days = days % 365
    num_months = int(days // 30.5)  # fuzzy month length of 30.5 days

    if not years and days < 1:
        # Sub-day distances, resolved down to the requested minimum unit.
        if seconds == 0:
            if min_unit == Unit.MICROSECONDS and delta.microseconds < 1000:
                return (
                    _ngettext("%d microsecond", "%d microseconds", delta.microseconds)
                    % delta.microseconds
                )
            elif min_unit == Unit.MILLISECONDS or (
                min_unit == Unit.MICROSECONDS and 1000 <= delta.microseconds < 1_000_000
            ):
                milliseconds = delta.microseconds / 1000
                return (
                    _ngettext("%d millisecond", "%d milliseconds", int(milliseconds))
                    % milliseconds
                )
            # Below the minimum unit's resolution.
            return _("a moment")
        elif seconds == 1:
            return _("a second")
        elif seconds < 60:
            return _ngettext("%d second", "%d seconds", seconds) % seconds
        elif 60 <= seconds < 120:
            return _("a minute")
        elif 120 <= seconds < 3600:
            minutes = seconds // 60
            return _ngettext("%d minute", "%d minutes", minutes) % minutes
        elif 3600 <= seconds < 3600 * 2:
            return _("an hour")
        elif 3600 < seconds:
            hours = seconds // 3600
            return _ngettext("%d hour", "%d hours", hours) % hours
    elif years == 0:
        # Between one day and one year: report days, or fuzzy months if enabled.
        if days == 1:
            return _("a day")
        if not use_months:
            return _ngettext("%d day", "%d days", days) % days
        else:
            if not num_months:
                return _ngettext("%d day", "%d days", days) % days
            elif num_months == 1:
                return _("a month")
            else:
                return _ngettext("%d month", "%d months", num_months) % num_months
    elif years == 1:
        # Exactly one year plus a remainder of months or days.
        if not num_months and not days:
            return _("a year")
        elif not num_months:
            return _ngettext("1 year, %d day", "1 year, %d days", days) % days
        elif use_months:
            if num_months == 1:
                return _("1 year, 1 month")
            else:
                return (
                    _ngettext("1 year, %d month", "1 year, %d months", num_months)
                    % num_months
                )
        else:
            return _ngettext("1 year, %d day", "1 year, %d days", days) % days

    # Two or more years: the year count is comma-grouped for readability.
    return _ngettext("%d year", "%d years", years).replace("%d", "%s") % intcomma(years)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def naturaltime(
    value: dt.datetime | float,
    future: bool = False,
    months: bool = True,
    minimum_unit: str = "seconds",
    when: dt.datetime | None = None,
) -> str:
    """Return a natural representation of a time in a resolution that makes sense.

    This is more or less compatible with Django's `naturaltime` filter.

    Args:
        value (datetime.datetime, int or float): A `datetime` or a number of seconds.
        future (bool): Ignored for `datetime`s, where the tense is always figured out
            based on the current time. For integers, the return value will be past tense
            by default, unless future is `True`.
        months (bool): If `True`, then a number of months (based on 30.5 days) will be
            used for fuzziness between years.
        minimum_unit (str): The lowest unit that can be used.
        when (datetime.datetime): Point in time relative to which _value_ is
            interpreted. Defaults to the current time in the local timezone.

    Returns:
        str: A natural representation of the input in a resolution that makes sense.
    """
    now = when or _now()
    date, delta = _date_and_delta(value, now=now)
    if date is None:
        # Input was not interpretable as a time at all.
        return str(value)
    # determine tense by value only if datetime/timedelta were passed
    if isinstance(value, (dt.datetime, dt.timedelta)):
        future = date > now

    ago = _("%s from now") if future else _("%s ago")
    delta = naturaldelta(delta, months, minimum_unit)

    # A distance too small to express gets the special wording "now".
    if delta == _("a moment"):
        return _("now")

    return str(ago % delta)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def naturalday(value: dt.date | dt.datetime, format: str = "%b %d") -> str:
    """Return a natural day.

    For dates that are yesterday, today or tomorrow relative to the present
    day, return the corresponding word. Any other date is rendered with
    `strftime` using `format`. Values that are not date-like (or whose
    components are out of range) are returned as `str(value)`.
    """
    try:
        day = dt.date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't date-ish
        return str(value)
    except (OverflowError, ValueError):
        # Date arguments out of range
        return str(value)

    offset = (day - dt.date.today()).days
    if offset == 0:
        return _("today")
    if offset == 1:
        return _("tomorrow")
    if offset == -1:
        return _("yesterday")
    return day.strftime(format)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def naturaldate(value: dt.date | dt.datetime) -> str:
    """Like `naturalday`, but append a year for dates more than ~five months away."""
    try:
        day = dt.date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't date-ish
        return str(value)
    except (OverflowError, ValueError):
        # Date arguments out of range
        return str(value)

    distance = _abs_timedelta(day - dt.date.today())
    # Roughly five months, expressed in days.
    if distance.days >= 5 * 365 / 12:
        return naturalday(day, "%b %d %Y")
    return naturalday(day)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def _quotient_and_remainder(
|
| 293 |
+
value: float,
|
| 294 |
+
divisor: float,
|
| 295 |
+
unit: Unit,
|
| 296 |
+
minimum_unit: Unit,
|
| 297 |
+
suppress: collections.abc.Iterable[Unit],
|
| 298 |
+
) -> tuple[float, float]:
|
| 299 |
+
"""Divide `value` by `divisor` returning the quotient and remainder.
|
| 300 |
+
|
| 301 |
+
If `unit` is `minimum_unit`, makes the quotient a float number and the remainder
|
| 302 |
+
will be zero. The rational is that if `unit` is the unit of the quotient, we cannot
|
| 303 |
+
represent the remainder because it would require a unit smaller than the
|
| 304 |
+
`minimum_unit`.
|
| 305 |
+
|
| 306 |
+
>>> from humanize.time import _quotient_and_remainder, Unit
|
| 307 |
+
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.DAYS, [])
|
| 308 |
+
(1.5, 0)
|
| 309 |
+
|
| 310 |
+
If unit is in `suppress`, the quotient will be zero and the remainder will be the
|
| 311 |
+
initial value. The idea is that if we cannot use `unit`, we are forced to use a
|
| 312 |
+
lower unit so we cannot do the division.
|
| 313 |
+
|
| 314 |
+
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.HOURS, [Unit.DAYS])
|
| 315 |
+
(0, 36)
|
| 316 |
+
|
| 317 |
+
In other case return quotient and remainder as `divmod` would do it.
|
| 318 |
+
|
| 319 |
+
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.HOURS, [])
|
| 320 |
+
(1, 12)
|
| 321 |
+
|
| 322 |
+
"""
|
| 323 |
+
if unit == minimum_unit:
|
| 324 |
+
return value / divisor, 0
|
| 325 |
+
elif unit in suppress:
|
| 326 |
+
return 0, value
|
| 327 |
+
else:
|
| 328 |
+
return divmod(value, divisor)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def _carry(
|
| 332 |
+
value1: float,
|
| 333 |
+
value2: float,
|
| 334 |
+
ratio: float,
|
| 335 |
+
unit: Unit,
|
| 336 |
+
min_unit: Unit,
|
| 337 |
+
suppress: typing.Iterable[Unit],
|
| 338 |
+
) -> tuple[float, float]:
|
| 339 |
+
"""Return a tuple with two values.
|
| 340 |
+
|
| 341 |
+
If the unit is in `suppress`, multiply `value1` by `ratio` and add it to `value2`
|
| 342 |
+
(carry to right). The idea is that if we cannot represent `value1` we need to
|
| 343 |
+
represent it in a lower unit.
|
| 344 |
+
|
| 345 |
+
>>> from humanize.time import _carry, Unit
|
| 346 |
+
>>> _carry(2, 6, 24, Unit.DAYS, Unit.SECONDS, [Unit.DAYS])
|
| 347 |
+
(0, 54)
|
| 348 |
+
|
| 349 |
+
If the unit is the minimum unit, `value2` is divided by `ratio` and added to
|
| 350 |
+
`value1` (carry to left). We assume that `value2` has a lower unit so we need to
|
| 351 |
+
carry it to `value1`.
|
| 352 |
+
|
| 353 |
+
>>> _carry(2, 6, 24, Unit.DAYS, Unit.DAYS, [])
|
| 354 |
+
(2.25, 0)
|
| 355 |
+
|
| 356 |
+
Otherwise, just return the same input:
|
| 357 |
+
|
| 358 |
+
>>> _carry(2, 6, 24, Unit.DAYS, Unit.SECONDS, [])
|
| 359 |
+
(2, 6)
|
| 360 |
+
"""
|
| 361 |
+
if unit == min_unit:
|
| 362 |
+
return value1 + value2 / ratio, 0
|
| 363 |
+
elif unit in suppress:
|
| 364 |
+
return 0, value2 + value1 * ratio
|
| 365 |
+
else:
|
| 366 |
+
return value1, value2
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def _suitable_minimum_unit(min_unit: Unit, suppress: typing.Iterable[Unit]) -> Unit:
|
| 370 |
+
"""Return a minimum unit suitable that is not suppressed.
|
| 371 |
+
|
| 372 |
+
If not suppressed, return the same unit:
|
| 373 |
+
|
| 374 |
+
>>> from humanize.time import _suitable_minimum_unit, Unit
|
| 375 |
+
>>> _suitable_minimum_unit(Unit.HOURS, []).name
|
| 376 |
+
'HOURS'
|
| 377 |
+
|
| 378 |
+
But if suppressed, find a unit greather than the original one that is not
|
| 379 |
+
suppressed:
|
| 380 |
+
|
| 381 |
+
>>> _suitable_minimum_unit(Unit.HOURS, [Unit.HOURS]).name
|
| 382 |
+
'DAYS'
|
| 383 |
+
|
| 384 |
+
>>> _suitable_minimum_unit(Unit.HOURS, [Unit.HOURS, Unit.DAYS]).name
|
| 385 |
+
'MONTHS'
|
| 386 |
+
"""
|
| 387 |
+
if min_unit in suppress:
|
| 388 |
+
for unit in Unit:
|
| 389 |
+
if unit > min_unit and unit not in suppress:
|
| 390 |
+
return unit
|
| 391 |
+
|
| 392 |
+
raise ValueError(
|
| 393 |
+
"Minimum unit is suppressed and no suitable replacement was found"
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
return min_unit
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def _suppress_lower_units(min_unit: Unit, suppress: typing.Iterable[Unit]) -> set[Unit]:
    """Extend suppressed units (if any) with all units lower than the minimum unit.

    >>> from humanize.time import _suppress_lower_units, Unit
    >>> [x.name for x in sorted(_suppress_lower_units(Unit.SECONDS, [Unit.DAYS]))]
    ['MICROSECONDS', 'MILLISECONDS', 'DAYS']
    """
    expanded = set(suppress)
    # Unit iterates smallest-to-largest: everything before min_unit is finer
    # than the caller wants and must be suppressed too.
    for candidate in Unit:
        if candidate == min_unit:
            break
        expanded.add(candidate)

    return expanded
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def precisedelta(
    value: dt.timedelta | int,
    minimum_unit: str = "seconds",
    suppress: typing.Iterable[str] = (),
    format: str = "%0.2f",
) -> str:
    """Return a precise representation of a timedelta.

    ```pycon
    >>> import datetime as dt
    >>> from humanize.time import precisedelta

    >>> delta = dt.timedelta(seconds=3633, days=2, microseconds=123000)
    >>> precisedelta(delta)
    '2 days, 1 hour and 33.12 seconds'

    ```

    A custom `format` can be specified to control how the fractional part
    is represented:

    ```pycon
    >>> precisedelta(delta, format="%0.4f")
    '2 days, 1 hour and 33.1230 seconds'

    ```

    Instead, the `minimum_unit` can be changed to have a better resolution;
    the function will still readjust the unit to use the greatest of the
    units that does not lose precision.

    For example setting microseconds but still representing the date with milliseconds:

    ```pycon
    >>> precisedelta(delta, minimum_unit="microseconds")
    '2 days, 1 hour, 33 seconds and 123 milliseconds'

    ```

    If desired, some units can be suppressed: you will not see them represented and the
    time of the other units will be adjusted to keep representing the same timedelta:

    ```pycon
    >>> precisedelta(delta, suppress=['days'])
    '49 hours and 33.12 seconds'

    ```

    Note that microseconds precision is lost if the seconds and all
    the units below are suppressed:

    ```pycon
    >>> delta = dt.timedelta(seconds=90, microseconds=100)
    >>> precisedelta(delta, suppress=['seconds', 'milliseconds', 'microseconds'])
    '1.50 minutes'

    ```

    If the delta is too small to be represented with the minimum unit,
    a value of zero will be returned:

    ```pycon
    >>> delta = dt.timedelta(seconds=1)
    >>> precisedelta(delta, minimum_unit="minutes")
    '0.02 minutes'

    >>> delta = dt.timedelta(seconds=0.1)
    >>> precisedelta(delta, minimum_unit="minutes")
    '0 minutes'

    ```
    """
    date, delta = _date_and_delta(value)
    if date is None:
        # `value` could not be interpreted as a delta; fall back to str().
        return str(value)

    suppress_set = {Unit[s.upper()] for s in suppress}

    # Find a suitable minimum unit (it can be greater the one that the
    # user gave us if it is suppressed).
    min_unit = Unit[minimum_unit.upper()]
    min_unit = _suitable_minimum_unit(min_unit, suppress_set)
    # Guard against accidentally using the raw string after the Unit lookup.
    del minimum_unit

    # Expand the suppressed units list/set to include all the units
    # that are below the minimum unit
    suppress_set = _suppress_lower_units(min_unit, suppress_set)

    # handy aliases
    days = delta.days
    secs = delta.seconds
    usecs = delta.microseconds

    MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS, MONTHS, YEARS = list(
        Unit
    )

    # Given DAYS compute YEARS and the remainder of DAYS as follows:
    #   if YEARS is the minimum unit, we cannot use DAYS so
    #   we will use a float for YEARS and 0 for DAYS:
    #       years, days = years/days, 0
    #
    #   if YEARS is suppressed, use DAYS:
    #       years, days = 0, days
    #
    #   otherwise:
    #       years, days = divmod(years, days)
    #
    # The same applies for months, hours, minutes and milliseconds below
    years, days = _quotient_and_remainder(days, 365, YEARS, min_unit, suppress_set)
    months, days = _quotient_and_remainder(days, 30.5, MONTHS, min_unit, suppress_set)

    # If DAYS is not in suppress, we can represent the days but
    # if it is a suppressed unit, we need to carry it to a lower unit,
    # seconds in this case.
    #
    # The same applies for secs and usecs below
    days, secs = _carry(days, secs, 24 * 3600, DAYS, min_unit, suppress_set)

    hours, secs = _quotient_and_remainder(secs, 3600, HOURS, min_unit, suppress_set)
    minutes, secs = _quotient_and_remainder(secs, 60, MINUTES, min_unit, suppress_set)

    secs, usecs = _carry(secs, usecs, 1e6, SECONDS, min_unit, suppress_set)

    msecs, usecs = _quotient_and_remainder(
        usecs, 1000, MILLISECONDS, min_unit, suppress_set
    )

    # if _unused != 0 we had lost some precision
    usecs, _unused = _carry(usecs, 0, 1, MICROSECONDS, min_unit, suppress_set)

    # Singular/plural templates paired with their computed values, from
    # largest unit to smallest (matching reversed(Unit) iteration below).
    fmts = [
        ("%d year", "%d years", years),
        ("%d month", "%d months", months),
        ("%d day", "%d days", days),
        ("%d hour", "%d hours", hours),
        ("%d minute", "%d minutes", minutes),
        ("%d second", "%d seconds", secs),
        ("%d millisecond", "%d milliseconds", msecs),
        ("%d microsecond", "%d microseconds", usecs),
    ]

    texts: list[str] = []
    for unit, fmt in zip(reversed(Unit), fmts):
        singular_txt, plural_txt, fmt_value = fmt
        # Emit a unit when it has a non-zero value, or when nothing has been
        # emitted yet and we reached the minimum unit (the "0 minutes" case).
        if fmt_value > 0 or (not texts and unit == min_unit):
            fmt_txt = _ngettext(singular_txt, plural_txt, fmt_value)
            if unit == min_unit and math.modf(fmt_value)[0] > 0:
                # Only the minimum unit may carry a fractional part; render it
                # with the caller-supplied float format.
                fmt_txt = fmt_txt.replace("%d", format)
            elif unit == YEARS:
                # Years get thousand separators via intcomma.
                fmt_txt = fmt_txt.replace("%d", "%s")
                texts.append(fmt_txt % intcomma(fmt_value))
                continue

            texts.append(fmt_txt % fmt_value)

        # Units below the minimum were folded into it already; stop here.
        if unit == min_unit:
            break

    if len(texts) == 1:
        return texts[0]

    head = ", ".join(texts[:-1])
    tail = texts[-1]

    return _("%s and %s") % (head, tail)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/__init__.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------------------------------------
|
| 2 |
+
# Copyright (c) 2013-2022, Nucleic Development Team.
|
| 3 |
+
#
|
| 4 |
+
# Distributed under the terms of the Modified BSD License.
|
| 5 |
+
#
|
| 6 |
+
# The full license is in the file LICENSE, distributed with this software.
|
| 7 |
+
# --------------------------------------------------------------------------------------
|
| 8 |
+
# Re-export the public API of the compiled C extension so that everything is
# reachable directly from the ``kiwisolver`` package namespace.
from ._cext import (
    BadRequiredStrength,
    Constraint,
    DuplicateConstraint,
    DuplicateEditVariable,
    Expression,
    Solver,
    Term,
    UnknownConstraint,
    UnknownEditVariable,
    UnsatisfiableConstraint,
    Variable,
    __kiwi_version__,
    __version__,
    strength,
)

# Explicit public API of the package; mirrors the names re-exported above.
__all__ = [
    "BadRequiredStrength",
    "DuplicateConstraint",
    "DuplicateEditVariable",
    "UnknownConstraint",
    "UnknownEditVariable",
    "UnsatisfiableConstraint",
    "strength",
    "Variable",
    "Term",
    "Expression",
    "Constraint",
    "Solver",
    "__version__",
    "__kiwi_version__",
]
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/_cext.pyi
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------------------------------------
|
| 2 |
+
# Copyright (c) 2021, Nucleic Development Team.
|
| 3 |
+
#
|
| 4 |
+
# Distributed under the terms of the Modified BSD License.
|
| 5 |
+
#
|
| 6 |
+
# The full license is in the file LICENSE, distributed with this software.
|
| 7 |
+
# --------------------------------------------------------------------------------------
|
| 8 |
+
|
| 9 |
+
from typing import Any, Iterable, NoReturn, Tuple, type_check_only
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
from typing import Literal
|
| 13 |
+
except ImportError:
|
| 14 |
+
from typing_extensions import Literal # type: ignore
|
| 15 |
+
|
| 16 |
+
__version__: str
|
| 17 |
+
__kiwi_version__: str
|
| 18 |
+
|
| 19 |
+
# --- Exceptions
|
| 20 |
+
|
| 21 |
+
# Exception types exposed by the kiwisolver C extension (stub declarations
# only; the concrete definitions live in the compiled module).
class BadRequiredStrength(Exception): ...
class DuplicateConstraint(Exception): ...
class DuplicateEditVariable(Exception): ...
class UnknownConstraint(Exception): ...
class UnknownEditVariable(Exception): ...
class UnsatisfiableConstraint(Exception): ...
|
| 27 |
+
|
| 28 |
+
# Types
|
| 29 |
+
@type_check_only
class Strength:
    """Typing-only view of the ``strength`` singleton: predefined strength
    values plus a ``create`` factory for custom strengths."""

    @property
    def weak(self) -> float: ...
    @property
    def medium(self) -> float: ...
    @property
    def strong(self) -> float: ...
    @property
    def required(self) -> float: ...
    def create(
        self,
        a: int | float,
        b: int | float,
        c: int | float,
        weight: int | float = 1.0,
        /,
    ) -> float: ...
|
| 47 |
+
|
| 48 |
+
# This is meant as a singleton and users should not access the Strength type.
|
| 49 |
+
strength: Strength
|
| 50 |
+
|
| 51 |
+
class Variable:
    """Variable to express a constraint in a solver."""

    # Unhashable: __eq__ builds a Constraint instead of testing equality,
    # so hashing by equality would be meaningless.
    __hash__: None  # type: ignore
    def __init__(self, name: str = "", context: Any = None, /) -> None: ...
    def name(self) -> str:
        """Get the name of the variable."""
        ...
    def setName(self, name: str, /) -> Any:
        """Set the name of the variable."""
        ...
    def value(self) -> float:
        """Get the current value of the variable."""
        ...
    def context(self) -> Any:
        """Get the context object associated with the variable."""
        ...
    def setContext(self, context: Any, /) -> Any:
        """Set the context object associated with the variable."""
        ...
    # Arithmetic operators build Term/Expression objects symbolically.
    def __neg__(self) -> Term: ...
    def __add__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __radd__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __sub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __rsub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __mul__(self, other: float) -> Term: ...
    def __rmul__(self, other: float) -> Term: ...
    def __truediv__(self, other: float) -> Term: ...
    def __rtruediv__(self, other: float) -> Term: ...
    # ==, >=, <= build Constraint objects; the remaining comparisons are
    # unsupported and declared NoReturn.
    def __eq__(self, other: float | Variable | Term | Expression) -> Constraint: ...  # type: ignore
    def __ge__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __le__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __ne__(self, other: Any) -> NoReturn: ...
    def __gt__(self, other: Any) -> NoReturn: ...
    def __lt__(self, other: Any) -> NoReturn: ...
|
| 86 |
+
|
| 87 |
+
class Term:
    """Product of a variable by a constant pre-factor."""

    # Unhashable: __eq__ builds a Constraint instead of testing equality.
    __hash__: None  # type: ignore
    def __init__(
        self, variable: Variable, coefficient: int | float = 1.0, /  # noqa
    ) -> None: ...
    def coefficient(self) -> float:
        """Get the coefficient for the term."""
        ...
    def variable(self) -> Variable:
        """Get the variable for the term."""
        ...
    def value(self) -> float:
        """Get the value for the term."""
        ...
    # Arithmetic operators build Term/Expression objects symbolically.
    def __neg__(self) -> Term: ...
    def __add__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __radd__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __sub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __rsub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __mul__(self, other: float) -> Term: ...
    def __rmul__(self, other: float) -> Term: ...
    def __truediv__(self, other: float) -> Term: ...
    def __rtruediv__(self, other: float) -> Term: ...
    # ==, >=, <= build Constraint objects; other comparisons are unsupported.
    def __eq__(self, other: float | Variable | Term | Expression) -> Constraint: ...  # type: ignore
    def __ge__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __le__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __ne__(self, other: Any) -> NoReturn: ...
    def __gt__(self, other: Any) -> NoReturn: ...
    def __lt__(self, other: Any) -> NoReturn: ...
|
| 118 |
+
|
| 119 |
+
class Expression:
    """Sum of terms and an additional constant."""

    # Unhashable: __eq__ builds a Constraint instead of testing equality.
    __hash__: None  # type: ignore
    def __init__(
        self, terms: Iterable[Term], constant: int | float = 0.0, /  # noqa
    ) -> None: ...
    def constant(self) -> float:
        """Get the constant for the expression."""
        ...
    def terms(self) -> Tuple[Term, ...]:
        """Get the tuple of terms for the expression."""
        ...
    def value(self) -> float:
        """Get the value for the expression."""
        ...
    # Arithmetic operators build new Expression objects symbolically.
    def __neg__(self) -> Expression: ...
    def __add__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __radd__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __sub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __rsub__(self, other: float | Variable | Term | Expression) -> Expression: ...
    def __mul__(self, other: float) -> Expression: ...
    def __rmul__(self, other: float) -> Expression: ...
    def __truediv__(self, other: float) -> Expression: ...
    def __rtruediv__(self, other: float) -> Expression: ...
    # ==, >=, <= build Constraint objects; other comparisons are unsupported.
    def __eq__(self, other: float | Variable | Term | Expression) -> Constraint: ...  # type: ignore
    def __ge__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __le__(self, other: float | Variable | Term | Expression) -> Constraint: ...
    def __ne__(self, other: Any) -> NoReturn: ...
    def __gt__(self, other: Any) -> NoReturn: ...
    def __lt__(self, other: Any) -> NoReturn: ...
|
| 150 |
+
|
| 151 |
+
class Constraint:
    """Constraint over an expression, with a relational operator and a strength."""

    def __init__(
        self,
        expression: Expression,
        op: Literal["=="] | Literal["<="] | Literal[">="],
        strength: float
        | Literal["weak"]
        | Literal["medium"]
        | Literal["strong"]
        | Literal["required"] = "required",
        /,
    ) -> None: ...
    def expression(self) -> Expression:
        """Get the expression object for the constraint."""
        ...
    def op(self) -> Literal["=="] | Literal["<="] | Literal[">="]:
        """Get the relational operator for the constraint."""
        ...
    def strength(self) -> float:
        """Get the strength for the constraint."""
        ...
    # ``constraint | strength`` (or ``strength | constraint``) yields a
    # Constraint associated with the given strength.
    def __or__(
        self,
        other: float
        | Literal["weak"]
        | Literal["medium"]
        | Literal["strong"]
        | Literal["required"],
    ) -> Constraint: ...
    def __ror__(
        self,
        other: float
        | Literal["weak"]
        | Literal["medium"]
        | Literal["strong"]
        | Literal["required"],
    ) -> Constraint: ...
|
| 188 |
+
|
| 189 |
+
class Solver:
    """Kiwi solver class."""

    def __init__(self) -> None: ...
    def addConstraint(self, constraint: Constraint, /) -> None:
        """Add a constraint to the solver."""
        ...
    def removeConstraint(self, constraint: Constraint, /) -> None:
        """Remove a constraint from the solver."""
        ...
    def hasConstraint(self, constraint: Constraint, /) -> bool:
        """Check whether the solver contains a constraint."""
        ...
    def addEditVariable(
        self,
        variable: Variable,
        strength: float
        | Literal["weak"]
        | Literal["medium"]
        | Literal["strong"]
        | Literal["required"],
        /,
    ) -> None:
        """Add an edit variable to the solver."""
        ...
    def removeEditVariable(self, variable: Variable, /) -> None:
        """Remove an edit variable from the solver."""
        ...
    def hasEditVariable(self, variable: Variable, /) -> bool:
        """Check whether the solver contains an edit variable."""
        ...
    def suggestValue(self, variable: Variable, value: int | float, /) -> None:
        """Suggest a desired value for an edit variable."""
        ...
    def updateVariables(self) -> None:
        """Update the values of the solver variables."""
        ...
    def reset(self) -> None:
        """Reset the solver to the initial empty starting condition."""
        ...
    def dump(self) -> None:
        """Dump a representation of the solver internals to stdout."""
        ...
    def dumps(self) -> str:
        """Dump a representation of the solver internals to a string."""
        ...
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/kiwisolver/py.typed
ADDED
|
File without changes
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/libpasteurize/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# empty to make this a package
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/libpasteurize/main.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pasteurize: automatic conversion of Python 3 code to clean 2/3 code
|
| 3 |
+
===================================================================
|
| 4 |
+
|
| 5 |
+
``pasteurize`` attempts to convert existing Python 3 code into source-compatible
|
| 6 |
+
Python 2 and 3 code.
|
| 7 |
+
|
| 8 |
+
Use it like this on Python 3 code:
|
| 9 |
+
|
| 10 |
+
$ pasteurize --verbose mypython3script.py
|
| 11 |
+
|
| 12 |
+
This removes any Py3-only syntax (e.g. new metaclasses) and adds these
|
| 13 |
+
import lines:
|
| 14 |
+
|
| 15 |
+
from __future__ import absolute_import
|
| 16 |
+
from __future__ import division
|
| 17 |
+
from __future__ import print_function
|
| 18 |
+
from __future__ import unicode_literals
|
| 19 |
+
from future import standard_library
|
| 20 |
+
standard_library.install_hooks()
|
| 21 |
+
from builtins import *
|
| 22 |
+
|
| 23 |
+
To write changes to the files, use the -w flag.
|
| 24 |
+
|
| 25 |
+
It also adds any other wrappers needed for Py2/3 compatibility.
|
| 26 |
+
|
| 27 |
+
Note that separate stages are not available (or needed) when converting from
|
| 28 |
+
Python 3 with ``pasteurize`` as they are when converting from Python 2 with
|
| 29 |
+
``futurize``.
|
| 30 |
+
|
| 31 |
+
The --all-imports option forces adding all ``__future__`` imports,
|
| 32 |
+
``builtins`` imports, and standard library aliases, even if they don't
|
| 33 |
+
seem necessary for the current state of each module. (This can simplify
|
| 34 |
+
testing, and can reduce the need to think about Py2 compatibility when editing
|
| 35 |
+
the code further.)
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
from __future__ import (absolute_import, print_function, unicode_literals)
|
| 40 |
+
|
| 41 |
+
import sys
|
| 42 |
+
import logging
|
| 43 |
+
import optparse
|
| 44 |
+
from lib2to3.main import main, warn, StdoutRefactoringTool
|
| 45 |
+
from lib2to3 import refactor
|
| 46 |
+
|
| 47 |
+
from future import __version__
|
| 48 |
+
from libpasteurize.fixes import fix_names
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def main(args=None):
    """Main program for the ``pasteurize`` command-line tool.

    Parses command-line options, resolves the set of fixers to run (explicit
    requests, extras implied by --all-imports, minus anything disabled with
    --nofix), then runs the lib2to3 refactoring machinery over the given
    files/directories (or stdin when "-" is passed).

    Returns a suggested exit status (0 success, 1 refactoring failure,
    2 usage error).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="pasteurize [options] file|dir ...")
    parser.add_option("-V", "--version", action="store_true",
                      help="Report the version number of pasteurize")
    parser.add_option("-a", "--all-imports", action="store_true",
                      help="Adds all __future__ and future imports to each module")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a fixer from being run.")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    # parser.add_option("-p", "--print-function", action="store_true",
    #                   help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files.")

    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    avail_fixes = fix_names
    # pasteurize input is Python 3, so print() is always a function.
    flags["print_function"] = True

    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.version:
        print(__version__)
        return 0
    if options.list_fixes:
        print("Available transformations for the -f/--fix option:")
        for fixname in sorted(avail_fixes):
            print(fixname)
        if not args:
            # Listing fixes with no files is a complete, successful run.
            return 0
    if not args:
        print("At least one file or directory argument required.",
              file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    if "-" in args:
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2

    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)

    # Resolve --nofix names to fully qualified fixer module names.
    unwanted_fixes = set()
    for fix in options.nofix:
        if ".fix_" in fix:
            unwanted_fixes.add(fix)
        else:
            # Infer the full module name for the fixer.
            # First ensure that no names clash (e.g.
            # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah):
            found = [f for f in avail_fixes
                     if f.endswith('fix_{0}'.format(fix))]
            if len(found) > 1:
                print("Ambiguous fixer name. Choose a fully qualified "
                      "module name instead from these:\n" +
                      "\n".join("  " + myf for myf in found),
                      file=sys.stderr)
                return 2
            elif len(found) == 0:
                print("Unknown fixer. Use --list-fixes or -l for a list.",
                      file=sys.stderr)
                return 2
            unwanted_fixes.add(found[0])

    extra_fixes = set()
    if options.all_imports:
        prefix = 'libpasteurize.fixes.'
        extra_fixes.add(prefix + 'fix_add_all__future__imports')
        extra_fixes.add(prefix + 'fix_add_future_standard_library_import')
        extra_fixes.add(prefix + 'fix_add_all_future_builtins')

    # Resolve --fix names; 'all' selects every available fixer.
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == 'all':
                all_present = True
            else:
                if ".fix_" in fix:
                    explicit.add(fix)
                else:
                    # Infer the full module name for the fixer.
                    # First ensure that no names clash (e.g.
                    # lib2to3.fixes.fix_blah and libpasteurize.fixes.fix_blah):
                    found = [f for f in avail_fixes
                             if f.endswith('fix_{0}'.format(fix))]
                    if len(found) > 1:
                        print("Ambiguous fixer name. Choose a fully qualified "
                              "module name instead from these:\n" +
                              "\n".join("  " + myf for myf in found),
                              file=sys.stderr)
                        return 2
                    elif len(found) == 0:
                        print("Unknown fixer. Use --list-fixes or -l for a list.",
                              file=sys.stderr)
                        return 2
                    explicit.add(found[0])
        if len(explicit & unwanted_fixes) > 0:
            print("Conflicting usage: the following fixers have been "
                  "simultaneously requested and disallowed:\n" +
                  "\n".join("  " + myf for myf in (explicit & unwanted_fixes)),
                  file=sys.stderr)
            return 2
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)

    # BUGFIX: parenthesized the union before the difference.  ``-`` binds
    # tighter than ``|``, so the previous expression
    # ``requested | extra_fixes - unwanted_fixes`` only removed unwanted
    # fixers from ``extra_fixes``, leaving --nofix unable to disable any
    # fixer in the default/requested set.
    fixer_names = (requested | extra_fixes) - unwanted_fixes

    # Initialize the refactoring tool
    rt = StdoutRefactoringTool(sorted(fixer_names), flags, set(),
                               options.nobackups, not options.no_diffs)

    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, None,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't supported on this platform.",
                      file=sys.stderr)
                return 1
    rt.summarize()

    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/__main__.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import sys
|
| 23 |
+
import optparse
|
| 24 |
+
import codecs
|
| 25 |
+
import warnings
|
| 26 |
+
import markdown
|
| 27 |
+
try:
|
| 28 |
+
# We use `unsafe_load` because users may need to pass in actual Python
|
| 29 |
+
# objects. As this is only available from the CLI, the user has much
|
| 30 |
+
# worse problems if an attacker can use this as an attack vector.
|
| 31 |
+
from yaml import unsafe_load as yaml_load
|
| 32 |
+
except ImportError: # pragma: no cover
|
| 33 |
+
try:
|
| 34 |
+
# Fall back to PyYAML <5.1
|
| 35 |
+
from yaml import load as yaml_load
|
| 36 |
+
except ImportError:
|
| 37 |
+
# Fall back to JSON
|
| 38 |
+
from json import load as yaml_load
|
| 39 |
+
|
| 40 |
+
import logging
|
| 41 |
+
from logging import DEBUG, WARNING, CRITICAL
|
| 42 |
+
|
| 43 |
+
logger = logging.getLogger('MARKDOWN')
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def parse_options(args=None, values=None):
    """
    Define and parse `optparse` options for command-line usage.

    Returns a 2-tuple: the keyword arguments to pass to
    `markdown.markdownFromFile`, and the logging level selected by the
    verbosity flags.
    """
    usage = """%prog [options] [INPUTFILE]
       (STDIN is assumed if no INPUTFILE is given)"""
    desc = "A Python implementation of John Gruber's Markdown. " \
           "https://Python-Markdown.github.io/"
    ver = "%%prog %s" % markdown.__version__

    opt_parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
    opt_parser.add_option("-f", "--file", dest="filename", default=None,
                          help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
                          metavar="OUTPUT_FILE")
    opt_parser.add_option("-e", "--encoding", dest="encoding",
                          help="Encoding for input and output files.",)
    opt_parser.add_option("-o", "--output_format", dest="output_format",
                          default='xhtml', metavar="OUTPUT_FORMAT",
                          help="Use output format 'xhtml' (default) or 'html'.")
    opt_parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
                          action='store_false', default=True,
                          help="Observe number of first item of ordered lists.")
    opt_parser.add_option("-x", "--extension", action="append", dest="extensions",
                          help="Load extension EXTENSION.", metavar="EXTENSION")
    opt_parser.add_option("-c", "--extension_configs",
                          dest="configfile", default=None,
                          help="Read extension configurations from CONFIG_FILE. "
                               "CONFIG_FILE must be of JSON or YAML format. YAML "
                               "format requires that a python YAML library be "
                               "installed. The parsed JSON or YAML must result in a "
                               "python dictionary which would be accepted by the "
                               "'extension_configs' keyword on the markdown.Markdown "
                               "class. The extensions must also be loaded with the "
                               "`--extension` option.",
                          metavar="CONFIG_FILE")
    opt_parser.add_option("-q", "--quiet", default=CRITICAL,
                          action="store_const", const=CRITICAL+10, dest="verbose",
                          help="Suppress all warnings.")
    opt_parser.add_option("-v", "--verbose",
                          action="store_const", const=WARNING, dest="verbose",
                          help="Print all warnings.")
    opt_parser.add_option("--noisy",
                          action="store_const", const=DEBUG, dest="verbose",
                          help="Print debug messages.")

    (options, args) = opt_parser.parse_args(args, values)

    # The first positional argument, if any, names the input file;
    # otherwise STDIN is used.
    input_file = args[0] if args else None

    if not options.extensions:
        options.extensions = []

    extension_configs = {}
    if options.configfile:
        # `yaml_load` was bound at import time to whichever parser is
        # available (PyYAML, or JSON as a fallback).
        with codecs.open(
            options.configfile, mode="r", encoding=options.encoding
        ) as fp:
            try:
                extension_configs = yaml_load(fp)
            except Exception as e:
                # Prepend the file name so the traceback identifies which
                # config failed, then let the original error propagate.
                message = "Failed parsing extension config file: %s" % \
                          options.configfile
                e.args = (message,) + e.args[1:]
                raise

    markdown_kwargs = {
        'input': input_file,
        'output': options.filename,
        'extensions': options.extensions,
        'extension_configs': extension_configs,
        'encoding': options.encoding,
        'output_format': options.output_format,
        'lazy_ol': options.lazy_ol
    }

    return markdown_kwargs, options.verbose
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def run():  # pragma: no cover
    """Run Markdown from the command line."""

    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)

    # Wire the 'MARKDOWN' logger to the console at the requested level.
    logger.setLevel(logging_level)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    if logging_level <= WARNING:
        # Ensure deprecation warnings get displayed
        warnings.filterwarnings('default')
        logging.captureWarnings(True)
        logging.getLogger('py.warnings').addHandler(console_handler)

    # Run
    markdown.markdownFromFile(**options)


if __name__ == '__main__':  # pragma: no cover
    # Support running module as a commandline command.
    # `python -m markdown [options] [args]`.
    run()
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/__meta__.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
# __version_info__ format:
|
| 23 |
+
# (major, minor, patch, dev/alpha/beta/rc/final, #)
|
| 24 |
+
# (1, 1, 2, 'dev', 0) => "1.1.2.dev0"
|
| 25 |
+
# (1, 1, 2, 'alpha', 1) => "1.1.2a1"
|
| 26 |
+
# (1, 2, 0, 'beta', 2) => "1.2b2"
|
| 27 |
+
# (1, 2, 0, 'rc', 4) => "1.2rc4"
|
| 28 |
+
# (1, 2, 0, 'final', 0) => "1.2"
|
| 29 |
+
__version_info__ = (3, 3, 7, 'final', 0)


def _get_version(version_info):
    """ Return a PEP 440-compliant version number built from *version_info*.

    *version_info* is a 5-tuple ``(major, minor, patch, release, build)``
    where ``release`` is one of 'dev', 'alpha', 'beta', 'rc' or 'final'.
    A zero patch component is omitted from the dotted prefix.
    """
    assert len(version_info) == 5
    assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final')

    major, minor, patch, release, build = version_info
    # Drop a trailing ".0" patch component (e.g. (1, 2, 0, ...) -> "1.2").
    numbers = (major, minor) if patch == 0 else (major, minor, patch)
    version = '.'.join(str(n) for n in numbers)

    if release == 'dev':
        version = '{}.dev{}'.format(version, build)
    elif release != 'final':
        # PEP 440 spells pre-releases with short suffixes: a / b / rc.
        suffixes = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
        version = '{}{}{}'.format(version, suffixes[release], build)

    return version


__version__ = _get_version(__version_info__)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/blockparser.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import xml.etree.ElementTree as etree
|
| 23 |
+
from . import util
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class State(list):
    """ Track the current and nested state of the parser.

    A thin stack built on ``list``, used by the BlockParser to record which
    kind of construct is being processed at each level of nesting. Setting
    a state pushes it onto the stack and resetting pops it, so every ``set``
    made while descending into a nested block must be paired with a
    ``reset`` on the way back out, or the state becomes corrupted.

    Although every list method remains available, only the three methods
    defined below need be used.
    """

    def set(self, state):
        """ Push a new state onto the stack. """
        self.append(state)

    def reset(self):
        """ Pop the most recently set state off the stack. """
        self.pop()

    def isstate(self, state):
        """ Return True if the current (top) state equals ``state``. """
        return bool(self) and self[-1] == state
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class BlockParser:
    """ Parse Markdown blocks into an ElementTree object.

    Stitches the registered BlockProcessors together, looping through them
    and building an ElementTree from the source blocks.
    """

    def __init__(self, md):
        self.blockprocessors = util.Registry()
        self.state = State()
        self.md = md

    @property
    @util.deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later
        return self.md

    def parseDocument(self, lines):
        """ Parse a markdown document into an ElementTree.

        Given a list of lines, an ElementTree object (not just a parent
        Element) is created, and the root element is handed to the parser
        as the parent. The ElementTree object is returned.

        This should only be called on an entire document, not pieces.
        """
        self.root = etree.Element(self.md.doc_tag)
        self.parseChunk(self.root, '\n'.join(lines))
        return etree.ElementTree(self.root)

    def parseChunk(self, parent, text):
        """ Parse a chunk of markdown text and attach to given etree node.

        ``text`` is usually several blocks separated by blank lines, but it
        may be a single block. Extensions generally call this method when
        they need block parsing.

        The ``parent`` etree Element is altered in place; nothing is
        returned.
        """
        self.parseBlocks(parent, text.split('\n\n'))

    def parseBlocks(self, parent, blocks):
        """ Process blocks of markdown text and attach to given etree node.

        Steps each registered blockprocessor over the list of ``blocks``
        until none remain. Mostly used internally, but kept public so that
        extension BlockProcessors can call it to recursively parse a nested
        block.
        """
        while blocks:
            for processor in self.blockprocessors:
                # A run() result of anything but False means the processor
                # consumed (part of) the block list; restart the scan on
                # whatever blocks remain.
                if processor.test(parent, blocks[0]) and \
                        processor.run(parent, blocks) is not False:
                    break
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/blockprocessors.py
ADDED
|
@@ -0,0 +1,623 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
|
| 21 |
+
CORE MARKDOWN BLOCKPARSER
|
| 22 |
+
===========================================================================
|
| 23 |
+
|
| 24 |
+
This parser handles basic parsing of Markdown blocks. It doesn't concern
|
| 25 |
+
itself with inline elements such as **bold** or *italics*, but rather just
|
| 26 |
+
catches blocks, lists, quotes, etc.
|
| 27 |
+
|
| 28 |
+
The BlockParser is made up of a bunch of BlockProcessors, each handling a
|
| 29 |
+
different type of block. Extensions may add/replace/remove BlockProcessors
|
| 30 |
+
as they need to alter how markdown blocks are parsed.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
import logging
|
| 34 |
+
import re
|
| 35 |
+
import xml.etree.ElementTree as etree
|
| 36 |
+
from . import util
|
| 37 |
+
from .blockparser import BlockParser
|
| 38 |
+
|
| 39 |
+
logger = logging.getLogger('MARKDOWN')
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def build_block_parser(md, **kwargs):
    """ Build the default block parser used by Markdown. """
    parser = BlockParser(md)
    # (processor class, registered name, priority) — highest priority first.
    default_processors = (
        (EmptyBlockProcessor, 'empty', 100),
        (ListIndentProcessor, 'indent', 90),
        (CodeBlockProcessor, 'code', 80),
        (HashHeaderProcessor, 'hashheader', 70),
        (SetextHeaderProcessor, 'setextheader', 60),
        (HRProcessor, 'hr', 50),
        (OListProcessor, 'olist', 40),
        (UListProcessor, 'ulist', 30),
        (BlockQuoteProcessor, 'quote', 20),
        (ReferenceProcessor, 'reference', 15),
        (ParagraphProcessor, 'paragraph', 10),
    )
    for processor_class, name, priority in default_processors:
        parser.blockprocessors.register(processor_class(parser), name, priority)
    return parser
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class BlockProcessor:
    """ Base class for block processors.

    Subclasses work with the source text and the etree through the helper
    methods below, and must each define their own ``test`` and ``run``.
    ``test`` reports whether the current block belongs to this processor;
    when it returns True, the parser calls the processor's ``run``.
    """

    def __init__(self, parser):
        self.parser = parser
        self.tab_length = parser.md.tab_length

    def lastChild(self, parent):
        """ Return the last child of an etree element, or None if childless. """
        return parent[-1] if len(parent) else None

    def detab(self, text, length=None):
        """ Remove a tab from the front of each line of the given text. """
        if length is None:
            length = self.tab_length
        indent = ' ' * length
        lines = text.split('\n')
        detabbed = []
        for line in lines:
            if line.startswith(indent):
                detabbed.append(line[length:])
            elif not line.strip():
                # Blank lines stay part of the detabbed prefix.
                detabbed.append('')
            else:
                # First insufficiently indented line ends the detabbed run.
                break
        return '\n'.join(detabbed), '\n'.join(lines[len(detabbed):])

    def looseDetab(self, text, level=1):
        """ Remove a tab from front of lines but allowing dedented lines. """
        prefix = ' ' * (self.tab_length * level)
        width = len(prefix)
        stripped = [
            line[width:] if line.startswith(prefix) else line
            for line in text.split('\n')
        ]
        return '\n'.join(stripped)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        Called on each processor in turn to decide whether the given block
        of text is of its type; must return ``True`` or ``False``. The test
        may be as simple as ``block.startswith(some_string)`` or a complex
        regular expression. Because the block type can depend on its parent
        (e.g. inside a list), the parent etree element is also provided.

        Keywords:

        * ``parent``: A etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass  # pragma: no cover

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        Invoked once ``test`` has matched. Both ``parent`` and the blocks
        list are edited in place — there is no mechanism to return
        replacements — so implementations should add SubElements or text to
        the parent, and ``pop``/``insert`` items on the blocks list.

        Keywords:

        * ``parent``: A etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass  # pragma: no cover
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:
        * a list item
            process this part

            or this part

    """

    ITEM_TYPES = ['li']
    LIST_TYPES = ['ul', 'ol']

    def __init__(self, *args):
        super().__init__(*args)
        # One indent unit is `tab_length` spaces; the regex captures every
        # complete unit of leading indentation on the block's first line.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)

    def test(self, parent, block):
        # Fire only for an indented block nested inside a list: either the
        # parent is an `li`, or the parent's last child is a `ul`/`ol`.
        # The 'detabbed' state guard prevents re-entry while a detabbed
        # child block is already being processed.
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or
                (len(parent) and parent[-1] is not None and
                    (parent[-1].tag in self.LIST_TYPES)))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)

        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a 'ul' or 'ol' child list
            # with a member. If that is the case, then that should be the
            # parent. This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see OListProcessor
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a li. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                # The p must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sublist.
                p = etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            # No suitable li found: start a fresh item under the sibling list.
            self.create_item(sibling, block)
        # Pop the 'detabbed' guard set above so nesting state stays balanced.
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level.

        Returns a 2-tuple ``(level, parent)`` where ``parent`` is the etree
        element the detabbed block should be parsed under.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # True division yields a float; it is only ever compared
            # against the integer `level` below.
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if (child is not None and
               (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class CodeBlockProcessor(BlockProcessor):
    """ Process indented code blocks. """

    def test(self, parent, block):
        # A code block is any block indented by at least one tab width.
        return block.startswith(' '*self.tab_length)

    def run(self, parent, blocks):
        last_child = self.lastChild(parent)
        # Split off any unindented trailing lines now; both branches below
        # operate on the detabbed portion only.
        block, remainder = self.detab(blocks.pop(0))
        continues_previous = (
            last_child is not None and last_child.tag == "pre" and
            len(last_child) and last_child[0].tag == "code"
        )
        if continues_previous:
            # The previous block was a code block. As blank lines do not
            # start new code blocks, append this block to the previous one,
            # restoring the linebreaks removed by the blank-line split.
            code = last_child[0]
            code.text = util.AtomicString(
                '{}\n{}\n'.format(code.text, util.code_escape(block.rstrip()))
            )
        else:
            # This is a new codeblock. Create the elements and insert text.
            pre = etree.SubElement(parent, 'pre')
            code = etree.SubElement(pre, 'code')
            code.text = util.AtomicString('%s\n' % util.code_escape(block.rstrip()))
        if remainder:
            # Unindented line(s) followed the indented ones; queue them as
            # the next block for future processing.
            blocks.insert(0, remainder)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes: lines prefixed with an optional ``> `` marker. """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        # The recursion-limit check guards against deeply nested quotes
        # blowing the interpreter stack during recursive parsing.
        return bool(self.RE.search(block)) and not util.nearing_recursion_limit()

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            # Hand any lines before the quote marker back to the parser
            # first, so they are processed ahead of the quote itself.
            self.parser.parseBlocks(parent, [block[:m.start()]])
            # Strip the leading ``> `` marker from every quoted line.
            quoted = '\n'.join(
                self.clean(line) for line in block[m.start():].split('\n')
            )
            previous = self.lastChild(parent)
            if previous is not None and previous.tag == "blockquote":
                # Previous block was a blockquote so set that as this blocks parent
                quote = previous
            else:
                # This is a new blockquote. Create a new parent element.
                quote = etree.SubElement(parent, 'blockquote')
            # Recursively parse block with blockquote as parent.
            # change parser state so blockquotes embedded in lists use p tags
            self.parser.state.set('blockquote')
            self.parser.parseChunk(quote, quoted)
            self.parser.state.reset()

    def clean(self, line):
        """ Remove ``>`` from beginning of a line. """
        if line.strip() == ">":
            return ""
        m = self.RE.match(line)
        return m.group(2) if m else line
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG = 'ol'
    # The integer (python string) with which the list starts (default='1').
    # E.g. if the list is initialized as:
    #     3. Item
    # the ol tag will get a start="3" attribute.
    STARTSWITH = '1'
    # Lazy ol - ignore STARTSWITH and never emit a start attribute.
    LAZY_OL = True
    # List of allowed sibling tags (a previous ol/ul continues the list).
    SIBLING_TAGS = ['ol', 'ul']

    def __init__(self, parser):
        super().__init__(parser)
        # Detect an item (``1. item``). ``group(1)`` contains contents of item.
        self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
        # Detect items on secondary lines. They can be of either list type.
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
                                   (self.tab_length - 1))
        # Detect indented (nested) items of either type.
        self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
                                    (self.tab_length, self.tab_length * 2 - 1))

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)

        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent.
            lst = sibling
            # Make sure previous item is in a p - if the item has text,
            # then it isn't in a p.
            if lst[-1].text:
                # Since it's possible there are other children for this
                # sibling, we can't just SubElement the p, we need to
                # insert it as the first item.
                p = etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # If the last item has a tail, then the tail needs to be put in a p;
            # likely only when a header is not followed by a blank line.
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''

            # Parse first block differently as it gets wrapped in a p.
            li = etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # This catches the edge case of a multi-item indented list whose
            # first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also ListIndentProcessor
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set.
            if not self.LAZY_OL and self.STARTSWITH != '1':
                lst.attrib['start'] = self.STARTSWITH

        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*self.tab_length):
                # Item is indented. Parse with last item as parent.
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent.
                li = etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item.
                # Check first item for the start index.
                if not items and self.TAG == 'ol':
                    # Detect the integer value of first list item.
                    INTEGER_RE = re.compile(r'(\d+)')
                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
                # Append to the list.
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '{}\n{}'.format(items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '{}\n{}'.format(items[-1], line)
        return items
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """

    TAG = 'ul'

    def __init__(self, parser):
        super().__init__(parser)
        # Detect an item (``* item``, ``+ item`` or ``- item``).
        # ``group(1)`` contains the contents of the item.
        self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % (self.tab_length - 1))
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# Heading`` through ``###### Heading``). """

    # Detect a header at start of any line in block.  ``level`` is the run of
    # 1-6 ``#`` characters; ``header`` is the text, with escaped chars allowed
    # and any trailing run of ``#`` discarded.
    RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first, recursively
                # parse those lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE; heading level is the
            # number of leading '#' characters.
            h = etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:  # pragma: no cover
            # This should never happen (test() matched), but just in case...
            # Note: Logger.warn is deprecated (removed in Python 3.13); use
            # warning() with lazy %-style args so formatting is skipped when
            # the level is disabled.
            logger.warning("We've got a problem header: %r", block)
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
class SetextHeaderProcessor(BlockProcessor):
    """ Handle Setext-style headers (text underlined with ``=`` or ``-``). """

    # A Setext header occupies the first two lines of a block.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # ``=`` underline maps to h1, ``-`` underline to h2.
        level = 1 if lines[1].startswith('=') else 2
        header = etree.SubElement(parent, 'h%d' % level)
        header.text = lines[0].strip()
        rest = lines[2:]
        if rest:
            # The block had lines beyond the header; requeue them for later.
            blocks.insert(0, '\n'.join(rest))
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules. """

    # Python's re module doesn't officially support atomic grouping. However you can fake it.
    # See https://stackoverflow.com/a/13577411/866026
    RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        m = self.SEARCH_RE.search(block)
        if m:
            # Save match object on class instance so we can use it later.
            # NOTE: run() depends on this side effect - the parser always
            # calls test() immediately before run() on the same block.
            self.match = m
            return True
        return False

    def run(self, parent, blocks):
        block = blocks.pop(0)
        # Match saved by the preceding test() call for this block.
        match = self.match
        # Check for lines in block before hr.
        prelines = block[:match.start()].rstrip('\n')
        if prelines:
            # Recursively parse lines before hr so they get parsed first.
            self.parser.parseBlocks(parent, [prelines])
        # create hr
        etree.SubElement(parent, 'hr')
        # check for lines in block after hr.
        postlines = block[match.end():].lstrip('\n')
        if postlines:
            # Add lines after hr to master blocks for later parsing.
            blocks.insert(0, postlines)
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
class EmptyBlockProcessor(BlockProcessor):
    """ Handle blocks which are empty or begin with a blank line. """

    def test(self, parent, block):
        return not block or block.startswith('\n')

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if block:
            # Block starts with a blank line: consume just that one newline
            # and requeue whatever follows it.
            filler = '\n'
            remainder = block[1:]
            if remainder:
                blocks.insert(0, remainder)
        else:
            # Entirely empty block.
            filler = '\n\n'
        sibling = self.lastChild(parent)
        is_open_codeblock = (
            sibling is not None
            and sibling.tag == 'pre'
            and len(sibling)
            and sibling[0].tag == 'code'
        )
        if is_open_codeblock:
            # The previous block was a code block; append the blank line(s)
            # to its text so whitespace is preserved.
            sibling[0].text = util.AtomicString(
                '{}{}'.format(sibling[0].text, filler)
            )
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
class ReferenceProcessor(BlockProcessor):
    """ Process link references. """
    # Matches ``[id]: url "optional title"``.
    # group(1): reference id; group(2): URL (possibly on the next line);
    # group(5)/group(6): title in quotes or parentheses respectively.
    RE = re.compile(
        r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE
    )

    def test(self, parent, block):
        # Always claim the block; run() reports back whether it matched.
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            # Reference ids are case-insensitive.
            id = m.group(1).strip().lower()
            # Strip optional angle brackets around the URL.
            link = m.group(2).lstrip('<').rstrip('>')
            # Title from either the quoted or the parenthesized form.
            title = m.group(5) or m.group(6)
            self.parser.md.references[id] = (link, title)
            if block[m.end():].strip():
                # Add any content after match back to blocks as separate block
                blocks.insert(0, block[m.end():].lstrip('\n'))
            if block[:m.start()].strip():
                # Add any content before match back to blocks as separate block
                blocks.insert(0, block[:m.start()].rstrip('\n'))
            return True
        # No match. Restore block.
        blocks.insert(0, block)
        return False
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks. """

    def test(self, parent, block):
        # Catch-all fallback processor: accepts every block.
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if block.strip():
            # Not a blank block. Add to parent, otherwise throw it away.
            if self.parser.state.isstate('list'):
                # The parent is a tight-list.
                #
                # Check for any children. This will likely only happen in a
                # tight-list when a header isn't followed by a blank line.
                # For example:
                #
                #     * # Header
                #       Line 2 of list item - not part of header.
                sibling = self.lastChild(parent)
                if sibling is not None:
                    # Insert after sibling (as the sibling's tail text).
                    if sibling.tail:
                        sibling.tail = '{}\n{}'.format(sibling.tail, block)
                    else:
                        sibling.tail = '\n%s' % block
                else:
                    # Append to parent.text
                    if parent.text:
                        parent.text = '{}\n{}'.format(parent.text, block)
                    else:
                        parent.text = block.lstrip()
            else:
                # Create a regular paragraph
                p = etree.SubElement(parent, 'p')
                p.text = block.lstrip()
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/core.py
ADDED
|
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import codecs
|
| 23 |
+
import sys
|
| 24 |
+
import logging
|
| 25 |
+
import importlib
|
| 26 |
+
from . import util
|
| 27 |
+
from .preprocessors import build_preprocessors
|
| 28 |
+
from .blockprocessors import build_block_parser
|
| 29 |
+
from .treeprocessors import build_treeprocessors
|
| 30 |
+
from .inlinepatterns import build_inlinepatterns
|
| 31 |
+
from .postprocessors import build_postprocessors
|
| 32 |
+
from .extensions import Extension
|
| 33 |
+
from .serializers import to_html_string, to_xhtml_string
|
| 34 |
+
|
| 35 |
+
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
logger = logging.getLogger('MARKDOWN')
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Markdown:
    """Convert Markdown to HTML."""

    doc_tag = "div"  # Element used to wrap document - later removed

    # Map of output-format name -> serializer callable.
    output_formats = {
        'html': to_html_string,
        'xhtml': to_xhtml_string,
    }

    def __init__(self, **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
            If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used
            as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is
            assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If
            no class is specified, then a `makeExtension` function is called within the specified module.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml": Outputs XHTML style tags. Default.
            * "html": Outputs HTML style tags.
        * tab_length: Length of tabs in the source. Default: 4

        """

        self.tab_length = kwargs.get('tab_length', 4)

        # Characters which may be backslash-escaped in the source.
        self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
                              '(', ')', '>', '#', '+', '-', '.', '!']

        self.block_level_elements = [
            # Elements which are invalid to wrap in a `<p>` tag.
            # See https://w3c.github.io/html/grouping-content.html#the-p-element
            'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
            'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
            'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
            'p', 'pre', 'section', 'table', 'ul',
            # Other elements which Markdown should not be mucking up the contents of.
            'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend',
            'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
            'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video'
        ]

        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        self.build_parser()

        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.registerExtensions(extensions=kwargs.get('extensions', []),
                                configs=kwargs.get('extension_configs', {}))
        self.set_output_format(kwargs.get('output_format', 'xhtml'))
        self.reset()

    def build_parser(self):
        """ Build the parser from the various parts. """
        self.preprocessors = build_preprocessors(self)
        self.parser = build_block_parser(self)
        self.inlinePatterns = build_inlinepatterns(self)
        self.treeprocessors = build_treeprocessors(self)
        self.postprocessors = build_postprocessors(self)
        return self

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
           be strings or objects.
        * configs: A dictionary mapping extension names to config options.

        """
        for ext in extensions:
            if isinstance(ext, str):
                # Resolve a string name to an Extension instance.
                ext = self.build_extension(ext, configs.get(ext, {}))
            if isinstance(ext, Extension):
                ext._extendMarkdown(self)
                logger.debug(
                    'Successfully loaded extension "%s.%s".'
                    % (ext.__class__.__module__, ext.__class__.__name__)
                )
            elif ext is not None:
                raise TypeError(
                    'Extension "{}.{}" must be of type: "{}.{}"'.format(
                        ext.__class__.__module__, ext.__class__.__name__,
                        Extension.__module__, Extension.__name__
                    )
                )
        return self

    def build_extension(self, ext_name, configs):
        """
        Build extension from a string name, then return an instance.

        First attempt to load an entry point. The string name must be registered as an entry point in the
        `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.
        If multiple distributions have registered the same name, the first one found is returned.

        If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and
        return an instance. If no class is specified, import the module and call a `makeExtension` function and return
        the Extension instance returned by that function.
        """
        configs = dict(configs)

        # First priority: a registered entry point.
        entry_points = [ep for ep in util.INSTALLED_EXTENSIONS if ep.name == ext_name]
        if entry_points:
            ext = entry_points[0].load()
            return ext(**configs)

        # Get class name (if provided): `path.to.module:ClassName`
        ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '')

        try:
            module = importlib.import_module(ext_name)
            logger.debug(
                'Successfully imported extension module "%s".' % ext_name
            )
        except ImportError as e:
            # Re-raise with a clearer message but the original traceback.
            message = 'Failed loading extension "%s".' % ext_name
            e.args = (message,) + e.args[1:]
            raise

        if class_name:
            # Load given class name from module.
            return getattr(module, class_name)(**configs)
        else:
            # Expect makeExtension() function to return a class.
            try:
                return module.makeExtension(**configs)
            except AttributeError as e:
                message = e.args[0]
                message = "Failed to initiate extension " \
                          "'%s': %s" % (ext_name, message)
                e.args = (message,) + e.args[1:]
                raise

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)
        return self

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()

        # Give every registered extension a chance to reset its own state.
        for extension in self.registeredExtensions:
            if hasattr(extension, 'reset'):
                extension.reset()

        return self

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        self.output_format = format.lower().rstrip('145')  # ignore num
        try:
            self.serializer = self.output_formats[self.output_format]
        except KeyError as e:
            valid_formats = list(self.output_formats.keys())
            valid_formats.sort()
            message = 'Invalid Output Format: "%s". Use one of %s.' \
                      % (self.output_format,
                         '"' + '", "'.join(valid_formats) + '"')
            e.args = (message,) + e.args[1:]
            raise
        return self

    def is_block_level(self, tag):
        """Check if the tag is a block level HTML tag."""
        if isinstance(tag, str):
            return tag.lower().rstrip('/') in self.block_level_elements
        # Some ElementTree tags are not strings, so return False.
        return False

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.

        """

        # Fixup the source text
        if not source.strip():
            return ''  # a blank unicode string

        try:
            source = str(source)
        except UnicodeDecodeError as e:  # pragma: no cover
            # Customise error message while maintaining original trackback
            e.reason += '. -- Note: Markdown only accepts unicode input!'
            raise

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors:
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors:
            newRoot = treeprocessor.run(root)
            if newRoot is not None:
                root = newRoot

        # Serialize _properly_. Strip top-level tags.
        output = self.serializer(root)
        if self.stripTopLevelTags:
            try:
                # '<div>' is doc_tag plus the surrounding angle brackets.
                start = output.index(
                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
                end = output.rindex('</%s>' % self.doc_tag)
                output = output[start:end].strip()
            except ValueError as e:  # pragma: no cover
                if output.strip().endswith('<%s />' % self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    raise ValueError('Markdown failed to strip top-level '
                                     'tags. Document=%r' % output.strip()) from e

        # Run the text post-processors
        for pp in self.postprocessors:
            output = pp.run(output)

        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file. The 'xmlcharrefreplace' error handler is
        used when encoding the output.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown. (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: File object or path. Reads from stdin if `None`.
        * output: File object or path. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """

        encoding = encoding or "utf-8"

        # Read the source
        if input:
            if isinstance(input, str):
                input_file = codecs.open(input, mode="r", encoding=encoding)
            else:
                input_file = codecs.getreader(encoding)(input)
            text = input_file.read()
            input_file.close()
        else:
            text = sys.stdin.read()
            if not isinstance(text, str):  # pragma: no cover
                text = text.decode(encoding)

        text = text.lstrip('\ufeff')  # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if output:
            if isinstance(output, str):
                output_file = codecs.open(output, "w",
                                          encoding=encoding,
                                          errors="xmlcharrefreplace")
                output_file.write(html)
                output_file.close()
            else:
                writer = codecs.getwriter(encoding)
                output_file = writer(output, errors="xmlcharrefreplace")
                output_file.write(html)
                # Don't close here. User may want to write more.
        else:
            # Encode manually and write bytes to stdout.
            html = html.encode(encoding, "xmlcharrefreplace")
            try:
                # Write bytes directly to buffer (Python 3).
                sys.stdout.buffer.write(html)
            except AttributeError:  # pragma: no cover
                # Probably Python 2, which works with bytes by default.
                sys.stdout.write(html)

        return self
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
"""
|
| 363 |
+
EXPORTED FUNCTIONS
|
| 364 |
+
=============================================================================
|
| 365 |
+
|
| 366 |
+
Those are the two functions we really mean to export: markdown() and
|
| 367 |
+
markdownFromFile().
|
| 368 |
+
"""
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def markdown(text, **kwargs):
    """Convert a markdown string to HTML and return HTML as a unicode string.

    A convenience wrapper around the `Markdown` class for the common
    one-shot case: build an instance with the given options (loading any
    requested extensions) and run it once over *text*.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * Any arguments accepted by the Markdown class.

    Returns: An HTML document as a string.

    """
    return Markdown(**kwargs).convert(text)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def markdownFromFile(**kwargs):
|
| 391 |
+
"""Read markdown code from a file and write it to a file or a stream.
|
| 392 |
+
|
| 393 |
+
This is a shortcut function which initializes an instance of Markdown,
|
| 394 |
+
and calls the convertFile method rather than convert.
|
| 395 |
+
|
| 396 |
+
Keyword arguments:
|
| 397 |
+
|
| 398 |
+
* input: a file name or readable object.
|
| 399 |
+
* output: a file name or writable object.
|
| 400 |
+
* encoding: Encoding of input and output.
|
| 401 |
+
* Any arguments accepted by the Markdown class.
|
| 402 |
+
|
| 403 |
+
"""
|
| 404 |
+
md = Markdown(**kwargs)
|
| 405 |
+
md.convertFile(kwargs.get('input', None),
|
| 406 |
+
kwargs.get('output', None),
|
| 407 |
+
kwargs.get('encoding', None))
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/inlinepatterns.py
ADDED
|
@@ -0,0 +1,892 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
|
| 21 |
+
INLINE PATTERNS
|
| 22 |
+
=============================================================================
|
| 23 |
+
|
| 24 |
+
Inline patterns such as *emphasis* are handled by means of auxiliary
|
| 25 |
+
objects, one per pattern. Pattern objects must be instances of classes
|
| 26 |
+
that extend markdown.Pattern. Each pattern object uses a single regular
|
| 27 |
+
expression and needs support the following methods:
|
| 28 |
+
|
| 29 |
+
pattern.getCompiledRegExp() # returns a regular expression
|
| 30 |
+
|
| 31 |
+
pattern.handleMatch(m) # takes a match object and returns
|
| 32 |
+
# an ElementTree element or just plain text
|
| 33 |
+
|
| 34 |
+
All of python markdown's built-in patterns subclass from Pattern,
|
| 35 |
+
but you can add additional patterns that don't.
|
| 36 |
+
|
| 37 |
+
Also note that all the regular expressions used by inline must
|
| 38 |
+
capture the whole block. For this reason, they all start with
|
| 39 |
+
'^(.*)' and end with '(.*)!'. In case with built-in expression
|
| 40 |
+
Pattern takes care of adding the "^(.*)" and "(.*)!".
|
| 41 |
+
|
| 42 |
+
Finally, the order in which regular expressions are applied is very
|
| 43 |
+
important - e.g. if we first replace http://.../ links with <a> tags
|
| 44 |
+
and _then_ try to replace inline html, we would end up with a mess.
|
| 45 |
+
So, we apply the expressions in the following order:
|
| 46 |
+
|
| 47 |
+
* escape and backticks have to go before everything else, so
|
| 48 |
+
that we can preempt any markdown patterns by escaping them.
|
| 49 |
+
|
| 50 |
+
* then we handle auto-links (must be done before inline html)
|
| 51 |
+
|
| 52 |
+
* then we handle inline HTML. At this point we will simply
|
| 53 |
+
replace all inline HTML strings with a placeholder and add
|
| 54 |
+
the actual HTML to a hash.
|
| 55 |
+
|
| 56 |
+
* then inline images (must be done before links)
|
| 57 |
+
|
| 58 |
+
* then bracketed links, first regular then reference-style
|
| 59 |
+
|
| 60 |
+
* finally we apply strong and emphasis
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
from . import util
|
| 64 |
+
from collections import namedtuple
|
| 65 |
+
import re
|
| 66 |
+
import xml.etree.ElementTree as etree
|
| 67 |
+
try: # pragma: no cover
|
| 68 |
+
from html import entities
|
| 69 |
+
except ImportError: # pragma: no cover
|
| 70 |
+
import htmlentitydefs as entities
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def build_inlinepatterns(md, **kwargs):
|
| 74 |
+
""" Build the default set of inline patterns for Markdown. """
|
| 75 |
+
inlinePatterns = util.Registry()
|
| 76 |
+
inlinePatterns.register(BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190)
|
| 77 |
+
inlinePatterns.register(EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180)
|
| 78 |
+
inlinePatterns.register(ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170)
|
| 79 |
+
inlinePatterns.register(LinkInlineProcessor(LINK_RE, md), 'link', 160)
|
| 80 |
+
inlinePatterns.register(ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150)
|
| 81 |
+
inlinePatterns.register(
|
| 82 |
+
ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140
|
| 83 |
+
)
|
| 84 |
+
inlinePatterns.register(
|
| 85 |
+
ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130
|
| 86 |
+
)
|
| 87 |
+
inlinePatterns.register(
|
| 88 |
+
ShortImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'short_image_ref', 125
|
| 89 |
+
)
|
| 90 |
+
inlinePatterns.register(AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120)
|
| 91 |
+
inlinePatterns.register(AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110)
|
| 92 |
+
inlinePatterns.register(SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100)
|
| 93 |
+
inlinePatterns.register(HtmlInlineProcessor(HTML_RE, md), 'html', 90)
|
| 94 |
+
inlinePatterns.register(HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80)
|
| 95 |
+
inlinePatterns.register(SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70)
|
| 96 |
+
inlinePatterns.register(AsteriskProcessor(r'\*'), 'em_strong', 60)
|
| 97 |
+
inlinePatterns.register(UnderscoreProcessor(r'_'), 'em_strong2', 50)
|
| 98 |
+
return inlinePatterns
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
"""
|
| 102 |
+
The actual regular expressions for patterns
|
| 103 |
+
-----------------------------------------------------------------------------
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
NOIMG = r'(?<!\!)'
|
| 107 |
+
|
| 108 |
+
# `e=f()` or ``e=f("`")``
|
| 109 |
+
BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))'
|
| 110 |
+
|
| 111 |
+
# \<
|
| 112 |
+
ESCAPE_RE = r'\\(.)'
|
| 113 |
+
|
| 114 |
+
# *emphasis*
|
| 115 |
+
EMPHASIS_RE = r'(\*)([^\*]+)\1'
|
| 116 |
+
|
| 117 |
+
# **strong**
|
| 118 |
+
STRONG_RE = r'(\*{2})(.+?)\1'
|
| 119 |
+
|
| 120 |
+
# __smart__strong__
|
| 121 |
+
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)'
|
| 122 |
+
|
| 123 |
+
# _smart_emphasis_
|
| 124 |
+
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)'
|
| 125 |
+
|
| 126 |
+
# __strong _em__
|
| 127 |
+
SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)'
|
| 128 |
+
|
| 129 |
+
# ***strongem*** or ***em*strong**
|
| 130 |
+
EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}'
|
| 131 |
+
|
| 132 |
+
# ___strongem___ or ___em_strong__
|
| 133 |
+
EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}'
|
| 134 |
+
|
| 135 |
+
# ***strong**em*
|
| 136 |
+
STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1'
|
| 137 |
+
|
| 138 |
+
# ___strong__em_
|
| 139 |
+
STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1'
|
| 140 |
+
|
| 141 |
+
# **strong*em***
|
| 142 |
+
STRONG_EM3_RE = r'(\*)\1(?!\1)([^*]+?)\1(?!\1)(.+?)\1{3}'
|
| 143 |
+
|
| 144 |
+
# [text](url) or [text](<url>) or [text](url "title")
|
| 145 |
+
LINK_RE = NOIMG + r'\['
|
| 146 |
+
|
| 147 |
+
#  or 
|
| 148 |
+
IMAGE_LINK_RE = r'\!\['
|
| 149 |
+
|
| 150 |
+
# [Google][3]
|
| 151 |
+
REFERENCE_RE = LINK_RE
|
| 152 |
+
|
| 153 |
+
# ![alt text][2]
|
| 154 |
+
IMAGE_REFERENCE_RE = IMAGE_LINK_RE
|
| 155 |
+
|
| 156 |
+
# stand-alone * or _
|
| 157 |
+
NOT_STRONG_RE = r'((^|\s)(\*|_)(\s|$))'
|
| 158 |
+
|
| 159 |
+
# <http://www.123.com>
|
| 160 |
+
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>'
|
| 161 |
+
|
| 162 |
+
# <me@example.com>
|
| 163 |
+
AUTOMAIL_RE = r'<([^<> !]+@[^@<> ]+)>'
|
| 164 |
+
|
| 165 |
+
# <...>
|
| 166 |
+
HTML_RE = r'(<(\/?[a-zA-Z][^<>@ ]*( [^<>]*)?|!--(?:(?!<!--|-->).)*--)>)'
|
| 167 |
+
|
| 168 |
+
# "&" (decimal) or "&" (hex) or "&" (named)
|
| 169 |
+
ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)'
|
| 170 |
+
|
| 171 |
+
# two spaces at end of line
|
| 172 |
+
LINE_BREAK_RE = r' \n'
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def dequote(string):
|
| 176 |
+
"""Remove quotes from around a string."""
|
| 177 |
+
if ((string.startswith('"') and string.endswith('"')) or
|
| 178 |
+
(string.startswith("'") and string.endswith("'"))):
|
| 179 |
+
return string[1:-1]
|
| 180 |
+
else:
|
| 181 |
+
return string
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class EmStrongItem(namedtuple('EmStrongItem', ['pattern', 'builder', 'tags'])):
|
| 185 |
+
"""Emphasis/strong pattern item."""
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
"""
|
| 189 |
+
The pattern classes
|
| 190 |
+
-----------------------------------------------------------------------------
|
| 191 |
+
"""
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class Pattern: # pragma: no cover
|
| 195 |
+
"""Base class that inline patterns subclass. """
|
| 196 |
+
|
| 197 |
+
ANCESTOR_EXCLUDES = tuple()
|
| 198 |
+
|
| 199 |
+
def __init__(self, pattern, md=None):
|
| 200 |
+
"""
|
| 201 |
+
Create an instant of an inline pattern.
|
| 202 |
+
|
| 203 |
+
Keyword arguments:
|
| 204 |
+
|
| 205 |
+
* pattern: A regular expression that matches a pattern
|
| 206 |
+
|
| 207 |
+
"""
|
| 208 |
+
self.pattern = pattern
|
| 209 |
+
self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern,
|
| 210 |
+
re.DOTALL | re.UNICODE)
|
| 211 |
+
|
| 212 |
+
self.md = md
|
| 213 |
+
|
| 214 |
+
@property
|
| 215 |
+
@util.deprecated("Use 'md' instead.")
|
| 216 |
+
def markdown(self):
|
| 217 |
+
# TODO: remove this later
|
| 218 |
+
return self.md
|
| 219 |
+
|
| 220 |
+
def getCompiledRegExp(self):
|
| 221 |
+
""" Return a compiled regular expression. """
|
| 222 |
+
return self.compiled_re
|
| 223 |
+
|
| 224 |
+
def handleMatch(self, m):
|
| 225 |
+
"""Return a ElementTree element from the given match.
|
| 226 |
+
|
| 227 |
+
Subclasses should override this method.
|
| 228 |
+
|
| 229 |
+
Keyword arguments:
|
| 230 |
+
|
| 231 |
+
* m: A re match object containing a match of the pattern.
|
| 232 |
+
|
| 233 |
+
"""
|
| 234 |
+
pass # pragma: no cover
|
| 235 |
+
|
| 236 |
+
def type(self):
|
| 237 |
+
""" Return class name, to define pattern type """
|
| 238 |
+
return self.__class__.__name__
|
| 239 |
+
|
| 240 |
+
def unescape(self, text):
|
| 241 |
+
""" Return unescaped text given text with an inline placeholder. """
|
| 242 |
+
try:
|
| 243 |
+
stash = self.md.treeprocessors['inline'].stashed_nodes
|
| 244 |
+
except KeyError: # pragma: no cover
|
| 245 |
+
return text
|
| 246 |
+
|
| 247 |
+
def get_stash(m):
|
| 248 |
+
id = m.group(1)
|
| 249 |
+
if id in stash:
|
| 250 |
+
value = stash.get(id)
|
| 251 |
+
if isinstance(value, str):
|
| 252 |
+
return value
|
| 253 |
+
else:
|
| 254 |
+
# An etree Element - return text content only
|
| 255 |
+
return ''.join(value.itertext())
|
| 256 |
+
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
class InlineProcessor(Pattern):
|
| 260 |
+
"""
|
| 261 |
+
Base class that inline patterns subclass.
|
| 262 |
+
|
| 263 |
+
This is the newer style inline processor that uses a more
|
| 264 |
+
efficient and flexible search approach.
|
| 265 |
+
"""
|
| 266 |
+
|
| 267 |
+
def __init__(self, pattern, md=None):
|
| 268 |
+
"""
|
| 269 |
+
Create an instant of an inline pattern.
|
| 270 |
+
|
| 271 |
+
Keyword arguments:
|
| 272 |
+
|
| 273 |
+
* pattern: A regular expression that matches a pattern
|
| 274 |
+
|
| 275 |
+
"""
|
| 276 |
+
self.pattern = pattern
|
| 277 |
+
self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE)
|
| 278 |
+
|
| 279 |
+
# Api for Markdown to pass safe_mode into instance
|
| 280 |
+
self.safe_mode = False
|
| 281 |
+
self.md = md
|
| 282 |
+
|
| 283 |
+
def handleMatch(self, m, data):
|
| 284 |
+
"""Return a ElementTree element from the given match and the
|
| 285 |
+
start and end index of the matched text.
|
| 286 |
+
|
| 287 |
+
If `start` and/or `end` are returned as `None`, it will be
|
| 288 |
+
assumed that the processor did not find a valid region of text.
|
| 289 |
+
|
| 290 |
+
Subclasses should override this method.
|
| 291 |
+
|
| 292 |
+
Keyword arguments:
|
| 293 |
+
|
| 294 |
+
* m: A re match object containing a match of the pattern.
|
| 295 |
+
* data: The buffer current under analysis
|
| 296 |
+
|
| 297 |
+
Returns:
|
| 298 |
+
|
| 299 |
+
* el: The ElementTree element, text or None.
|
| 300 |
+
* start: The start of the region that has been matched or None.
|
| 301 |
+
* end: The end of the region that has been matched or None.
|
| 302 |
+
|
| 303 |
+
"""
|
| 304 |
+
pass # pragma: no cover
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
class SimpleTextPattern(Pattern): # pragma: no cover
|
| 308 |
+
""" Return a simple text of group(2) of a Pattern. """
|
| 309 |
+
def handleMatch(self, m):
|
| 310 |
+
return m.group(2)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
class SimpleTextInlineProcessor(InlineProcessor):
|
| 314 |
+
""" Return a simple text of group(1) of a Pattern. """
|
| 315 |
+
def handleMatch(self, m, data):
|
| 316 |
+
return m.group(1), m.start(0), m.end(0)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
class EscapeInlineProcessor(InlineProcessor):
|
| 320 |
+
""" Return an escaped character. """
|
| 321 |
+
|
| 322 |
+
def handleMatch(self, m, data):
|
| 323 |
+
char = m.group(1)
|
| 324 |
+
if char in self.md.ESCAPED_CHARS:
|
| 325 |
+
return '{}{}{}'.format(util.STX, ord(char), util.ETX), m.start(0), m.end(0)
|
| 326 |
+
else:
|
| 327 |
+
return None, m.start(0), m.end(0)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
class SimpleTagPattern(Pattern): # pragma: no cover
|
| 331 |
+
"""
|
| 332 |
+
Return element of type `tag` with a text attribute of group(3)
|
| 333 |
+
of a Pattern.
|
| 334 |
+
|
| 335 |
+
"""
|
| 336 |
+
def __init__(self, pattern, tag):
|
| 337 |
+
Pattern.__init__(self, pattern)
|
| 338 |
+
self.tag = tag
|
| 339 |
+
|
| 340 |
+
def handleMatch(self, m):
|
| 341 |
+
el = etree.Element(self.tag)
|
| 342 |
+
el.text = m.group(3)
|
| 343 |
+
return el
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
class SimpleTagInlineProcessor(InlineProcessor):
|
| 347 |
+
"""
|
| 348 |
+
Return element of type `tag` with a text attribute of group(2)
|
| 349 |
+
of a Pattern.
|
| 350 |
+
|
| 351 |
+
"""
|
| 352 |
+
def __init__(self, pattern, tag):
|
| 353 |
+
InlineProcessor.__init__(self, pattern)
|
| 354 |
+
self.tag = tag
|
| 355 |
+
|
| 356 |
+
def handleMatch(self, m, data): # pragma: no cover
|
| 357 |
+
el = etree.Element(self.tag)
|
| 358 |
+
el.text = m.group(2)
|
| 359 |
+
return el, m.start(0), m.end(0)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class SubstituteTagPattern(SimpleTagPattern): # pragma: no cover
|
| 363 |
+
""" Return an element of type `tag` with no children. """
|
| 364 |
+
def handleMatch(self, m):
|
| 365 |
+
return etree.Element(self.tag)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
class SubstituteTagInlineProcessor(SimpleTagInlineProcessor):
|
| 369 |
+
""" Return an element of type `tag` with no children. """
|
| 370 |
+
def handleMatch(self, m, data):
|
| 371 |
+
return etree.Element(self.tag), m.start(0), m.end(0)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
class BacktickInlineProcessor(InlineProcessor):
|
| 375 |
+
""" Return a `<code>` element containing the matching text. """
|
| 376 |
+
def __init__(self, pattern):
|
| 377 |
+
InlineProcessor.__init__(self, pattern)
|
| 378 |
+
self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX)
|
| 379 |
+
self.tag = 'code'
|
| 380 |
+
|
| 381 |
+
def handleMatch(self, m, data):
|
| 382 |
+
if m.group(3):
|
| 383 |
+
el = etree.Element(self.tag)
|
| 384 |
+
el.text = util.AtomicString(util.code_escape(m.group(3).strip()))
|
| 385 |
+
return el, m.start(0), m.end(0)
|
| 386 |
+
else:
|
| 387 |
+
return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
class DoubleTagPattern(SimpleTagPattern): # pragma: no cover
|
| 391 |
+
"""Return a ElementTree element nested in tag2 nested in tag1.
|
| 392 |
+
|
| 393 |
+
Useful for strong emphasis etc.
|
| 394 |
+
|
| 395 |
+
"""
|
| 396 |
+
def handleMatch(self, m):
|
| 397 |
+
tag1, tag2 = self.tag.split(",")
|
| 398 |
+
el1 = etree.Element(tag1)
|
| 399 |
+
el2 = etree.SubElement(el1, tag2)
|
| 400 |
+
el2.text = m.group(3)
|
| 401 |
+
if len(m.groups()) == 5:
|
| 402 |
+
el2.tail = m.group(4)
|
| 403 |
+
return el1
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class DoubleTagInlineProcessor(SimpleTagInlineProcessor):
|
| 407 |
+
"""Return a ElementTree element nested in tag2 nested in tag1.
|
| 408 |
+
|
| 409 |
+
Useful for strong emphasis etc.
|
| 410 |
+
|
| 411 |
+
"""
|
| 412 |
+
def handleMatch(self, m, data): # pragma: no cover
|
| 413 |
+
tag1, tag2 = self.tag.split(",")
|
| 414 |
+
el1 = etree.Element(tag1)
|
| 415 |
+
el2 = etree.SubElement(el1, tag2)
|
| 416 |
+
el2.text = m.group(2)
|
| 417 |
+
if len(m.groups()) == 3:
|
| 418 |
+
el2.tail = m.group(3)
|
| 419 |
+
return el1, m.start(0), m.end(0)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class HtmlInlineProcessor(InlineProcessor):
|
| 423 |
+
""" Store raw inline html and return a placeholder. """
|
| 424 |
+
def handleMatch(self, m, data):
|
| 425 |
+
rawhtml = self.unescape(m.group(1))
|
| 426 |
+
place_holder = self.md.htmlStash.store(rawhtml)
|
| 427 |
+
return place_holder, m.start(0), m.end(0)
|
| 428 |
+
|
| 429 |
+
def unescape(self, text):
|
| 430 |
+
""" Return unescaped text given text with an inline placeholder. """
|
| 431 |
+
try:
|
| 432 |
+
stash = self.md.treeprocessors['inline'].stashed_nodes
|
| 433 |
+
except KeyError: # pragma: no cover
|
| 434 |
+
return text
|
| 435 |
+
|
| 436 |
+
def get_stash(m):
|
| 437 |
+
id = m.group(1)
|
| 438 |
+
value = stash.get(id)
|
| 439 |
+
if value is not None:
|
| 440 |
+
try:
|
| 441 |
+
return self.md.serializer(value)
|
| 442 |
+
except Exception:
|
| 443 |
+
return r'\%s' % value
|
| 444 |
+
|
| 445 |
+
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
class AsteriskProcessor(InlineProcessor):
|
| 449 |
+
"""Emphasis processor for handling strong and em matches inside asterisks."""
|
| 450 |
+
|
| 451 |
+
PATTERNS = [
|
| 452 |
+
EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
|
| 453 |
+
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
|
| 454 |
+
EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
|
| 455 |
+
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
|
| 456 |
+
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
|
| 457 |
+
]
|
| 458 |
+
|
| 459 |
+
def build_single(self, m, tag, idx):
|
| 460 |
+
"""Return single tag."""
|
| 461 |
+
el1 = etree.Element(tag)
|
| 462 |
+
text = m.group(2)
|
| 463 |
+
self.parse_sub_patterns(text, el1, None, idx)
|
| 464 |
+
return el1
|
| 465 |
+
|
| 466 |
+
def build_double(self, m, tags, idx):
|
| 467 |
+
"""Return double tag."""
|
| 468 |
+
|
| 469 |
+
tag1, tag2 = tags.split(",")
|
| 470 |
+
el1 = etree.Element(tag1)
|
| 471 |
+
el2 = etree.Element(tag2)
|
| 472 |
+
text = m.group(2)
|
| 473 |
+
self.parse_sub_patterns(text, el2, None, idx)
|
| 474 |
+
el1.append(el2)
|
| 475 |
+
if len(m.groups()) == 3:
|
| 476 |
+
text = m.group(3)
|
| 477 |
+
self.parse_sub_patterns(text, el1, el2, idx)
|
| 478 |
+
return el1
|
| 479 |
+
|
| 480 |
+
def build_double2(self, m, tags, idx):
|
| 481 |
+
"""Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""
|
| 482 |
+
|
| 483 |
+
tag1, tag2 = tags.split(",")
|
| 484 |
+
el1 = etree.Element(tag1)
|
| 485 |
+
el2 = etree.Element(tag2)
|
| 486 |
+
text = m.group(2)
|
| 487 |
+
self.parse_sub_patterns(text, el1, None, idx)
|
| 488 |
+
text = m.group(3)
|
| 489 |
+
el1.append(el2)
|
| 490 |
+
self.parse_sub_patterns(text, el2, None, idx)
|
| 491 |
+
return el1
|
| 492 |
+
|
| 493 |
+
def parse_sub_patterns(self, data, parent, last, idx):
|
| 494 |
+
"""
|
| 495 |
+
Parses sub patterns.
|
| 496 |
+
|
| 497 |
+
`data` (`str`):
|
| 498 |
+
text to evaluate.
|
| 499 |
+
|
| 500 |
+
`parent` (`etree.Element`):
|
| 501 |
+
Parent to attach text and sub elements to.
|
| 502 |
+
|
| 503 |
+
`last` (`etree.Element`):
|
| 504 |
+
Last appended child to parent. Can also be None if parent has no children.
|
| 505 |
+
|
| 506 |
+
`idx` (`int`):
|
| 507 |
+
Current pattern index that was used to evaluate the parent.
|
| 508 |
+
|
| 509 |
+
"""
|
| 510 |
+
|
| 511 |
+
offset = 0
|
| 512 |
+
pos = 0
|
| 513 |
+
|
| 514 |
+
length = len(data)
|
| 515 |
+
while pos < length:
|
| 516 |
+
# Find the start of potential emphasis or strong tokens
|
| 517 |
+
if self.compiled_re.match(data, pos):
|
| 518 |
+
matched = False
|
| 519 |
+
# See if the we can match an emphasis/strong pattern
|
| 520 |
+
for index, item in enumerate(self.PATTERNS):
|
| 521 |
+
# Only evaluate patterns that are after what was used on the parent
|
| 522 |
+
if index <= idx:
|
| 523 |
+
continue
|
| 524 |
+
m = item.pattern.match(data, pos)
|
| 525 |
+
if m:
|
| 526 |
+
# Append child nodes to parent
|
| 527 |
+
# Text nodes should be appended to the last
|
| 528 |
+
# child if present, and if not, it should
|
| 529 |
+
# be added as the parent's text node.
|
| 530 |
+
text = data[offset:m.start(0)]
|
| 531 |
+
if text:
|
| 532 |
+
if last is not None:
|
| 533 |
+
last.tail = text
|
| 534 |
+
else:
|
| 535 |
+
parent.text = text
|
| 536 |
+
el = self.build_element(m, item.builder, item.tags, index)
|
| 537 |
+
parent.append(el)
|
| 538 |
+
last = el
|
| 539 |
+
# Move our position past the matched hunk
|
| 540 |
+
offset = pos = m.end(0)
|
| 541 |
+
matched = True
|
| 542 |
+
if not matched:
|
| 543 |
+
# We matched nothing, move on to the next character
|
| 544 |
+
pos += 1
|
| 545 |
+
else:
|
| 546 |
+
# Increment position as no potential emphasis start was found.
|
| 547 |
+
pos += 1
|
| 548 |
+
|
| 549 |
+
# Append any leftover text as a text node.
|
| 550 |
+
text = data[offset:]
|
| 551 |
+
if text:
|
| 552 |
+
if last is not None:
|
| 553 |
+
last.tail = text
|
| 554 |
+
else:
|
| 555 |
+
parent.text = text
|
| 556 |
+
|
| 557 |
+
def build_element(self, m, builder, tags, index):
|
| 558 |
+
"""Element builder."""
|
| 559 |
+
|
| 560 |
+
if builder == 'double2':
|
| 561 |
+
return self.build_double2(m, tags, index)
|
| 562 |
+
elif builder == 'double':
|
| 563 |
+
return self.build_double(m, tags, index)
|
| 564 |
+
else:
|
| 565 |
+
return self.build_single(m, tags, index)
|
| 566 |
+
|
| 567 |
+
def handleMatch(self, m, data):
|
| 568 |
+
"""Parse patterns."""
|
| 569 |
+
|
| 570 |
+
el = None
|
| 571 |
+
start = None
|
| 572 |
+
end = None
|
| 573 |
+
|
| 574 |
+
for index, item in enumerate(self.PATTERNS):
|
| 575 |
+
m1 = item.pattern.match(data, m.start(0))
|
| 576 |
+
if m1:
|
| 577 |
+
start = m1.start(0)
|
| 578 |
+
end = m1.end(0)
|
| 579 |
+
el = self.build_element(m1, item.builder, item.tags, index)
|
| 580 |
+
break
|
| 581 |
+
return el, start, end
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
class UnderscoreProcessor(AsteriskProcessor):
|
| 585 |
+
"""Emphasis processor for handling strong and em matches inside underscores."""
|
| 586 |
+
|
| 587 |
+
PATTERNS = [
|
| 588 |
+
EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
|
| 589 |
+
EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
|
| 590 |
+
EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
|
| 591 |
+
EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
|
| 592 |
+
EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
|
| 593 |
+
]
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class LinkInlineProcessor(InlineProcessor):
    """ Return a link element from the given match. """
    # Matches the angle-bracket form `(<link> "title")` immediately after the
    # closing `]`.  Group 1 is the `<...>` link, group 2 the optional quoted title.
    RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE)
    # Used to collapse any whitespace character inside a title to a space.
    RE_TITLE_CLEAN = re.compile(r'\s')

    def handleMatch(self, m, data):
        """Parse `[text](href "title")` starting at the match and build an `a` element.

        Returns (element, start, end) or (None, None, None) when the syntax
        does not resolve to a complete link.
        """
        text, index, handled = self.getText(data, m.end(0))

        if not handled:
            return None, None, None

        href, title, index, handled = self.getLink(data, index)
        if not handled:
            return None, None, None

        el = etree.Element("a")
        el.text = text

        el.set("href", href)

        if title is not None:
            el.set("title", title)

        return el, m.start(0), index

    def getLink(self, data, index):
        """Parse data between `()` of `[Text]()` allowing recursive `()`.

        Returns (href, title, end_index, handled).  Tracks nested parentheses
        and single/double-quoted titles with a backtracking counter so that
        unclosed titles like `[test](link"notitle)` still resolve.
        """

        href = ''
        title = None
        handled = False

        m = self.RE_LINK.match(data, pos=index)
        if m and m.group(1):
            # Matches [Text](<link> "title")
            href = m.group(1)[1:-1].strip()
            if m.group(2):
                title = m.group(2)[1:-1]
            index = m.end(0)
            handled = True
        elif m:
            # Track bracket nesting and index in string
            bracket_count = 1
            backtrack_count = 1
            start_index = m.end()
            index = start_index
            last_bracket = -1

            # Primary (first found) quote tracking.
            quote = None
            start_quote = -1
            exit_quote = -1
            ignore_matches = False

            # Secondary (second found) quote tracking.
            alt_quote = None
            start_alt_quote = -1
            exit_alt_quote = -1

            # Track last character
            last = ''

            for pos in range(index, len(data)):
                c = data[pos]
                if c == '(':
                    # Count nested (
                    # Don't increment the bracket count if we are sure we're in a title.
                    if not ignore_matches:
                        bracket_count += 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                elif c == ')':
                    # Match nested ) to (
                    # Don't decrement if we are sure we are in a title that is unclosed.
                    if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)):
                        bracket_count = 0
                    elif not ignore_matches:
                        bracket_count -= 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                        # We've found our backup end location if the title doesn't resolve.
                        if backtrack_count == 0:
                            last_bracket = index + 1

                elif c in ("'", '"'):
                    # Quote has started
                    if not quote:
                        # We'll assume we are now in a title.
                        # Brackets are quoted, so no need to match them (except for the final one).
                        ignore_matches = True
                        backtrack_count = bracket_count
                        bracket_count = 1
                        start_quote = index + 1
                        quote = c
                    # Secondary quote (in case the first doesn't resolve): [text](link'"title")
                    elif c != quote and not alt_quote:
                        start_alt_quote = index + 1
                        alt_quote = c
                    # Update primary quote match
                    elif c == quote:
                        exit_quote = index + 1
                    # Update secondary quote match
                    elif alt_quote and c == alt_quote:
                        exit_alt_quote = index + 1

                index += 1

                # Link is closed, so let's break out of the loop
                if bracket_count == 0:
                    # Get the title if we closed a title string right before link closed
                    if exit_quote >= 0 and quote == last:
                        href = data[start_index:start_quote - 1]
                        title = ''.join(data[start_quote:exit_quote - 1])
                    elif exit_alt_quote >= 0 and alt_quote == last:
                        href = data[start_index:start_alt_quote - 1]
                        title = ''.join(data[start_alt_quote:exit_alt_quote - 1])
                    else:
                        href = data[start_index:index - 1]
                    break

                if c != ' ':
                    last = c

            # We have a scenario: [test](link"notitle)
            # When we enter a string, we stop tracking bracket resolution in the main counter,
            # but we do keep a backup counter up until we discover where we might resolve all brackets
            # if the title string fails to resolve.
            if bracket_count != 0 and backtrack_count == 0:
                href = data[start_index:last_bracket - 1]
                index = last_bracket
                bracket_count = 0

            handled = bracket_count == 0

        if title is not None:
            title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip())))

        href = self.unescape(href).strip()

        return href, title, index, handled

    def getText(self, data, index):
        """Parse the content between `[]` of the start of an image or link
        resolving nested square brackets.

        Returns (text, end_index, handled) where handled indicates the
        brackets balanced before the end of the data.
        """
        bracket_count = 1
        text = []
        for pos in range(index, len(data)):
            c = data[pos]
            if c == ']':
                bracket_count -= 1
            elif c == '[':
                bracket_count += 1
            index += 1
            if bracket_count == 0:
                break
            text.append(c)
        return ''.join(text), index, bracket_count == 0
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
class ImageInlineProcessor(LinkInlineProcessor):
    """ Return a img element from the given match. """

    def handleMatch(self, m, data):
        """Build an `img` element from `![alt](src "title")` syntax.

        Returns (element, start, end) or (None, None, None) on failure.
        """
        alt_text, pos, ok = self.getText(data, m.end(0))
        if not ok:
            return None, None, None

        link, caption, pos, ok = self.getLink(data, pos)
        if not ok:
            return None, None, None

        image = etree.Element("img")
        image.set("src", link)
        if caption is not None:
            image.set("title", caption)
        image.set('alt', self.unescape(alt_text))
        return image, m.start(0), pos
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
class ReferenceInlineProcessor(LinkInlineProcessor):
    """ Match to a stored reference and return link element. """
    NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE)

    RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE)

    def handleMatch(self, m, data):
        """Resolve `[text][id]` against stored references and build an element."""
        label, pos, ok = self.getText(data, m.end(0))
        if not ok:
            return None, None, None

        ref_id, end, ok = self.evalId(data, pos, label)
        if not ok:
            return None, None, None

        # Collapse line breaks and runs of whitespace inside the id.
        ref_id = self.NEWLINE_CLEANUP_RE.sub(' ', ref_id)
        if ref_id not in self.md.references:  # ignore undefined refs
            return None, m.start(0), end

        href, title = self.md.references[ref_id]

        return self.makeTag(href, title, label), m.start(0), end

    def evalId(self, data, index, text):
        """
        Evaluate the id portion of [ref][id].

        If [ref][] use [ref].
        """
        m = self.RE_LINK.match(data, pos=index)
        if not m:
            return None, index, False
        # An empty id (`[ref][]`) falls back to the link text itself.
        ref_id = m.group(1).lower() or text.lower()
        return ref_id, m.end(0), True

    def makeTag(self, href, title, text):
        """Build an `a` element for the resolved reference."""
        anchor = etree.Element('a')
        anchor.set('href', href)
        if title:
            anchor.set('title', title)
        anchor.text = text
        return anchor
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
class ShortReferenceInlineProcessor(ReferenceInlineProcessor):
    """Short form of reference: [google]. """
    def evalId(self, data, index, text):
        """Evaluate the id from the text of [ref]; the label is the id."""

        return text.lower(), index, True
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
class ImageReferenceInlineProcessor(ReferenceInlineProcessor):
    """ Match to a stored reference and return img element. """
    def makeTag(self, href, title, text):
        """Build an `img` element; the link label becomes the alt text."""
        image = etree.Element("img")
        image.set("src", href)
        if title:
            image.set("title", title)
        image.set("alt", self.unescape(text))
        return image
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
class ShortImageReferenceInlineProcessor(ImageReferenceInlineProcessor):
    """ Short form of image reference: ![ref]. """
    def evalId(self, data, index, text):
        """Evaluate the id from the text of [ref]; the label is the id."""

        return text.lower(), index, True
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
class AutolinkInlineProcessor(InlineProcessor):
    """ Return a link Element given an autolink (`<http://example/com>`). """
    def handleMatch(self, m, data):
        """Wrap the matched URL in an `a` element pointing at itself."""
        url = m.group(1)
        anchor = etree.Element("a")
        anchor.set('href', self.unescape(url))
        # AtomicString prevents further inline processing of the link text.
        anchor.text = util.AtomicString(url)
        return anchor, m.start(0), m.end(0)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
class AutomailInlineProcessor(InlineProcessor):
    """
    Return a mailto link Element given an automail link (`<foo@example.com>`).
    """
    def handleMatch(self, m, data):
        """Build an entity-obfuscated `mailto:` link for the matched address."""
        el = etree.Element('a')
        email = self.unescape(m.group(1))
        if email.startswith("mailto:"):
            email = email[len("mailto:"):]

        def codepoint2name(code):
            """Return entity definition by code, or the code if not defined."""
            entity = entities.codepoint2name.get(code)
            if entity:
                return "{}{};".format(util.AMP_SUBSTITUTE, entity)
            else:
                return "%s#%d;" % (util.AMP_SUBSTITUTE, code)

        # Visible text: each character rendered as a named or numeric entity
        # (a light obfuscation against address-harvesting bots).
        letters = [codepoint2name(ord(letter)) for letter in email]
        el.text = util.AtomicString(''.join(letters))

        # href: every character (including the "mailto:" prefix) as a
        # numeric entity, using AMP_SUBSTITUTE so the postprocessor restores '&'.
        mailto = "mailto:" + email
        mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
                          ord(letter) for letter in mailto])
        el.set('href', mailto)
        return el, m.start(0), m.end(0)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/postprocessors.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
|
| 21 |
+
POST-PROCESSORS
|
| 22 |
+
=============================================================================
|
| 23 |
+
|
| 24 |
+
Markdown also allows post-processors, which are similar to preprocessors in
|
| 25 |
+
that they need to implement a "run" method. However, they are run after core
|
| 26 |
+
processing.
|
| 27 |
+
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
from collections import OrderedDict
|
| 31 |
+
from . import util
|
| 32 |
+
import re
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def build_postprocessors(md, **kwargs):
    """ Build the default postprocessors for Markdown. """
    registry = util.Registry()
    # Higher priority runs first: raw html restoration, then entity
    # substitution, then escape restoration.
    registry.register(RawHtmlPostprocessor(md), 'raw_html', 30)
    registry.register(AndSubstitutePostprocessor(), 'amp_substitute', 20)
    registry.register(UnescapePostprocessor(), 'unescape', 10)
    return registry
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Postprocessor(util.Processor):
    """
    Postprocessors are run after the ElementTree is converted back into text.

    Each Postprocessor implements a "run" method that takes a pointer to a
    text string, modifies it as necessary and returns a text string.

    Postprocessors must extend markdown.Postprocessor.

    """

    def run(self, text):
        """
        Subclasses of Postprocessor should implement a `run` method, which
        takes the html document as a single text string and returns a
        (possibly modified) string.

        """
        # Base class is a no-op; subclasses provide the behavior.
        pass  # pragma: no cover
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class RawHtmlPostprocessor(Postprocessor):
    """ Restore raw html to the document. """

    # Captures the tag name (or leading '!', '?', etc.) from the start of a
    # raw html fragment.
    BLOCK_LEVEL_REGEX = re.compile(r'^\<\/?([^ >]+)')

    def run(self, text):
        """ Iterate over html stash and restore html. """
        replacements = OrderedDict()
        for i in range(self.md.htmlStash.html_counter):
            html = self.stash_to_string(self.md.htmlStash.rawHtmlBlocks[i])
            if self.isblocklevel(html):
                # Block-level fragments may have been wrapped in <p> tags by
                # the tree processors; map that wrapped form as well so the
                # superfluous <p> is dropped on restore.
                replacements["<p>{}</p>".format(
                    self.md.htmlStash.get_placeholder(i))] = html
            replacements[self.md.htmlStash.get_placeholder(i)] = html

        def substitute_match(m):
            key = m.group(0)

            if key not in replacements:
                # A <p>-wrapped placeholder whose bare form is known (inline
                # fragment inside a paragraph): restore it and keep the <p>.
                if key[3:-4] in replacements:
                    return f'<p>{ replacements[key[3:-4]] }</p>'
                else:
                    return key

            return replacements[key]

        if replacements:
            base_placeholder = util.HTML_PLACEHOLDER % r'([0-9]+)'
            pattern = re.compile(f'<p>{ base_placeholder }</p>|{ base_placeholder }')
            processed_text = pattern.sub(substitute_match, text)
        else:
            return text

        if processed_text == text:
            return processed_text
        else:
            # Restored html may itself contain placeholders; recurse until
            # the text stops changing.
            return self.run(processed_text)

    def isblocklevel(self, html):
        # True when the fragment opens with a block-level tag; comments,
        # processing instructions and template markers count as block level.
        m = self.BLOCK_LEVEL_REGEX.match(html)
        if m:
            if m.group(1)[0] in ('!', '?', '@', '%'):
                # Comment, php etc...
                return True
            return self.md.is_block_level(m.group(1))
        return False

    def stash_to_string(self, text):
        """ Convert a stashed object to a string. """
        return str(text)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class AndSubstitutePostprocessor(Postprocessor):
    """ Restore valid entities """

    def run(self, text):
        """Replace every AMP_SUBSTITUTE placeholder with a literal ampersand."""
        return text.replace(util.AMP_SUBSTITUTE, "&")
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class UnescapePostprocessor(Postprocessor):
    """ Restore escaped chars """

    # A backslash-escaped character is stashed earlier as STX<ordinal>ETX.
    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))

    def unescape(self, m):
        # Convert the stashed ordinal back into its character.
        return chr(int(m.group(1)))

    def run(self, text):
        """Replace every stashed escape sequence with its original character."""
        return self.RE.sub(self.unescape, text)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/preprocessors.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
|
| 21 |
+
PRE-PROCESSORS
|
| 22 |
+
=============================================================================
|
| 23 |
+
|
| 24 |
+
Preprocessors work on source text before we start doing anything too
|
| 25 |
+
complicated.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
from . import util
|
| 29 |
+
from .htmlparser import HTMLExtractor
|
| 30 |
+
import re
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def build_preprocessors(md, **kwargs):
    """ Build the default set of preprocessors used by Markdown. """
    registry = util.Registry()
    # Whitespace normalization must run before html extraction.
    registry.register(NormalizeWhitespace(md), 'normalize_whitespace', 30)
    registry.register(HtmlBlockPreprocessor(md), 'html_block', 20)
    return registry
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Preprocessor(util.Processor):
    """
    Preprocessors are run after the text is broken into lines.

    Each preprocessor implements a "run" method that takes a pointer to a
    list of lines of the document, modifies it as necessary and returns
    either the same pointer or a pointer to a new list.

    Preprocessors must extend markdown.Preprocessor.

    """
    def run(self, lines):
        """
        Each subclass of Preprocessor should override the `run` method, which
        takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.

        """
        # Base class is a no-op; subclasses provide the behavior.
        pass  # pragma: no cover
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class NormalizeWhitespace(Preprocessor):
    """ Normalize whitespace for consistent parsing. """

    def run(self, lines):
        source = '\n'.join(lines)
        # Strip STX/ETX control characters: they are reserved as internal
        # placeholder markers and must not appear in user input.
        source = source.replace(util.STX, "").replace(util.ETX, "")
        # Normalize all line endings to '\n' and guarantee the document ends
        # with a blank line.
        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        source = source.expandtabs(self.md.tab_length)
        # Remove trailing spaces from otherwise-blank lines.
        source = re.sub(r'(?<=\n) +\n', '\n', source)
        return source.split('\n')
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    def run(self, lines):
        source = '\n'.join(lines)
        parser = HTMLExtractor(self.md)
        parser.feed(source)
        parser.close()
        # cleandoc holds the document text with each raw html block replaced
        # by a stash placeholder; restored later by RawHtmlPostprocessor.
        return ''.join(parser.cleandoc).split('\n')
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/serializers.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# markdown/serializers.py
|
| 2 |
+
#
|
| 3 |
+
# Add x/html serialization to ElementTree
|
| 4 |
+
# Taken from ElementTree 1.3 preview with slight modifications
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
|
| 7 |
+
#
|
| 8 |
+
# fredrik@pythonware.com
|
| 9 |
+
# https://www.pythonware.com/
|
| 10 |
+
#
|
| 11 |
+
# --------------------------------------------------------------------
|
| 12 |
+
# The ElementTree toolkit is
|
| 13 |
+
#
|
| 14 |
+
# Copyright (c) 1999-2007 by Fredrik Lundh
|
| 15 |
+
#
|
| 16 |
+
# By obtaining, using, and/or copying this software and/or its
|
| 17 |
+
# associated documentation, you agree that you have read, understood,
|
| 18 |
+
# and will comply with the following terms and conditions:
|
| 19 |
+
#
|
| 20 |
+
# Permission to use, copy, modify, and distribute this software and
|
| 21 |
+
# its associated documentation for any purpose and without fee is
|
| 22 |
+
# hereby granted, provided that the above copyright notice appears in
|
| 23 |
+
# all copies, and that both that copyright notice and this permission
|
| 24 |
+
# notice appear in supporting documentation, and that the name of
|
| 25 |
+
# Secret Labs AB or the author not be used in advertising or publicity
|
| 26 |
+
# pertaining to distribution of the software without specific, written
|
| 27 |
+
# prior permission.
|
| 28 |
+
#
|
| 29 |
+
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
|
| 30 |
+
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
|
| 31 |
+
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
|
| 32 |
+
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
|
| 33 |
+
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 34 |
+
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 35 |
+
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 36 |
+
# OF THIS SOFTWARE.
|
| 37 |
+
# --------------------------------------------------------------------
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
from xml.etree.ElementTree import ProcessingInstruction
|
| 41 |
+
from xml.etree.ElementTree import Comment, ElementTree, QName
|
| 42 |
+
import re
|
| 43 |
+
|
| 44 |
+
__all__ = ['to_html_string', 'to_xhtml_string']
|
| 45 |
+
|
| 46 |
+
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
|
| 47 |
+
"img", "input", "isindex", "link", "meta", "param")
|
| 48 |
+
RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|\#x[0-9a-f]+|[0-9a-z]+);)', re.I)
|
| 49 |
+
|
| 50 |
+
try:
|
| 51 |
+
HTML_EMPTY = set(HTML_EMPTY)
|
| 52 |
+
except NameError: # pragma: no cover
|
| 53 |
+
pass
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _raise_serialization_error(text): # pragma: no cover
|
| 57 |
+
raise TypeError(
|
| 58 |
+
"cannot serialize {!r} (type {})".format(text, type(text).__name__)
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _escape_cdata(text):
|
| 63 |
+
# escape character data
|
| 64 |
+
try:
|
| 65 |
+
# it's worth avoiding do-nothing calls for strings that are
|
| 66 |
+
# shorter than 500 character, or so. assume that's, by far,
|
| 67 |
+
# the most common case in most applications.
|
| 68 |
+
if "&" in text:
|
| 69 |
+
# Only replace & when not part of an entity
|
| 70 |
+
text = RE_AMP.sub('&', text)
|
| 71 |
+
if "<" in text:
|
| 72 |
+
text = text.replace("<", "<")
|
| 73 |
+
if ">" in text:
|
| 74 |
+
text = text.replace(">", ">")
|
| 75 |
+
return text
|
| 76 |
+
except (TypeError, AttributeError): # pragma: no cover
|
| 77 |
+
_raise_serialization_error(text)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _escape_attrib(text):
|
| 81 |
+
# escape attribute value
|
| 82 |
+
try:
|
| 83 |
+
if "&" in text:
|
| 84 |
+
# Only replace & when not part of an entity
|
| 85 |
+
text = RE_AMP.sub('&', text)
|
| 86 |
+
if "<" in text:
|
| 87 |
+
text = text.replace("<", "<")
|
| 88 |
+
if ">" in text:
|
| 89 |
+
text = text.replace(">", ">")
|
| 90 |
+
if "\"" in text:
|
| 91 |
+
text = text.replace("\"", """)
|
| 92 |
+
if "\n" in text:
|
| 93 |
+
text = text.replace("\n", " ")
|
| 94 |
+
return text
|
| 95 |
+
except (TypeError, AttributeError): # pragma: no cover
|
| 96 |
+
_raise_serialization_error(text)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _escape_attrib_html(text):
|
| 100 |
+
# escape attribute value
|
| 101 |
+
try:
|
| 102 |
+
if "&" in text:
|
| 103 |
+
# Only replace & when not part of an entity
|
| 104 |
+
text = RE_AMP.sub('&', text)
|
| 105 |
+
if "<" in text:
|
| 106 |
+
text = text.replace("<", "<")
|
| 107 |
+
if ">" in text:
|
| 108 |
+
text = text.replace(">", ">")
|
| 109 |
+
if "\"" in text:
|
| 110 |
+
text = text.replace("\"", """)
|
| 111 |
+
return text
|
| 112 |
+
except (TypeError, AttributeError): # pragma: no cover
|
| 113 |
+
_raise_serialization_error(text)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _serialize_html(write, elem, format):
    """Recursively serialize `elem` (plus its tail text) through `write`.

    `write` is a callable accepting one string; `format` is "html" or
    "xhtml" and controls void-element self-closing and boolean-attribute
    shortening.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    elif tag is None:
        # Tag-less element: emit its text and children with no wrapper.
        if text:
            write(_escape_cdata(text))
        for e in elem:
            _serialize_html(write, e, format)
    else:
        namespace_uri = None
        if isinstance(tag, QName):
            # QNAME objects store their data as a string: `{uri}tag`
            if tag.text[:1] == "{":
                namespace_uri, tag = tag.text[1:].split("}", 1)
            else:
                raise ValueError('QName objects must define a tag.')
        write("<" + tag)
        items = elem.items()
        if items:
            items = sorted(items)  # lexical order
            for k, v in items:
                if isinstance(k, QName):
                    # Assume a text only QName
                    k = k.text
                if isinstance(v, QName):
                    # Assume a text only QName
                    v = v.text
                else:
                    v = _escape_attrib_html(v)
                if k == v and format == 'html':
                    # handle boolean attributes
                    write(" %s" % v)
                else:
                    write(' {}="{}"'.format(k, v))
        if namespace_uri:
            write(' xmlns="%s"' % (_escape_attrib(namespace_uri)))
        if format == "xhtml" and tag.lower() in HTML_EMPTY:
            write(" />")
        else:
            write(">")
            if text:
                if tag.lower() in ["script", "style"]:
                    # script/style content must not be entity-escaped.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, format)
            # Void elements in html mode get no closing tag either.
            if tag.lower() not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def _write_html(root, format="html"):
    """Serialize `root` to a single string in the requested format."""
    assert root is not None
    parts = []
    _serialize_html(parts.append, root, format)
    return "".join(parts)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# --------------------------------------------------------------------
|
| 182 |
+
# public functions
|
| 183 |
+
|
| 184 |
+
def to_html_string(element):
    """Serialize `element` as an html string."""
    root = ElementTree(element).getroot()
    return _write_html(root, format="html")
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def to_xhtml_string(element):
    """Serialize `element` as an xhtml string."""
    root = ElementTree(element).getroot()
    return _write_html(root, format="xhtml")
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/test_tools.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import os
|
| 23 |
+
import sys
|
| 24 |
+
import unittest
|
| 25 |
+
import textwrap
|
| 26 |
+
from . import markdown, Markdown, util
|
| 27 |
+
|
| 28 |
+
try:
|
| 29 |
+
import tidylib
|
| 30 |
+
except ImportError:
|
| 31 |
+
tidylib = None
|
| 32 |
+
|
| 33 |
+
__all__ = ['TestCase', 'LegacyTestCase', 'Kwargs']
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class TestCase(unittest.TestCase):
    """
    A `unittest.TestCase` subclass with helpers for testing Markdown output.

    Set `default_kwargs` to a dict of keywords passed to `Markdown` for every
    test; individual calls may override any of them.

    `assertMarkdownRenders` takes source text, the expected output, and any
    keyword overrides. Output is compared with `assertMultiLineEqual`, which
    raises an `AssertionError` with a diff on mismatch.

    `dedent` is provided for cleaning up triple-quoted test strings.

    Otherwise behaves exactly like `unittest.TestCase`.
    """

    # Keyword arguments applied to every Markdown instance created by
    # `assertMarkdownRenders`; subclasses may override.
    default_kwargs = {}

    def assertMarkdownRenders(self, source, expected, expected_attrs=None, **kwargs):
        """
        Assert that `source` Markdown renders to `expected` output.

        `expected_attrs` accepts a dict mapping attribute names on the
        `Markdown` instance to their expected values after parsing. Each one
        is checked with `assertEqual` after the output comparison.
        """
        attrs_to_check = expected_attrs or {}
        # Overrides take precedence over the class-level defaults.
        merged_kwargs = {**self.default_kwargs, **kwargs}
        parser = Markdown(**merged_kwargs)
        rendered = parser.convert(source)
        self.assertMultiLineEqual(rendered, expected)
        for attr_name, expected_value in attrs_to_check.items():
            self.assertEqual(getattr(parser, attr_name), expected_value)

    def dedent(self, text):
        """
        Dedent `text` and strip surrounding whitespace.
        """
        # TODO: If/when actual output ends with a newline, then use:
        # return textwrap.dedent(text.strip('\n'))
        dedented = textwrap.dedent(text)
        return dedented.strip()
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class recursionlimit:
    """
    Context manager that temporarily raises the Python recursion limit.

    Test runners and coverage tools add an arbitrary number of frames to the
    stack, so the limit is computed as the current stack depth (at
    construction time) plus the requested headroom. The previous limit is
    restored on exit.

    Example:

        with recursionlimit(20):
            # test code here

    See https://stackoverflow.com/a/50120316/866026
    """

    def __init__(self, limit):
        # Record the limit to restore first, then compute the new one
        # relative to the depth of the calling stack.
        self.old_limit = sys.getrecursionlimit()
        self.limit = util._get_stack_depth() + limit

    def __enter__(self):
        sys.setrecursionlimit(self.limit)

    def __exit__(self, type, value, tb):
        # Always restore, even if the body raised.
        sys.setrecursionlimit(self.old_limit)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
#########################
|
| 114 |
+
# Legacy Test Framework #
|
| 115 |
+
#########################
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class Kwargs(dict):
    """ A dict like class for holding keyword arguments.

    Used by the legacy test framework as a marker type: class attributes of
    this type on a `LegacyTestCase` subclass are treated as per-test keyword
    arguments for `markdown()` (see `LegacyTestMeta`).
    """
    pass
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _normalize_whitespace(text):
    """ Normalize whitespace for a string of html using tidylib. """
    # Options chosen to make tidy reformat whitespace only, leaving the
    # document content itself untouched.
    tidy_options = {
        'drop_empty_paras': 0,
        'fix_backslash': 0,
        'fix_bad_comments': 0,
        'fix_uri': 0,
        'join_styles': 0,
        'lower_literals': 0,
        'merge_divs': 0,
        'output_xhtml': 1,
        'quote_ampersand': 0,
        'newline': 'LF',
    }
    output, _errors = tidylib.tidy_fragment(text, options=tidy_options)
    return output
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class LegacyTestMeta(type):
    # Metaclass that turns a directory of input/output file pairs into one
    # generated `test_*` method per pair on the class being created.
    def __new__(cls, name, bases, dct):

        def generate_test(infile, outfile, normalize, kwargs):
            # Closure factory: binds the file paths and kwargs for one test.
            def test(self):
                with open(infile, encoding="utf-8") as f:
                    input = f.read()
                with open(outfile, encoding="utf-8") as f:
                    # Normalize line endings
                    # (on Windows, git may have altered line endings).
                    expected = f.read().replace("\r\n", "\n")
                output = markdown(input, **kwargs)
                if tidylib and normalize:
                    try:
                        expected = _normalize_whitespace(expected)
                        output = _normalize_whitespace(output)
                    except OSError:
                        # tidylib imported but its C library failed to load.
                        self.skipTest("Tidylib's c library not available.")
                elif normalize:
                    # Normalization requested but tidylib is not installed.
                    self.skipTest('Tidylib not available.')
                self.assertMultiLineEqual(output, expected)
            return test

        # Configuration is read from class attributes with sane defaults.
        location = dct.get('location', '')
        exclude = dct.get('exclude', [])
        normalize = dct.get('normalize', False)
        input_ext = dct.get('input_ext', '.txt')
        output_ext = dct.get('output_ext', '.html')
        kwargs = dct.get('default_kwargs', Kwargs())

        if os.path.isdir(location):
            for file in os.listdir(location):
                infile = os.path.join(location, file)
                if os.path.isfile(infile):
                    tname, ext = os.path.splitext(file)
                    if ext == input_ext:
                        outfile = os.path.join(location, tname + output_ext)
                        # Make the name a valid Python identifier fragment.
                        tname = tname.replace(' ', '_').replace('-', '_')
                        kws = kwargs.copy()
                        if tname in dct:
                            # Per-file Kwargs attribute overrides defaults.
                            kws.update(dct[tname])
                        test_name = 'test_%s' % tname
                        if tname not in exclude:
                            dct[test_name] = generate_test(infile, outfile, normalize, kws)
                        else:
                            # Register a skipped placeholder so excluded
                            # tests are still visible in test reports.
                            dct[test_name] = unittest.skip('Excluded')(lambda: None)

        return type.__new__(cls, name, bases, dct)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class LegacyTestCase(unittest.TestCase, metaclass=LegacyTestMeta):
    """
    A `unittest.TestCase` subclass for running Markdown's legacy file-based tests.

    A subclass should define various properties which point to a directory of
    text-based test files and define various behaviors/defaults for those tests.
    The following properties are supported:

    location: A path to the directory of test files. An absolute path is preferred.
    exclude: A list of tests to exclude. Each test name should comprise the filename
        without an extension.
    normalize: A boolean value indicating if the HTML should be normalized.
        Default: `False`.
    input_ext: A string containing the file extension of input files. Default: `.txt`.
    output_ext: A string containing the file extension of expected output files.
        Default: `.html`.
    default_kwargs: A `Kwargs` instance which stores the default set of keyword
        arguments for all test files in the directory.

    In addition, properties can be defined for each individual set of test files within
    the directory. The property should be given the name of the file without the file
    extension. Any spaces and dashes in the filename should be replaced with
    underscores. The value of the property should be a `Kwargs` instance which
    contains the keyword arguments that should be passed to `Markdown` for that
    test file. The keyword arguments will "update" the `default_kwargs`.

    When the class instance is created, it will walk the given directory and create
    a separate unittest for each set of test files using the naming scheme:
    `test_filename`. One unittest will be run for each set of input and output files.
    """
    pass
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/markdown/treeprocessors.py
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python Markdown
|
| 3 |
+
|
| 4 |
+
A Python implementation of John Gruber's Markdown.
|
| 5 |
+
|
| 6 |
+
Documentation: https://python-markdown.github.io/
|
| 7 |
+
GitHub: https://github.com/Python-Markdown/markdown/
|
| 8 |
+
PyPI: https://pypi.org/project/Markdown/
|
| 9 |
+
|
| 10 |
+
Started by Manfred Stienstra (http://www.dwerg.net/).
|
| 11 |
+
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
| 12 |
+
Currently maintained by Waylan Limberg (https://github.com/waylan),
|
| 13 |
+
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
| 14 |
+
|
| 15 |
+
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
|
| 16 |
+
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
| 17 |
+
Copyright 2004 Manfred Stienstra (the original version)
|
| 18 |
+
|
| 19 |
+
License: BSD (see LICENSE.md for details).
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import xml.etree.ElementTree as etree
|
| 23 |
+
from . import util
|
| 24 |
+
from . import inlinepatterns
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def build_treeprocessors(md, **kwargs):
    """ Build the default treeprocessors for Markdown. """
    # Higher priority runs first: inline patterns (20), then prettify (10).
    registry = util.Registry()
    registry.register(InlineProcessor(md), 'inline', 20)
    registry.register(PrettifyTreeprocessor(md), 'prettify', 10)
    return registry
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def isString(s):
    """ Return True if `s` is a plain string (not an `AtomicString`). """
    # AtomicString subclasses str but marks text that must not be
    # reprocessed, so it is deliberately excluded here.
    return isinstance(s, str) and not isinstance(s, util.AtomicString)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class Treeprocessor(util.Processor):
    """
    Treeprocessors are run on the ElementTree object before serialization.

    Each Treeprocessor implements a "run" method that takes a pointer to an
    ElementTree, modifies it as necessary and returns an ElementTree
    object.

    Treeprocessors must extend markdown.Treeprocessor.

    """
    def run(self, root):
        """
        Subclasses of Treeprocessor should implement a `run` method, which
        takes a root ElementTree. This method can return another ElementTree
        object, and the existing root ElementTree will be replaced, or it can
        modify the current tree and return None.
        """
        pass  # pragma: no cover
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Matched inline elements are temporarily replaced in the text by string
    placeholders, stashed in `self.stashed_nodes`, and spliced back into the
    tree once all patterns have run.
    """

    def __init__(self, md):
        # Placeholder format pieces used to mark stashed nodes in text.
        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = util.ETX
        # 4 == width of the zero-padded numeric id embedded in a placeholder.
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
        self.md = md
        self.inlinePatterns = md.inlinePatterns
        # Stack of lowercase ancestor tag names for the element being processed.
        self.ancestors = []

    @property
    @util.deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later
        return self.md

    def __makePlaceholder(self, type):
        """ Generate a placeholder """
        # The id is simply the next index into the stash, zero-padded.
        id = "%04d" % len(self.stashed_nodes)
        hash = util.INLINE_PLACEHOLDER % id
        return hash, id

    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index

        Keyword arguments:

        * data: string
        * index: index, from which we start search

        Returns: placeholder id and string index, after the found placeholder.

        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            # No placeholder found; advance one char so the caller makes progress.
            return None, index + 1

    def __stashNode(self, node, type):
        """ Add node to stash """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder

    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders

        Keyword arguments:

        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with

        Returns: String with placeholders.

        """
        # AtomicString is never processed by inline patterns.
        if not isinstance(data, util.AtomicString):
            startIndex = 0
            count = len(self.inlinePatterns)
            while patternIndex < count:
                # Each pattern is applied repeatedly until it stops matching,
                # then we move on to the next pattern.
                data, matched, startIndex = self.__applyPattern(
                    self.inlinePatterns[patternIndex], data, patternIndex, startIndex
                )
                if not matched:
                    patternIndex += 1
        return data

    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.

        Keywords arguments:

        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail

        Returns: None

        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None

        childResult = self.__processPlaceholders(text, subnode, isText)

        if not isText and node is not subnode:
            # Tail content belongs after `subnode` inside `node`.
            pos = list(node).index(subnode) + 1
        else:
            pos = 0

        # Insert in reverse so repeated insert(pos, ...) preserves order.
        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild[0])

    def __processPlaceholders(self, data, parent, isText=True):
        """
        Process string with placeholders and generate ElementTree tree.

        Keyword arguments:

        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data

        Returns: list with ElementTree elements with applied inline patterns.

        """
        def linkText(text):
            # Attach plain text to the most recently produced element's tail,
            # or to the parent's tail/text when no element has been produced yet.
            if text:
                if result:
                    if result[-1][0].tail:
                        result[-1][0].tail += text
                    else:
                        result[-1][0].tail = text
                elif not isText:
                    if parent.tail:
                        parent.tail += text
                    else:
                        parent.tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text
        result = []
        strartIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, strartIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)

                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)

                    if index > 0:
                        # Text before the placeholder is plain content.
                        text = data[strartIndex:index]
                        linkText(text)

                    if not isString(node):  # it's Element
                        # Recursively expand placeholders inside the node's
                        # own text and tails.
                        for child in [node] + list(node):
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(
                                        node, child, False
                                    )
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else:  # it's just a string
                        linkText(node)
                        strartIndex = phEndIndex
                        continue

                    strartIndex = phEndIndex
                    # Remember the element along with a snapshot of ancestors.
                    result.append((node, self.ancestors[:]))

                else:  # wrong placeholder
                    # Prefix matched but the id is unknown; emit it verbatim.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[strartIndex:end])
                    strartIndex = end
            else:
                text = data[strartIndex:]
                if isinstance(data, util.AtomicString):
                    # We don't want to lose the AtomicString
                    text = util.AtomicString(text)
                linkText(text)
                data = ""

        return result

    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.

        Keyword arguments:

        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we start searching

        Returns: String with placeholders instead of ElementTree elements.

        """
        # New-style processors (inlinepatterns.InlineProcessor) control their
        # own match span; legacy Pattern objects match the whole string.
        new_style = isinstance(pattern, inlinepatterns.InlineProcessor)

        for exclude in pattern.ANCESTOR_EXCLUDES:
            if exclude.lower() in self.ancestors:
                return data, False, 0

        if new_style:
            match = None
            # Since handleMatch may reject our first match,
            # we iterate over the buffer looking for matches
            # until we can't find any more.
            for match in pattern.getCompiledRegExp().finditer(data, startIndex):
                node, start, end = pattern.handleMatch(match, data)
                if start is None or end is None:
                    startIndex += match.end(0)
                    match = None
                    continue
                break
        else:  # pragma: no cover
            match = pattern.getCompiledRegExp().match(data[startIndex:])
            leftData = data[:startIndex]

        if not match:
            return data, False, 0

        if not new_style:  # pragma: no cover
            node = pattern.handleMatch(match)
            start = match.start(0)
            end = match.end(0)

        if node is None:
            # Pattern matched but produced nothing; skip past the match.
            return data, True, end

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                for child in [node] + list(node):
                    if not isString(node):
                        if child.text:
                            self.ancestors.append(child.tag.lower())
                            child.text = self.__handleInline(
                                child.text, patternIndex + 1
                            )
                            self.ancestors.pop()
                        if child.tail:
                            child.tail = self.__handleInline(
                                child.tail, patternIndex
                            )

        placeholder = self.__stashNode(node, pattern.type())

        if new_style:
            return "{}{}{}".format(data[:start],
                                   placeholder, data[end:]), True, 0
        else:  # pragma: no cover
            return "{}{}{}{}".format(leftData,
                                     match.group(1),
                                     placeholder, match.groups()[-1]), True, 0

    def __build_ancestors(self, parent, parents):
        """Build the ancestor list."""
        ancestors = []
        # Walk up through parent_map collecting tag names, root first.
        while parent is not None:
            if parent is not None:
                ancestors.append(parent.tag.lower())
            parent = self.parent_map.get(parent)
        ancestors.reverse()
        parents.extend(ancestors)

    def run(self, tree, ancestors=None):
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree. If you don't
        want to process your data with inline patterns, instead of normal
        string, use subclass AtomicString:

            node.text = markdown.AtomicString("This will not be processed.")

        Arguments:

        * tree: ElementTree object, representing Markdown tree.
        * ancestors: List of parent tag names that precede the tree node (if needed).

        Returns: ElementTree object with applied inline patterns.

        """
        self.stashed_nodes = {}

        # Ensure a valid parent list, but copy passed in lists
        # to ensure we don't have the user accidentally change it on us.
        tree_parents = [] if ancestors is None else ancestors[:]

        # Map each element to its parent so ancestors can be reconstructed.
        self.parent_map = {c: p for p in tree.iter() for c in p}
        stack = [(tree, tree_parents)]

        while stack:
            currElement, parents = stack.pop()

            self.ancestors = parents
            self.__build_ancestors(currElement, self.ancestors)

            insertQueue = []
            for child in currElement:
                if child.text and not isinstance(
                    child.text, util.AtomicString
                ):
                    self.ancestors.append(child.tag.lower())
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(
                        self.__handleInline(text), child
                    )
                    for item in lst:
                        self.parent_map[item[0]] = child
                    stack += lst
                    # Defer insertion until after iteration to avoid
                    # mutating `currElement` while looping over it.
                    insertQueue.append((child, lst))
                    self.ancestors.pop()
                if child.tail:
                    tail = self.__handleInline(child.tail)
                    # Throwaway element used only to collect leftover tail text.
                    dumby = etree.Element('d')
                    child.tail = None
                    tailResult = self.__processPlaceholders(tail, dumby, False)
                    if dumby.tail:
                        child.tail = dumby.tail
                    pos = list(currElement).index(child) + 1
                    tailResult.reverse()
                    for newChild in tailResult:
                        self.parent_map[newChild[0]] = currElement
                        currElement.insert(pos, newChild[0])
                if len(child):
                    self.parent_map[child] = currElement
                    stack.append((child, self.ancestors[:]))

            for element, lst in insertQueue:
                for i, obj in enumerate(lst):
                    newChild = obj[0]
                    element.insert(i, newChild)
        return tree
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """

        i = "\n"
        # `code`/`pre` content is whitespace-significant, so skip it.
        if self.md.is_block_level(elem.tag) and elem.tag not in ['code', 'pre']:
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and self.md.is_block_level(elem[0].tag):
                elem.text = i
            for e in elem:
                if self.md.is_block_level(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        # NOTE: this second tail check runs for every element (including
        # non-block ones excluded above), ensuring each gets a trailing newline.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """

        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        brs = root.iter('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
        # Clean up extra empty lines at end of code blocks.
        pres = root.iter('pre')
        for pre in pres:
            if len(pre) and pre[0].tag == 'code':
                # AtomicString prevents later inline processing of the code text.
                pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# init for sext package
|
| 2 |
+
""" Setuptools extensions
|
| 3 |
+
|
| 4 |
+
nibabel uses these routines, and houses them, and installs them. nipy-proper
|
| 5 |
+
and dipy use them.
|
| 6 |
+
"""
|
| 7 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/py3builder.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" distutils utilities for porting to python 3 within 2-compatible tree """
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# Select a build_py command: on Python 3 installs that provide
# build_py_2to3, extend it to also run 2to3 over doctests; otherwise fall
# back to the plain distutils build_py.
# NOTE(review): distutils and lib2to3 are removed in modern Python (3.12+);
# this module only works on the legacy interpreters it was written for.
try:
    from distutils.command.build_py import build_py_2to3
except ImportError:
    # 2.x - no parsing of code
    from distutils.command.build_py import build_py
else:  # Python 3
    # Command to also apply 2to3 to doctests
    from distutils import log

    class build_py(build_py_2to3):
        def run_2to3(self, files):
            # Add doctest parsing; this stuff copied from distutils.utils in
            # python 3.2 source
            if not files:
                return
            fixer_names, options, explicit = (self.fixer_names,
                                              self.options,
                                              self.explicit)
            # Make this class local, to delay import of 2to3
            from lib2to3.refactor import RefactoringTool, get_fixers_from_package

            class DistutilsRefactoringTool(RefactoringTool):
                # Route 2to3's logging through distutils' log module.
                def log_error(self, msg, *args, **kw):
                    log.error(msg, *args)

                def log_message(self, msg, *args):
                    log.info(msg, *args)

                def log_debug(self, msg, *args):
                    log.debug(msg, *args)

            if fixer_names is None:
                fixer_names = get_fixers_from_package('lib2to3.fixes')
            r = DistutilsRefactoringTool(fixer_names, options=options)
            r.refactor(files, write=True)
            # Then doctests
            r.refactor(files, write=True, doctests_only=True)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/sexts.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Distutils / setuptools helpers """
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from os.path import join as pjoin, split as psplit, splitext
|
| 5 |
+
|
| 6 |
+
from configparser import ConfigParser
|
| 7 |
+
|
| 8 |
+
from distutils.version import LooseVersion
|
| 9 |
+
from distutils.command.build_py import build_py
|
| 10 |
+
from distutils.command.install_scripts import install_scripts
|
| 11 |
+
|
| 12 |
+
from distutils import log
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def get_comrec_build(pkg_dir, build_cmd=build_py):
    """ Return extended build command class for recording commit

    The extended command tries to run git to find the current commit, getting
    the empty string if it fails.  It then writes the commit hash into a file
    in the `pkg_dir` path, named ``COMMIT_INFO.txt``.

    In due course this information can be used by the package after it is
    installed, to tell you what commit it was installed from if known.

    To make use of this system, you need a package with a COMMIT_INFO.txt file -
    e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::

        # This is an ini file that may contain information about the code state
        [commit hash]
        # The line below may contain a valid hash if it has been substituted during 'git archive'
        archive_subst_hash=$Format:%h$
        # This line may be modified by the install process
        install_hash=

    The COMMIT_INFO file above is also designed to be used with git substitution
    - so you probably also want a ``.gitattributes`` file in the root directory
    of your working tree that contains something like this::

       myproject/COMMIT_INFO.txt export-subst

    That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
    archive`` - useful in case someone makes such an archive - for example with
    via the github 'download source' button.

    Although all the above will work as is, you might consider having something
    like a ``get_info()`` function in your package to display the commit
    information at the terminal.  See the ``pkg_info.py`` module in the nipy
    package for an example.
    """
    class MyBuildPy(build_cmd):
        """ Subclass to write commit data into installation tree """
        def run(self):
            build_cmd.run(self)
            import subprocess
            proc = subprocess.Popen('git rev-parse --short HEAD',
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            repo_commit, _ = proc.communicate()
            # Fix: ``communicate`` returns bytes; the previous ``str(bytes)``
            # wrote the ``b'...'`` repr (with trailing newline) into the
            # config.  Decode and strip to get the bare hash text instead.
            repo_commit = repo_commit.decode(errors='replace').strip()
            # We write the installation commit even if it's empty
            cfg_parser = ConfigParser()
            cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
            cfg_parser.set('commit hash', 'install_hash', repo_commit)
            out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
            # Fix: close the output handle deterministically (the original
            # passed an anonymous ``open()`` to ``write``, leaking it).
            with open(out_pth, 'wt') as fobj:
                cfg_parser.write(fobj)
    return MyBuildPy
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _add_append_key(in_dict, key, value):
|
| 72 |
+
""" Helper for appending dependencies to setuptools args """
|
| 73 |
+
# If in_dict[key] does not exist, create it
|
| 74 |
+
# If in_dict[key] is a string, make it len 1 list of strings
|
| 75 |
+
# Append value to in_dict[key] list
|
| 76 |
+
if key not in in_dict:
|
| 77 |
+
in_dict[key] = []
|
| 78 |
+
elif isinstance(in_dict[key], str):
|
| 79 |
+
in_dict[key] = [in_dict[key]]
|
| 80 |
+
in_dict[key].append(value)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# Dependency checks
|
| 84 |
+
def package_check(pkg_name, version=None,
                  optional=False,
                  checker=LooseVersion,
                  version_getter=None,
                  messages=None,
                  setuptools_args=None
                  ):
    """ Check if package `pkg_name` is present and has good enough version

    Has two modes of operation.  If `setuptools_args` is None (the default),
    raise an error for missing non-optional dependencies and log warnings for
    missing optional dependencies.  If `setuptools_args` is a dict, then fill
    ``install_requires`` key value with any missing non-optional dependencies,
    and the ``extras_requires`` key value with optional dependencies.

    This allows us to work with and without setuptools.  It also means we can
    check for packages that have not been installed with setuptools to avoid
    installing them again.

    Parameters
    ----------
    pkg_name : str
       name of package as imported into python
    version : {None, str}, optional
       minimum version of the package that we require. If None, we don't
       check the version.  Default is None
    optional : bool or str, optional
       If ``bool(optional)`` is False, raise error for absent package or wrong
       version; otherwise warn.  If ``setuptools_args`` is not None, and
       ``bool(optional)`` is not False, then `optional` should be a string
       giving the feature name for the ``extras_require`` argument to setup.
    checker : callable, optional
       callable with which to return comparable thing from version
       string.  Default is ``distutils.version.LooseVersion``
    version_getter : {None, callable}:
       Callable that takes `pkg_name` as argument, and returns the
       package version string - as in::

          ``version = version_getter(pkg_name)``

       If None, equivalent to::

          mod = __import__(pkg_name); version = mod.__version__``
    messages : None or dict, optional
       dictionary giving output messages
    setuptools_args : None or dict
       If None, raise errors / warnings for missing non-optional / optional
       dependencies.  If dict fill key values ``install_requires`` and
       ``extras_require`` for non-optional and optional dependencies.
    """
    # Idiom fix: PEP 8 prefers `is not None` over `not ... is None`.
    setuptools_mode = setuptools_args is not None
    optional_tf = bool(optional)
    if version_getter is None:
        # Default: import the package and read its __version__ attribute.
        def version_getter(pkg_name):
            mod = __import__(pkg_name)
            return mod.__version__
    if messages is None:
        messages = {}
    # Default messages; any caller-supplied entries override these.
    msgs = {
         'missing': 'Cannot import package "%s" - is it installed?',
         'missing opt': 'Missing optional package "%s"',
         'opt suffix': '; you may get run-time errors',
         'version too old': 'You have version %s of package "%s"'
                            ' but we need version >= %s', }
    msgs.update(messages)
    status, have_version = _package_status(pkg_name,
                                           version,
                                           version_getter,
                                           checker)
    if status == 'satisfied':
        return
    if not setuptools_mode:
        # Plain (no-setuptools) mode: raise or warn right now.
        if status == 'missing':
            if not optional_tf:
                raise RuntimeError(msgs['missing'] % pkg_name)
            log.warn(msgs['missing opt'] % pkg_name +
                     msgs['opt suffix'])
            return
        elif status == 'no-version':
            raise RuntimeError(f'Cannot find version for {pkg_name}')
        assert status == 'low-version'
        if not optional_tf:
            raise RuntimeError(msgs['version too old'] % (have_version,
                                                          pkg_name,
                                                          version))
        log.warn(msgs['version too old'] % (have_version,
                                            pkg_name,
                                            version)
                 + msgs['opt suffix'])
        return
    # setuptools mode: record the dependency instead of raising/warning.
    if optional_tf and not isinstance(optional, str):
        raise RuntimeError('Not-False optional arg should be string')
    dependency = pkg_name
    if version:
        dependency += '>=' + version
    if optional_tf:
        # Idiom fix: `not in` instead of `not ... in`.
        if 'extras_require' not in setuptools_args:
            setuptools_args['extras_require'] = {}
        _add_append_key(setuptools_args['extras_require'],
                        optional,
                        dependency)
        return
    _add_append_key(setuptools_args, 'install_requires', dependency)
    return
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _package_status(pkg_name, version, version_getter, checker):
|
| 192 |
+
try:
|
| 193 |
+
__import__(pkg_name)
|
| 194 |
+
except ImportError:
|
| 195 |
+
return 'missing', None
|
| 196 |
+
if not version:
|
| 197 |
+
return 'satisfied', None
|
| 198 |
+
try:
|
| 199 |
+
have_version = version_getter(pkg_name)
|
| 200 |
+
except AttributeError:
|
| 201 |
+
return 'no-version', None
|
| 202 |
+
if checker(have_version) < checker(version):
|
| 203 |
+
return 'low-version', have_version
|
| 204 |
+
return 'satisfied', have_version
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# Windows .bat wrapper template.  '{FNAME}' is replaced with the wrapped
# script's file name; the wrapper reads the script's "#!" first line to find
# the python interpreter, then calls it on the script with all arguments.
BAT_TEMPLATE = r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
call "%py_exe%" %pyscript% %*
"""
|
| 220 |
+
|
| 221 |
+
class install_scripts_bat(install_scripts):
    """ Make scripts executable on Windows

    Scripts are bare file names without extension on Unix, fitting (for example)
    Debian rules. They identify as python scripts with the usual ``#!`` first
    line. Unix recognizes and uses this first "shebang" line, but Windows does
    not. So, on Windows only we add a ``.bat`` wrapper of name
    ``bare_script_name.bat`` to call ``bare_script_name`` using the python
    interpreter from the #! first line of the script.

    Notes
    -----
    See discussion at
    https://matthew-brett.github.io/pydagogue/installing_scripts.html and
    example at git://github.com/matthew-brett/myscripter.git for more
    background.
    """
    def run(self):
        install_scripts.run(self)
        if os.name != "nt":
            # Only Windows needs the .bat shims.
            return
        for script_path in self.get_outputs():
            # If we can find an executable name in the #! top line of the
            # script file, make a .bat wrapper for it.
            with open(script_path, 'rt') as stream:
                shebang = stream.readline()
            if not (shebang.startswith('#!') and
                    'python' in shebang.lower()):
                log.info("No #!python executable found, skipping .bat "
                         "wrapper")
                continue
            folder, leaf = psplit(script_path)
            stem, _ = splitext(leaf)
            wrapper_path = pjoin(folder, stem + '.bat')
            wrapper_text = BAT_TEMPLATE.replace('{FNAME}', leaf)
            log.info(f"Making {wrapper_path} wrapper for {script_path}")
            if self.dry_run:
                continue
            with open(wrapper_path, 'wt') as stream:
                stream.write(wrapper_text)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class Bunch(object):
    """ Expose the non-dunder entries of a mapping as object attributes. """
    def __init__(self, vars):
        # Copy every public entry onto the instance; names starting with
        # '__' (e.g. module dunders left by exec) are deliberately skipped.
        public = {k: v for k, v in vars.items() if not k.startswith('__')}
        self.__dict__.update(public)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def read_vars_from(ver_file):
    """ Read variables from Python text file

    Parameters
    ----------
    ver_file : str
        Filename of file to read

    Returns
    -------
    info_vars : Bunch instance
        Bunch object where variables read from `ver_file` appear as
        attributes
    """
    # Execute the file's source in a scratch namespace, then wrap the
    # resulting names for attribute-style access.
    namespace = {}
    with open(ver_file, 'rt') as stream:
        source = stream.read()
    exec(source, namespace)
    return Bunch(namespace)
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nisext/testers.py
ADDED
|
@@ -0,0 +1,533 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test package information in various install settings
|
| 2 |
+
|
| 3 |
+
The routines here install the package from source directories, zips or eggs, and
|
| 4 |
+
check these installations by running tests, checking version information,
|
| 5 |
+
looking for files that were not copied over.
|
| 6 |
+
|
| 7 |
+
The typical use for this module is as a Makefile target. For example, here are
|
| 8 |
+
the Makefile targets from nibabel::
|
| 9 |
+
|
| 10 |
+
# Check for files not installed
|
| 11 |
+
check-files:
|
| 12 |
+
$(PYTHON) -c 'from nisext.testers import check_files; check_files("nibabel")'
|
| 13 |
+
|
| 14 |
+
# Print out info for possible install methods
|
| 15 |
+
check-version-info:
|
| 16 |
+
$(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nibabel")'
|
| 17 |
+
|
| 18 |
+
# Run tests from installed code
|
| 19 |
+
installed-tests:
|
| 20 |
+
$(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nibabel")'
|
| 21 |
+
|
| 22 |
+
# Run tests from installed code
|
| 23 |
+
sdist-tests:
|
| 24 |
+
$(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nibabel")'
|
| 25 |
+
|
| 26 |
+
# Run tests from binary egg
|
| 27 |
+
bdist-egg-tests:
|
| 28 |
+
$(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")'
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
import os
|
| 34 |
+
import sys
|
| 35 |
+
from os.path import join as pjoin, abspath
|
| 36 |
+
from glob import glob
|
| 37 |
+
import shutil
|
| 38 |
+
import tempfile
|
| 39 |
+
import zipfile
|
| 40 |
+
import re
|
| 41 |
+
from subprocess import Popen, PIPE
|
| 42 |
+
|
| 43 |
+
# POSIX runs command strings through the shell; on Windows ('nt') the string
# is handed to the process without an intervening shell.
NEEDS_SHELL = os.name != 'nt'
# Interpreter running this module - used to invoke setup.py and test scripts.
PYTHON=sys.executable
# Some restricted platforms lack os.putenv; we need it to set PATH/PYTHONPATH.
HAVE_PUTENV = hasattr(os, 'putenv')

# Subdirectory (under a temporary install prefix) receiving installed
# library code.
PY_LIB_SDIR = 'pylib'
|
| 48 |
+
|
| 49 |
+
def back_tick(cmd, ret_err=False, as_str=True):
    """ Run command `cmd`, return stdout, or stdout, stderr if `ret_err`

    Roughly equivalent to ``check_output`` in Python 2.7

    Parameters
    ----------
    cmd : str
        command to execute
    ret_err : bool, optional
        If True, return stderr in addition to stdout.  If False, just return
        stdout
    as_str : bool, optional
        Whether to decode outputs to unicode string on exit.

    Returns
    -------
    out : str or tuple
        If `ret_err` is False, return stripped string containing stdout from
        `cmd`.  If `ret_err` is True, return tuple of (stdout, stderr) where
        ``stdout`` is the stripped stdout, and ``stderr`` is the stripped
        stderr.

    Raises
    ------
    RuntimeError
        if command returns non-zero exit code.
    """
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=NEEDS_SHELL)
    out, err = proc.communicate()
    retcode = proc.returncode
    if retcode is None:
        # communicate() normally reaps the child; a None code means it is
        # somehow still running - kill it and report.
        proc.terminate()
        raise RuntimeError(cmd + ' process did not terminate')
    if retcode != 0:
        raise RuntimeError(cmd + ' process returned code %d' % retcode)

    def _tidy(stream):
        # Strip surrounding whitespace; latin-1 decoding never raises.
        stream = stream.strip()
        return stream.decode('latin-1') if as_str else stream

    out = _tidy(out)
    if ret_err:
        return out, _tidy(err)
    return out
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True):
    """ Run command in own process in anonymous path

    Parameters
    ----------
    mod_name : str
        Name of module to import - e.g. 'nibabel'
    pkg_path : str
        directory containing `mod_name` package.  Typically that will be the
        directory containing the e.g. 'nibabel' directory.
    cmd : str
        Python command to execute
    script_dir : None or str, optional
        script directory to prepend to PATH
    print_location : bool, optional
        Whether to print the location of the imported `mod_name`

    Returns
    -------
    stdout : str
        stdout as str
    stderr : str
        stderr as str
    """
    if script_dir is None:
        paths_add = ''
    else:
        if not HAVE_PUTENV:
            raise RuntimeError('We cannot set environment variables')
        # Need to add the python path for the scripts to pick up our package in
        # their environment, because the scripts will get called via the shell
        # (via `cmd`). Consider that PYTHONPATH may not be set.  Because the
        # command might run scripts via the shell, prepend script_dir to the
        # system path also.
        # NOTE(review): the r'"..."' wrapping embeds literal double quotes
        # into the PATH / PYTHONPATH entries - confirm this is intended on
        # the target platforms.
        paths_add = \
r"""
os.environ['PATH'] = r'"{script_dir}"' + os.path.pathsep + os.environ['PATH']
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None:
    os.environ['PYTHONPATH'] = r'"{pkg_path}"'
else:
    os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH
""".format(**locals())
    if print_location:
        # Echo where the module was imported from, to verify which install ran.
        p_loc = f'print({mod_name}.__file__);'
    else:
        p_loc = ''
    cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        # Run from an anonymous temp directory so the import cannot pick up
        # the package from the current working directory by accident.
        os.chdir(tmpdir)
        with open('script.py', 'wt') as fobj:
            # Build a throwaway driver script: set sys.path / environment,
            # import the module, then run `cmd` in a fresh child interpreter.
            fobj.write(
r"""
import os
import sys
sys.path.insert(0, r"{pkg_path}")
{paths_add}
import {mod_name}
{p_loc}
{cmd}""".format(**locals()))
        res = back_tick(f'{PYTHON} script.py', ret_err=True)
    finally:
        # Restore cwd before removing the temp directory we are standing in.
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
    return res
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def zip_extract_all(fname, path=None):
    """ Extract all members from zipfile

    Deals with situation where the directory is stored in the zipfile as a name,
    as well as files that have to go into this directory.

    Parameters
    ----------
    fname : str
        filename of zip archive to unpack
    path : None or str, optional
        directory into which to extract; default is the current directory
    """
    # Fix: close the archive deterministically (the original never closed
    # the ZipFile, leaking the file handle).
    with zipfile.ZipFile(fname) as zf:
        # Remove members that are just bare directories; extracting the
        # files creates any needed parent directories anyway.
        members = [m for m in zf.namelist() if not m.endswith('/')]
        for zipinfo in members:
            zf.extract(zipinfo, path, None)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'):
    """ Install package in `from_dir` to standard location in `to_dir`

    Parameters
    ----------
    from_dir : str
        path containing files to install with ``python setup.py ...``
    to_dir : str
        prefix path to which files will be installed, as in ``python setup.py
        install --prefix=to_dir``
    py_lib_sdir : str, optional
        subdirectory within `to_dir` to which library code will be installed
    bin_sdir : str, optional
        subdirectory within `to_dir` to which scripts will be installed
    """
    # NOTE(review): `bin_sdir` is accepted but never used in the command
    # below - scripts land wherever the --prefix default puts them; confirm.
    lib_dir = os.path.join(to_dir, py_lib_sdir)
    lib_opts = (f' --install-purelib={lib_dir} '
                f'--install-platlib={lib_dir}')
    start_dir = os.path.abspath(os.getcwd())
    install_cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {lib_opts}'
    try:
        # setup.py must run from the source directory; restore cwd always.
        os.chdir(from_dir)
        back_tick(install_cmd)
    finally:
        os.chdir(start_dir)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def install_from_zip(zip_fname, install_path, pkg_finder=None,
                     py_lib_sdir=PY_LIB_SDIR,
                     script_sdir='bin'):
    """ Install package from zip file `zip_fname`

    Parameters
    ----------
    zip_fname : str
        filename of zip file containing package code
    install_path : str
        output prefix at which to install package
    pkg_finder : None or callable, optional
        If None, assume zip contains ``setup.py`` at the top level. Otherwise,
        find directory containing ``setup.py`` with ``pth =
        pkg_finder(unzip_path)`` where ``unzip_path`` is the path to which we
        have unzipped the zip file contents.
    py_lib_sdir : str, optional
        subdirectory to which to write the library code from the package.
        Thus if package called ``nibabel``, the written code will be in
        ``<install_path>/<py_lib_sdir>/nibabel``
    script_sdir : str, optional
        subdirectory to which we write the installed scripts.  Thus scripts
        will be written to ``<install_path>/<script_sdir>``
    """
    unpack_dir = tempfile.mkdtemp()
    try:
        # Unpack into a scratch directory; the zip may otherwise unpack the
        # module into the current directory.
        zip_extract_all(zip_fname, unpack_dir)
        setup_dir = (unpack_dir if pkg_finder is None
                     else pkg_finder(unpack_dir))
        install_from_to(setup_dir, install_path, py_lib_sdir, script_sdir)
    finally:
        shutil.rmtree(unpack_dir)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def contexts_print_info(mod_name, repo_path, install_path):
    """ Print result of get_info from different installation routes

    Runs installation from:

    * git archive zip file
    * with setup.py install from repository directory
    * just running code from repository directory

    and prints out result of get_info in each case.  There will be many files
    written into `install_path` that you may want to clean up somehow.

    Parameters
    ----------
    mod_name : str
       package name that will be installed, and tested
    repo_path : str
       path to location of git repository
    install_path : str
       path into which to install temporary installations
    """
    site_pkgs_path = os.path.join(install_path, PY_LIB_SDIR)
    # first test archive
    pwd = os.path.abspath(os.getcwd())
    out_fname = pjoin(install_path, 'test.zip')
    try:
        # git archive must run from inside the repository; restore cwd always.
        os.chdir(repo_path)
        back_tick(f'git archive --format zip -o {out_fname} HEAD')
    finally:
        os.chdir(pwd)
    install_from_zip(out_fname, install_path, None)
    # Assumes the package exposes a top-level get_info() callable.
    cmd_str = f'print({mod_name}.get_info())'
    # run_mod_cmd returns (stdout, stderr); only stdout is printed here.
    print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0])
    # now test install into a directory from the repository
    install_from_to(repo_path, install_path, PY_LIB_SDIR)
    print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0])
    # test from development tree
    print(run_mod_cmd(mod_name, repo_path, cmd_str)[0])
    return
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def info_from_here(mod_name):
    """ Run info context checks starting in working directory

    Runs checks from current working directory, installing temporary
    installations into a new temporary directory

    Parameters
    ----------
    mod_name : str
       package name that will be installed, and tested
    """
    here = os.path.abspath(os.getcwd())
    scratch = tempfile.mkdtemp()
    try:
        contexts_print_info(mod_name, here, scratch)
    finally:
        # Always remove the temporary install tree.
        shutil.rmtree(scratch)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def tests_installed(mod_name, source_path=None):
    """ Install from `source_path` into temporary directory; run tests

    Parameters
    ----------
    mod_name : str
        name of module - e.g. 'nibabel'
    source_path : None or str
        Path from which to install.  If None, defaults to working directory
    """
    if source_path is None:
        source_path = os.path.abspath(os.getcwd())
    prefix = tempfile.mkdtemp()
    try:
        install_from_to(source_path, prefix, PY_LIB_SDIR, 'bin')
        # Run the package's own test entry point from the installed copy.
        stdout, stderr = run_mod_cmd(mod_name,
                                     pjoin(prefix, PY_LIB_SDIR),
                                     mod_name + '.test()',
                                     pjoin(prefix, 'bin'))
    finally:
        shutil.rmtree(prefix)
    print(stdout)
    print(stderr)

# Tell nose this is not a test
tests_installed.__test__ = False
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def check_installed_files(repo_mod_path, install_mod_path):
    """ Check files in `repo_mod_path` are installed at `install_mod_path`

    At the moment, all this does is check that all the ``*.py`` files in
    `repo_mod_path` are installed at `install_mod_path`.

    Parameters
    ----------
    repo_mod_path : str
        repository path containing package files, e.g. <nibabel-repo>/nibabel>
    install_mod_path : str
        path at which package has been installed.  This is the path where the
        root package ``__init__.py`` lives.

    Return
    ------
    uninstalled : list
        list of files that should have been installed, but have not been
        installed
    """
    # Thin wrapper: restrict the generic directory comparison to python
    # source files only.
    return missing_from(repo_mod_path, install_mod_path, filter=r"\.py$")
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def missing_from(path0, path1, filter=None):
    """ Return filenames present in `path0` but not in `path1`

    Parameters
    ----------
    path0 : str
        path which contains all files of interest
    path1 : str
        path which should contain all files of interest
    filter : None or str or regexp, optional
        A successful result from ``filter.search(fname)`` means the file is of
        interest.  None means all files are of interest.  (Name shadows the
        builtin; kept for backward compatibility with keyword callers.)

    Returns
    -------
    path1_missing : list
        list of all files missing from `path1` that are in `path0` at the same
        relative path.
    """
    # Idiom fix: `is not None` instead of `not ... is None`.
    if filter is not None:
        filter = re.compile(filter)
    uninstalled = []
    # Walk directory tree to get py files
    for dirpath, dirnames, filenames in os.walk(path0):
        # Fix: map the walked directory onto `path1` by *relative path*.  The
        # original used str.replace, which corrupts the mapping whenever
        # `path0` occurs again later inside `dirpath`.
        out_dirpath = os.path.join(path1, os.path.relpath(dirpath, path0))
        for fname in filenames:
            if filter is not None and filter.search(fname) is None:
                continue
            equiv_fname = os.path.join(out_dirpath, fname)
            if not os.path.isfile(equiv_fname):
                uninstalled.append(pjoin(dirpath, fname))
    return uninstalled
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def check_files(mod_name, repo_path=None, scripts_sdir='bin'):
    """ Print library and script files not picked up during install

    Builds an sdist zip from `repo_path`, installs it into a temporary
    prefix, and reports any ``*.py`` library files or ``bin`` scripts that
    did not make it into the install.  Returns True if anything was missed.
    """
    if repo_path is None:
        repo_path = abspath(os.getcwd())
    prefix = tempfile.mkdtemp()
    repo_lib = pjoin(repo_path, mod_name)
    installed_lib = pjoin(prefix, PY_LIB_SDIR, mod_name)
    repo_scripts = pjoin(repo_path, 'bin')
    installed_scripts = pjoin(prefix, 'bin')
    try:
        zip_fname = make_dist(repo_path,
                              prefix,
                              'sdist --formats=zip',
                              '*.zip')
        finder = get_sdist_finder(mod_name)
        install_from_zip(zip_fname, prefix, finder, PY_LIB_SDIR, scripts_sdir)
        lib_misses = missing_from(repo_lib, installed_lib, r"\.py$")
        script_misses = missing_from(repo_scripts, installed_scripts)
    finally:
        shutil.rmtree(prefix)
    for kind, misses in (('library', lib_misses), ('script', script_misses)):
        if misses:
            print(f"Missed {kind} files: ", ', '.join(misses))
        else:
            print(f"You got all the {kind} files")
    return bool(lib_misses or script_misses)
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def get_sdist_finder(mod_name):
    """ Return function finding sdist source directory for `mod_name`

    The returned callable accepts a path and returns the single directory
    beneath it matching ``<mod_name>-*``; it raises OSError unless exactly
    one such directory exists.
    """
    pattern = mod_name + '-*'

    def find_pkg_dir(pth):
        # Exactly one unpacked sdist directory must be present.
        matches = glob(pjoin(pth, pattern))
        if len(matches) != 1:
            raise OSError('There must be one and only one package dir')
        return matches[0]

    return find_pkg_dir
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True):
    """ Make sdist zip, install from it, and run tests """
    if repo_path is None:
        repo_path = abspath(os.getcwd())
    # Throw-away prefix holding the installed sdist; removed even on error.
    tmp_prefix = tempfile.mkdtemp()
    try:
        sdist_zip = make_dist(repo_path,
                              tmp_prefix,
                              'sdist --formats=zip',
                              '*.zip')
        install_from_zip(sdist_zip, tmp_prefix, get_sdist_finder(mod_name),
                         PY_LIB_SDIR, 'bin')
        # Run the package's own test entry point from the installed copy.
        test_cmd = f"{mod_name}.test(label='{label}', doctests={doctests})"
        stdout, stderr = run_mod_cmd(mod_name,
                                     pjoin(tmp_prefix, PY_LIB_SDIR),
                                     test_cmd,
                                     pjoin(tmp_prefix, 'bin'))
    finally:
        shutil.rmtree(tmp_prefix)
    print(stdout)
    print(stderr)


# Keep test collectors (nose / pytest) from treating this helper as a test.
sdist_tests.__test__ = False
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True):
    """ Make bdist_egg, unzip it, and run tests from result

    We've got a problem here, because the egg does not contain the scripts, and
    so, if we are testing the scripts with ``mod.test()``, we won't pick up the
    scripts from the repository we are testing.

    So, you might need to add a label to the script tests, and use the `label`
    parameter to indicate these should be skipped. As in:

    bdist_egg_tests('nibabel', None, label='not script_test')
    """
    if repo_path is None:
        repo_path = abspath(os.getcwd())
    # Scratch directory for building and unpacking the egg; always removed.
    unpack_dir = tempfile.mkdtemp()
    try:
        egg_fname = make_dist(repo_path,
                              unpack_dir,
                              'bdist_egg',
                              '*.egg')
        zip_extract_all(egg_fname, unpack_dir)
        # Run the package's own test entry point against the unpacked egg.
        test_cmd = f"{mod_name}.test(label='{label}', doctests={doctests})"
        stdout, stderr = run_mod_cmd(mod_name,
                                     unpack_dir,
                                     test_cmd,
                                     pjoin(unpack_dir, 'bin'))
    finally:
        shutil.rmtree(unpack_dir)
    print(stdout)
    print(stderr)


# Keep test collectors (nose / pytest) from treating this helper as a test.
bdist_egg_tests.__test__ = False
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def make_dist(repo_path, out_dir, setup_params, zipglob):
    """ Create distutils distribution file

    Runs ``setup.py`` inside `repo_path` and returns the single file it
    wrote into `out_dir` matching `zipglob`.

    Parameters
    ----------
    repo_path : str
        path to repository containing code and ``setup.py``
    out_dir : str
        path to which to write new distribution file
    setup_params: str
        parameters to pass to ``setup.py`` to create distribution.
    zipglob : str
        glob identifying expected output file.

    Returns
    -------
    out_fname : str
        filename of generated distribution file

    Examples
    --------
    Make, return a zipped sdist::

        make_dist('/path/to/repo', '/tmp/path', 'sdist --formats=zip', '*.zip')

    Make, return a binary egg::

        make_dist('/path/to/repo', '/tmp/path', 'bdist_egg', '*.egg')
    """
    # setup.py must run with the repository as the working directory;
    # restore the caller's cwd no matter what happens.
    old_cwd = os.path.abspath(os.getcwd())
    os.chdir(repo_path)
    try:
        back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}')
        found = glob(pjoin(out_dir, zipglob))
        if len(found) != 1:
            raise OSError(f"There must be one and only one {zipglob} "
                          f"file, but I found \"{': '.join(found)}\"")
    finally:
        os.chdir(old_cwd)
    return found[0]
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/oauthlib-3.2.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: oauthlib
|
| 3 |
+
Version: 3.2.0
|
| 4 |
+
Summary: A generic, spec-compliant, thorough implementation of the OAuth request-signing logic
|
| 5 |
+
Home-page: https://github.com/oauthlib/oauthlib
|
| 6 |
+
Author: The OAuthlib Community
|
| 7 |
+
Author-email: idan@gazit.me
|
| 8 |
+
Maintainer: Ib Lundgren
|
| 9 |
+
Maintainer-email: ib.lundgren@gmail.com
|
| 10 |
+
License: BSD
|
| 11 |
+
Platform: any
|
| 12 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 13 |
+
Classifier: Environment :: Web Environment
|
| 14 |
+
Classifier: Intended Audience :: Developers
|
| 15 |
+
Classifier: License :: OSI Approved
|
| 16 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 17 |
+
Classifier: Operating System :: MacOS
|
| 18 |
+
Classifier: Operating System :: POSIX
|
| 19 |
+
Classifier: Operating System :: POSIX :: Linux
|
| 20 |
+
Classifier: Programming Language :: Python
|
| 21 |
+
Classifier: Programming Language :: Python :: 3
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 25 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 26 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 27 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 28 |
+
Classifier: Programming Language :: Python :: Implementation
|
| 29 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 30 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 31 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 32 |
+
Requires-Python: >=3.6
|
| 33 |
+
Description-Content-Type: text/x-rst
|
| 34 |
+
License-File: LICENSE
|
| 35 |
+
Provides-Extra: rsa
|
| 36 |
+
Requires-Dist: cryptography (>=3.0.0) ; extra == 'rsa'
|
| 37 |
+
Provides-Extra: signals
|
| 38 |
+
Requires-Dist: blinker (>=1.4.0) ; extra == 'signals'
|
| 39 |
+
Provides-Extra: signedtoken
|
| 40 |
+
Requires-Dist: cryptography (>=3.0.0) ; extra == 'signedtoken'
|
| 41 |
+
Requires-Dist: pyjwt (<3,>=2.0.0) ; extra == 'signedtoken'
|
| 42 |
+
|
| 43 |
+
OAuthLib - Python Framework for OAuth1 & OAuth2
|
| 44 |
+
===============================================
|
| 45 |
+
|
| 46 |
+
*A generic, spec-compliant, thorough implementation of the OAuth request-signing
|
| 47 |
+
logic for Python 3.6+.*
|
| 48 |
+
|
| 49 |
+
.. image:: https://travis-ci.org/oauthlib/oauthlib.svg?branch=master
|
| 50 |
+
:target: https://travis-ci.org/oauthlib/oauthlib
|
| 51 |
+
:alt: Travis
|
| 52 |
+
.. image:: https://coveralls.io/repos/oauthlib/oauthlib/badge.svg?branch=master
|
| 53 |
+
:target: https://coveralls.io/r/oauthlib/oauthlib
|
| 54 |
+
:alt: Coveralls
|
| 55 |
+
.. image:: https://img.shields.io/pypi/pyversions/oauthlib.svg
|
| 56 |
+
:target: https://pypi.org/project/oauthlib/
|
| 57 |
+
:alt: Download from PyPI
|
| 58 |
+
.. image:: https://img.shields.io/pypi/l/oauthlib.svg
|
| 59 |
+
:target: https://pypi.org/project/oauthlib/
|
| 60 |
+
:alt: License
|
| 61 |
+
.. image:: https://app.fossa.io/api/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib.svg?type=shield
|
| 62 |
+
:target: https://app.fossa.io/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib?ref=badge_shield
|
| 63 |
+
:alt: FOSSA Status
|
| 64 |
+
.. image:: https://img.shields.io/readthedocs/oauthlib.svg
|
| 65 |
+
:target: https://oauthlib.readthedocs.io/en/latest/index.html
|
| 66 |
+
:alt: Read the Docs
|
| 67 |
+
.. image:: https://badges.gitter.im/oauthlib/oauthlib.svg
|
| 68 |
+
:target: https://gitter.im/oauthlib/Lobby
|
| 69 |
+
:alt: Chat on Gitter
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
.. image:: https://raw.githubusercontent.com/oauthlib/oauthlib/8d71b161fd145d11c40d55c9ab66ac134a303253/docs/logo/oauthlib-banner-700x192.png
|
| 73 |
+
:target: https://github.com/oauthlib/oauthlib/
|
| 74 |
+
:alt: OAuth + Python = OAuthlib Python Framework
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
OAuth often seems complicated and difficult-to-implement. There are several
|
| 78 |
+
prominent libraries for handling OAuth requests, but they all suffer from one or
|
| 79 |
+
both of the following:
|
| 80 |
+
|
| 81 |
+
1. They predate the `OAuth 1.0 spec`_, AKA RFC 5849.
|
| 82 |
+
2. They predate the `OAuth 2.0 spec`_, AKA RFC 6749.
|
| 83 |
+
3. They assume the usage of a specific HTTP request library.
|
| 84 |
+
|
| 85 |
+
.. _`OAuth 1.0 spec`: https://tools.ietf.org/html/rfc5849
|
| 86 |
+
.. _`OAuth 2.0 spec`: https://tools.ietf.org/html/rfc6749
|
| 87 |
+
|
| 88 |
+
OAuthLib is a framework which implements the logic of OAuth1 or OAuth2 without
|
| 89 |
+
assuming a specific HTTP request object or web framework. Use it to graft OAuth
|
| 90 |
+
client support onto your favorite HTTP library, or provide support onto your
|
| 91 |
+
favourite web framework. If you're a maintainer of such a library, write a thin
|
| 92 |
+
veneer on top of OAuthLib and get OAuth support for very little effort.
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
Documentation
|
| 96 |
+
--------------
|
| 97 |
+
|
| 98 |
+
Full documentation is available on `Read the Docs`_. All contributions are very
|
| 99 |
+
welcome! The documentation is still quite sparse, please open an issue for what
|
| 100 |
+
you'd like to know, or discuss it in our `Gitter community`_, or even better, send a
|
| 101 |
+
pull request!
|
| 102 |
+
|
| 103 |
+
.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
|
| 104 |
+
.. _`Read the Docs`: https://oauthlib.readthedocs.io/en/latest/index.html
|
| 105 |
+
|
| 106 |
+
Interested in making OAuth requests?
|
| 107 |
+
------------------------------------
|
| 108 |
+
|
| 109 |
+
Then you might be more interested in using `requests`_ which has OAuthLib
|
| 110 |
+
powered OAuth support provided by the `requests-oauthlib`_ library.
|
| 111 |
+
|
| 112 |
+
.. _`requests`: https://github.com/requests/requests
|
| 113 |
+
.. _`requests-oauthlib`: https://github.com/requests/requests-oauthlib
|
| 114 |
+
|
| 115 |
+
Which web frameworks are supported?
|
| 116 |
+
-----------------------------------
|
| 117 |
+
|
| 118 |
+
The following packages provide OAuth support using OAuthLib.
|
| 119 |
+
|
| 120 |
+
- For Django there is `django-oauth-toolkit`_, which includes `Django REST framework`_ support.
|
| 121 |
+
- For Flask there is `flask-oauthlib`_ and `Flask-Dance`_.
|
| 122 |
+
- For Pyramid there is `pyramid-oauthlib`_.
|
| 123 |
+
- For Bottle there is `bottle-oauthlib`_.
|
| 124 |
+
|
| 125 |
+
If you have written an OAuthLib package that supports your favorite framework,
|
| 126 |
+
please open a Pull Request, updating the documentation.
|
| 127 |
+
|
| 128 |
+
.. _`django-oauth-toolkit`: https://github.com/evonove/django-oauth-toolkit
|
| 129 |
+
.. _`flask-oauthlib`: https://github.com/lepture/flask-oauthlib
|
| 130 |
+
.. _`Django REST framework`: http://django-rest-framework.org
|
| 131 |
+
.. _`Flask-Dance`: https://github.com/singingwolfboy/flask-dance
|
| 132 |
+
.. _`pyramid-oauthlib`: https://github.com/tilgovi/pyramid-oauthlib
|
| 133 |
+
.. _`bottle-oauthlib`: https://github.com/thomsonreuters/bottle-oauthlib
|
| 134 |
+
|
| 135 |
+
Using OAuthLib? Please get in touch!
|
| 136 |
+
------------------------------------
|
| 137 |
+
Patching OAuth support onto an http request framework? Creating an OAuth
|
| 138 |
+
provider extension for a web framework? Simply using OAuthLib to Get Things Done
|
| 139 |
+
or to learn?
|
| 140 |
+
|
| 141 |
+
No matter which, we'd love to hear from you in our `Gitter community`_ or if you have
|
| 142 |
+
anything in particular you would like to have, change or comment on don't
|
| 143 |
+
hesitate for a second to send a pull request or open an issue. We might be quite
|
| 144 |
+
busy and therefore slow to reply but we love feedback!
|
| 145 |
+
|
| 146 |
+
Chances are you have run into something annoying that you wish there was
|
| 147 |
+
documentation for, if you wish to gain eternal fame and glory, and a drink if we
|
| 148 |
+
have the pleasure to run into each other, please send a docs pull request =)
|
| 149 |
+
|
| 150 |
+
.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
|
| 151 |
+
|
| 152 |
+
License
|
| 153 |
+
-------
|
| 154 |
+
|
| 155 |
+
OAuthLib is yours to use and abuse according to the terms of the BSD license.
|
| 156 |
+
Check the LICENSE file for full details.
|
| 157 |
+
|
| 158 |
+
Credits
|
| 159 |
+
-------
|
| 160 |
+
|
| 161 |
+
OAuthLib has been started and maintained several years by Idan Gazit and other
|
| 162 |
+
amazing `AUTHORS`_. Thanks to their wonderful work, the open-source `community`_
|
| 163 |
+
creation has been possible and the project can stay active and reactive to users
|
| 164 |
+
requests.
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
.. _`AUTHORS`: https://github.com/oauthlib/oauthlib/blob/master/AUTHORS
|
| 168 |
+
.. _`community`: https://github.com/oauthlib/
|
| 169 |
+
|
| 170 |
+
Changelog
|
| 171 |
+
---------
|
| 172 |
+
|
| 173 |
+
*OAuthLib is in active development, with the core of both OAuth1 and OAuth2
|
| 174 |
+
completed, for providers as well as clients.* See `supported features`_ for
|
| 175 |
+
details.
|
| 176 |
+
|
| 177 |
+
.. _`supported features`: https://oauthlib.readthedocs.io/en/latest/feature_matrix.html
|
| 178 |
+
|
| 179 |
+
For a full changelog see ``CHANGELOG.rst``.
|
| 180 |
+
|
| 181 |
+
|
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/oauthlib-3.2.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
oauthlib-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
oauthlib-3.2.0.dist-info/LICENSE,sha256=PR4S2KxSwLbBSK9tKR9yQAuHIO0WwKxKiYaLbRSxyTk,1530
|
| 3 |
+
oauthlib-3.2.0.dist-info/METADATA,sha256=4BfXhM-eajiPmT96iyP4bykMyHf_Xz2pfggvcOyrNv0,7447
|
| 4 |
+
oauthlib-3.2.0.dist-info/RECORD,,
|
| 5 |
+
oauthlib-3.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
oauthlib-3.2.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
|
| 7 |
+
oauthlib-3.2.0.dist-info/top_level.txt,sha256=gz2py0fFs1AhG1O7KpHPcIXOgXOwdIiCaSnmLkiR12Q,9
|
| 8 |
+
oauthlib/__init__.py,sha256=mhqqb9kZLISCLzTMXwHwdDnS3q_7-NQciRNsQr_1q5M,686
|
| 9 |
+
oauthlib/__pycache__/__init__.cpython-38.pyc,,
|
| 10 |
+
oauthlib/__pycache__/common.cpython-38.pyc,,
|
| 11 |
+
oauthlib/__pycache__/signals.cpython-38.pyc,,
|
| 12 |
+
oauthlib/__pycache__/uri_validate.cpython-38.pyc,,
|
| 13 |
+
oauthlib/common.py,sha256=FimXHIfgk-XVgzu3lq6-wv3Jsq7hzGgsurwGvTm3t3A,13422
|
| 14 |
+
oauthlib/oauth1/__init__.py,sha256=U_6yHcB8jdYPbcas4CBel7tE_YYCN5S1300U3zVlWlE,1187
|
| 15 |
+
oauthlib/oauth1/__pycache__/__init__.cpython-38.pyc,,
|
| 16 |
+
oauthlib/oauth1/rfc5849/__init__.py,sha256=-5sJHDG3JRZQRJYlCjkj3CP2jZgqEg0OY5pVIxE4mxE,16744
|
| 17 |
+
oauthlib/oauth1/rfc5849/__pycache__/__init__.cpython-38.pyc,,
|
| 18 |
+
oauthlib/oauth1/rfc5849/__pycache__/errors.cpython-38.pyc,,
|
| 19 |
+
oauthlib/oauth1/rfc5849/__pycache__/parameters.cpython-38.pyc,,
|
| 20 |
+
oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-38.pyc,,
|
| 21 |
+
oauthlib/oauth1/rfc5849/__pycache__/signature.cpython-38.pyc,,
|
| 22 |
+
oauthlib/oauth1/rfc5849/__pycache__/utils.cpython-38.pyc,,
|
| 23 |
+
oauthlib/oauth1/rfc5849/endpoints/__init__.py,sha256=SeIECziJ-Sv_NCGnowG3P9UnX_VdFNldRqRywEaJvxY,327
|
| 24 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-38.pyc,,
|
| 25 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-38.pyc,,
|
| 26 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-38.pyc,,
|
| 27 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-38.pyc,,
|
| 28 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-38.pyc,,
|
| 29 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-38.pyc,,
|
| 30 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-38.pyc,,
|
| 31 |
+
oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-38.pyc,,
|
| 32 |
+
oauthlib/oauth1/rfc5849/endpoints/access_token.py,sha256=CRgLV5DqDiwVvbo8MiHySbTEKrxETJV29-VGu-2kQ7Y,9347
|
| 33 |
+
oauthlib/oauth1/rfc5849/endpoints/authorization.py,sha256=zbU7TzO6nB6853UIqtTkhxUV-JTHOdOc-CvdQsIQKWk,6724
|
| 34 |
+
oauthlib/oauth1/rfc5849/endpoints/base.py,sha256=pBOS1MqIrJVxydTD6jnzZcGZRh_NMbRmT1c_c2ISKVs,11643
|
| 35 |
+
oauthlib/oauth1/rfc5849/endpoints/pre_configured.py,sha256=Ie5oBUq_JTsXQdvfWhcMRjhH3OOxS_mRHbKBQ9TpsGg,543
|
| 36 |
+
oauthlib/oauth1/rfc5849/endpoints/request_token.py,sha256=Nz-d7ShVB5Hlh9cAKTp-KHeHH_t5f7khvtCVT8avM9Q,9293
|
| 37 |
+
oauthlib/oauth1/rfc5849/endpoints/resource.py,sha256=rmzBuF0FyrLTpaAehFx0wkj0laAoe7XTdbbeRTrUCZQ,7376
|
| 38 |
+
oauthlib/oauth1/rfc5849/endpoints/signature_only.py,sha256=MX5zV66v4-wrR4cu7OmOd_GF3L8ysM60HmEiHtRR0l8,3327
|
| 39 |
+
oauthlib/oauth1/rfc5849/errors.py,sha256=WPvKVjPlgkCYp6TXvcwC8VETkhsZBzphKCkTJKDPNfM,2474
|
| 40 |
+
oauthlib/oauth1/rfc5849/parameters.py,sha256=Abnxpix_Yy7P3A3vbkrV2bkFxtnR5TRTTKdOu9MKydo,4802
|
| 41 |
+
oauthlib/oauth1/rfc5849/request_validator.py,sha256=x1c8EdFxdvSvilHQ_ZavkiSDjPy6gkYb0gMhXqLr7cw,30988
|
| 42 |
+
oauthlib/oauth1/rfc5849/signature.py,sha256=FBrizlpvzlHjsJbT3AMmYOyBD_OnpOeEdSxXCg-1iAY,32024
|
| 43 |
+
oauthlib/oauth1/rfc5849/utils.py,sha256=IapG_jM6iMe4e0DYWWds1jp-wce2Lf_cuhFrtCP_2ls,2613
|
| 44 |
+
oauthlib/oauth2/__init__.py,sha256=uPkdHF2NEpIM6Ybz-jPPEKU5e56eHptaOz2NPwppyys,1597
|
| 45 |
+
oauthlib/oauth2/__pycache__/__init__.cpython-38.pyc,,
|
| 46 |
+
oauthlib/oauth2/rfc6749/__init__.py,sha256=sJcxfdG6HTloXzhkG8-PTJTVQWoCeNtnw6ODNCJNw58,404
|
| 47 |
+
oauthlib/oauth2/rfc6749/__pycache__/__init__.cpython-38.pyc,,
|
| 48 |
+
oauthlib/oauth2/rfc6749/__pycache__/errors.cpython-38.pyc,,
|
| 49 |
+
oauthlib/oauth2/rfc6749/__pycache__/parameters.cpython-38.pyc,,
|
| 50 |
+
oauthlib/oauth2/rfc6749/__pycache__/request_validator.cpython-38.pyc,,
|
| 51 |
+
oauthlib/oauth2/rfc6749/__pycache__/tokens.cpython-38.pyc,,
|
| 52 |
+
oauthlib/oauth2/rfc6749/__pycache__/utils.cpython-38.pyc,,
|
| 53 |
+
oauthlib/oauth2/rfc6749/clients/__init__.py,sha256=TuYtiErfo0_Ej0816tIv5rBsrwA9BjYz3tu_ZM0X364,504
|
| 54 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/__init__.cpython-38.pyc,,
|
| 55 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/backend_application.cpython-38.pyc,,
|
| 56 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/base.cpython-38.pyc,,
|
| 57 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/legacy_application.cpython-38.pyc,,
|
| 58 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/mobile_application.cpython-38.pyc,,
|
| 59 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/service_application.cpython-38.pyc,,
|
| 60 |
+
oauthlib/oauth2/rfc6749/clients/__pycache__/web_application.cpython-38.pyc,,
|
| 61 |
+
oauthlib/oauth2/rfc6749/clients/backend_application.py,sha256=CAhmHPY3vJ1-McjrMQ4TgJXXqx2Ls9PLfuPWUV0JEtY,3223
|
| 62 |
+
oauthlib/oauth2/rfc6749/clients/base.py,sha256=q1_tKeEE-3KBUxUrIneMXabiBK9T9iGr9aXaowdPB6E,26575
|
| 63 |
+
oauthlib/oauth2/rfc6749/clients/legacy_application.py,sha256=FVDcZNyJK-uSSAPJUQMM3Ud_-EAzJJ8pZ5rr937c7NY,4031
|
| 64 |
+
oauthlib/oauth2/rfc6749/clients/mobile_application.py,sha256=nuM6Gp7jZtY6xP85j1hQUp1nvW_6MpM_dK3wJntKxb4,8877
|
| 65 |
+
oauthlib/oauth2/rfc6749/clients/service_application.py,sha256=fBxB-eY6kG1NKrl5jGfFCJFAJt_PJP8opUdRKY7sojA,7810
|
| 66 |
+
oauthlib/oauth2/rfc6749/clients/web_application.py,sha256=2wtPArVJipR0n3kSCDca-V2SQuVPl4PhGWl57LiOxDo,12086
|
| 67 |
+
oauthlib/oauth2/rfc6749/endpoints/__init__.py,sha256=RL_txhULl35A74dbvlJ7nvqwp3GMCSCpg_4TvjoO-Xk,553
|
| 68 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/__init__.cpython-38.pyc,,
|
| 69 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/authorization.cpython-38.pyc,,
|
| 70 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/base.cpython-38.pyc,,
|
| 71 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/introspect.cpython-38.pyc,,
|
| 72 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/metadata.cpython-38.pyc,,
|
| 73 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/pre_configured.cpython-38.pyc,,
|
| 74 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/resource.cpython-38.pyc,,
|
| 75 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/revocation.cpython-38.pyc,,
|
| 76 |
+
oauthlib/oauth2/rfc6749/endpoints/__pycache__/token.cpython-38.pyc,,
|
| 77 |
+
oauthlib/oauth2/rfc6749/endpoints/authorization.py,sha256=2N2Cb_TQtpUPcqDIclsJnZERtaMKmH9uSgGoMZLFnUI,4584
|
| 78 |
+
oauthlib/oauth2/rfc6749/endpoints/base.py,sha256=fUhCGaftD5bm5PstA6L2CqUNb9kHDpUj4_BsvLRbi4w,4130
|
| 79 |
+
oauthlib/oauth2/rfc6749/endpoints/introspect.py,sha256=SGVI0jI1KmstheTNxMWfngNWCtvdkNTi-H_o3MRQvHE,4983
|
| 80 |
+
oauthlib/oauth2/rfc6749/endpoints/metadata.py,sha256=ySOnnACtwTCjXmojYwZA_EC3EU7OMBlJ117x9P9xxuM,10516
|
| 81 |
+
oauthlib/oauth2/rfc6749/endpoints/pre_configured.py,sha256=ChhORao78XGGlnikJsLb6d_FZvKaLGBUM-te-84NeJ8,11954
|
| 82 |
+
oauthlib/oauth2/rfc6749/endpoints/resource.py,sha256=vpXoovgpmByY-IuW0PDccS5IJGFoFiLVjLLUpGFmXX4,3248
|
| 83 |
+
oauthlib/oauth2/rfc6749/endpoints/revocation.py,sha256=hH3F2G3-QmqCQ1tE4yN2AQfaQ2lEB46YkO5-n1ophGE,5212
|
| 84 |
+
oauthlib/oauth2/rfc6749/endpoints/token.py,sha256=iJDlaSkVR8U6s1_T9fiyVnLgfCgOWsq9PFDcmzL74H4,4595
|
| 85 |
+
oauthlib/oauth2/rfc6749/errors.py,sha256=5EE4Qs3ru34d33wqaFo-WGOofLLYK1jTov9sqG92CW0,12947
|
| 86 |
+
oauthlib/oauth2/rfc6749/grant_types/__init__.py,sha256=im_XwEWmw3dhmzcdfyhkN38xZopBhL3cRShmmCtqQs0,368
|
| 87 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/__init__.cpython-38.pyc,,
|
| 88 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/authorization_code.cpython-38.pyc,,
|
| 89 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/base.cpython-38.pyc,,
|
| 90 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/client_credentials.cpython-38.pyc,,
|
| 91 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/implicit.cpython-38.pyc,,
|
| 92 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/refresh_token.cpython-38.pyc,,
|
| 93 |
+
oauthlib/oauth2/rfc6749/grant_types/__pycache__/resource_owner_password_credentials.cpython-38.pyc,,
|
| 94 |
+
oauthlib/oauth2/rfc6749/grant_types/authorization_code.py,sha256=R2cXCtdGfDkKseYu-raoZkOpc7lnMa2oVRBs-C3jGKs,26858
|
| 95 |
+
oauthlib/oauth2/rfc6749/grant_types/base.py,sha256=BMcu0_Ks0khfp0B0w2t8CgT6hx9YDjUtkIx95fHf8MM,10213
|
| 96 |
+
oauthlib/oauth2/rfc6749/grant_types/client_credentials.py,sha256=Wr0CpWDVmHrIfOBPTYp9RxnISTfYdp5SjSaRAu77vUY,5079
|
| 97 |
+
oauthlib/oauth2/rfc6749/grant_types/implicit.py,sha256=hYAEYOwToxo3eNpGRC9SyJue93tu37jZVL7MYiaErDs,16852
|
| 98 |
+
oauthlib/oauth2/rfc6749/grant_types/refresh_token.py,sha256=OW0mI-sUPfiTD2UaiZEv8kdpp6QGVSRJEvopJyuRELw,5832
|
| 99 |
+
oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py,sha256=9FsDbrSNNylWKkEvgdafJDzlNncTRCOhIZODo-f4ZIM,8516
|
| 100 |
+
oauthlib/oauth2/rfc6749/parameters.py,sha256=WYiyVkEfKS66fXpyczcard3bDaMHy0h1H5l2PUtUY_c,19016
|
| 101 |
+
oauthlib/oauth2/rfc6749/request_validator.py,sha256=CduQ-_Utp1_LrHNDhrC_zx6KMrNFVAVNB8e5_vVziWw,28814
|
| 102 |
+
oauthlib/oauth2/rfc6749/tokens.py,sha256=H6QOIMNGXFEhDeBfOdK2I_UparP5KRrz55SZQJ4WpVY,11097
|
| 103 |
+
oauthlib/oauth2/rfc6749/utils.py,sha256=EKlU_U-FcYkdd8PvXo1irtHTqBXF7gKqdFKBadteZ64,2207
|
| 104 |
+
oauthlib/oauth2/rfc8628/__init__.py,sha256=yfG2QHuDxrp7_9HNKPEeXYXA_qBVZqiRrhI7q2cG4NM,232
|
| 105 |
+
oauthlib/oauth2/rfc8628/__pycache__/__init__.cpython-38.pyc,,
|
| 106 |
+
oauthlib/oauth2/rfc8628/clients/__init__.py,sha256=indCdGycy9cekvLOBxYbCwtyezEVhl3uKZzoShml-aY,201
|
| 107 |
+
oauthlib/oauth2/rfc8628/clients/__pycache__/__init__.cpython-38.pyc,,
|
| 108 |
+
oauthlib/oauth2/rfc8628/clients/__pycache__/device.cpython-38.pyc,,
|
| 109 |
+
oauthlib/oauth2/rfc8628/clients/device.py,sha256=DjDTy51Vxyl28u5sQGB2UsPzr-VAFNO52okuOtw5BcQ,3878
|
| 110 |
+
oauthlib/openid/__init__.py,sha256=qZQCKCdQt40myte_nxSYrWvzf1VVADqRl8om0-t6LzE,162
|
| 111 |
+
oauthlib/openid/__pycache__/__init__.cpython-38.pyc,,
|
| 112 |
+
oauthlib/openid/connect/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 113 |
+
oauthlib/openid/connect/__pycache__/__init__.cpython-38.pyc,,
|
| 114 |
+
oauthlib/openid/connect/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 115 |
+
oauthlib/openid/connect/core/__pycache__/__init__.cpython-38.pyc,,
|
| 116 |
+
oauthlib/openid/connect/core/__pycache__/exceptions.cpython-38.pyc,,
|
| 117 |
+
oauthlib/openid/connect/core/__pycache__/request_validator.cpython-38.pyc,,
|
| 118 |
+
oauthlib/openid/connect/core/__pycache__/tokens.cpython-38.pyc,,
|
| 119 |
+
oauthlib/openid/connect/core/endpoints/__init__.py,sha256=nQ6mGniUaM9X1ENG0tZlPgWgbLdlFESWGK-5_e8mp5Y,229
|
| 120 |
+
oauthlib/openid/connect/core/endpoints/__pycache__/__init__.cpython-38.pyc,,
|
| 121 |
+
oauthlib/openid/connect/core/endpoints/__pycache__/pre_configured.cpython-38.pyc,,
|
| 122 |
+
oauthlib/openid/connect/core/endpoints/__pycache__/userinfo.cpython-38.pyc,,
|
| 123 |
+
oauthlib/openid/connect/core/endpoints/pre_configured.py,sha256=p4Bq4HHUTvCBYXlTHr3PXktABKjHFGC3yBmwxWifzKc,5426
|
| 124 |
+
oauthlib/openid/connect/core/endpoints/userinfo.py,sha256=gTO5BHp4evBqI9CcQKQzJNFs1rx8QJLlBtfo7S67s0I,3847
|
| 125 |
+
oauthlib/openid/connect/core/exceptions.py,sha256=uMMjE7VMc16jyL7TIhpbCx48_MsHD2C_atoMIemBKVA,4790
|
| 126 |
+
oauthlib/openid/connect/core/grant_types/__init__.py,sha256=geSZh6OFlupoC2tg9Bqqsnd31nu1-EheWNobzu86ZqU,426
|
| 127 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/__init__.cpython-38.pyc,,
|
| 128 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/authorization_code.cpython-38.pyc,,
|
| 129 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/base.cpython-38.pyc,,
|
| 130 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/dispatchers.cpython-38.pyc,,
|
| 131 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/hybrid.cpython-38.pyc,,
|
| 132 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/implicit.cpython-38.pyc,,
|
| 133 |
+
oauthlib/openid/connect/core/grant_types/__pycache__/refresh_token.cpython-38.pyc,,
|
| 134 |
+
oauthlib/openid/connect/core/grant_types/authorization_code.py,sha256=WOlS5RlSjIk2VNNmC5O4svxfTeUJiXpL3o5Mqn5EULk,1441
|
| 135 |
+
oauthlib/openid/connect/core/grant_types/base.py,sha256=StMSoCD-lrv-01GZMLKdLMvTlAg-VkQ0lw4jdhVOEms,15386
|
| 136 |
+
oauthlib/openid/connect/core/grant_types/dispatchers.py,sha256=kgYI3SpTZd796N_bnkqdaTDis_WmwhzkcMS9Avw_1zM,3979
|
| 137 |
+
oauthlib/openid/connect/core/grant_types/hybrid.py,sha256=PHWBazxe3qpJq02rpU93jaK7URxI_r1zl0Ee4ibcaPA,2742
|
| 138 |
+
oauthlib/openid/connect/core/grant_types/implicit.py,sha256=UICxnDNoePZfTUbL5QCBWA231o8XIQEnxocSrPp9gzw,1971
|
| 139 |
+
oauthlib/openid/connect/core/grant_types/refresh_token.py,sha256=8X0i1EHLgBIrlqP10rwJ5lXWO3f8iupmfn2E6DlLmnw,1035
|
| 140 |
+
oauthlib/openid/connect/core/request_validator.py,sha256=-lo1BnAhMWVkCj2Qhpn22LbV6CDmx4Nh4tCOntCg9tQ,13767
|
| 141 |
+
oauthlib/openid/connect/core/tokens.py,sha256=uiHwpFiVhaQDFdKizV74AcBPuEiHxZQ9mtZ8n5KfvFU,1596
|
| 142 |
+
oauthlib/signals.py,sha256=_PKDXWqKW6X3IbQUxGqW4eJ5Yi3p8jdOqXPAKfI956E,1489
|
| 143 |
+
oauthlib/uri_validate.py,sha256=XLEO1mSoXK5j9kYA12JxnTktWhsJH40mPJ9m8q7jDYw,6107
|