Matt300209 commited on
Commit
d65ac14
·
verified ·
1 Parent(s): a643941

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/INSTALLER +1 -0
  2. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/METADATA +197 -0
  3. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/RECORD +11 -0
  4. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/WHEEL +5 -0
  5. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/licenses/LICENSE +24 -0
  6. venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/top_level.txt +1 -0
  7. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/INSTALLER +1 -0
  8. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/LICENSE.txt +177 -0
  9. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/METADATA +149 -0
  10. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/NOTICE +60 -0
  11. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/RECORD +0 -0
  12. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/WHEEL +5 -0
  13. venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/top_level.txt +1 -0
  14. venv/lib/python3.10/site-packages/charset_normalizer/__init__.py +48 -0
  15. venv/lib/python3.10/site-packages/charset_normalizer/__main__.py +6 -0
  16. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__main__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/api.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/cd.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/legacy.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/md.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/utils.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/version.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/charset_normalizer/api.py +669 -0
  27. venv/lib/python3.10/site-packages/charset_normalizer/cd.py +395 -0
  28. venv/lib/python3.10/site-packages/charset_normalizer/cli/__init__.py +8 -0
  29. venv/lib/python3.10/site-packages/charset_normalizer/cli/__main__.py +381 -0
  30. venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/charset_normalizer/constant.py +2015 -0
  33. venv/lib/python3.10/site-packages/charset_normalizer/legacy.py +80 -0
  34. venv/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so +0 -0
  35. venv/lib/python3.10/site-packages/charset_normalizer/md.py +635 -0
  36. venv/lib/python3.10/site-packages/charset_normalizer/models.py +360 -0
  37. venv/lib/python3.10/site-packages/charset_normalizer/py.typed +0 -0
  38. venv/lib/python3.10/site-packages/charset_normalizer/utils.py +414 -0
  39. venv/lib/python3.10/site-packages/charset_normalizer/version.py +8 -0
  40. venv/lib/python3.10/site-packages/click_option_group/__init__.py +33 -0
  41. venv/lib/python3.10/site-packages/click_option_group/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/click_option_group/__pycache__/_core.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/click_option_group/__pycache__/_decorators.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/click_option_group/__pycache__/_helpers.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/click_option_group/__pycache__/_version.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/click_option_group/_core.py +432 -0
  47. venv/lib/python3.10/site-packages/click_option_group/_decorators.py +241 -0
  48. venv/lib/python3.10/site-packages/click_option_group/_helpers.py +41 -0
  49. venv/lib/python3.10/site-packages/click_option_group/_version.py +21 -0
  50. venv/lib/python3.10/site-packages/click_option_group/py.typed +0 -0
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/METADATA ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: airportsdata
3
+ Version: 20250811
4
+ Summary: Extensive database of location and timezone data for nearly every airport and landing strip in the world.
5
+ Author-email: Mike Borsetti <mike+airportsdata@borsetti.com>
6
+ Maintainer-email: Mike Borsetti <mike+airportsdata@borsetti.com>
7
+ License: The MIT License (MIT)
8
+
9
+ Copyright (c) 2020- Mike Borsetti <mike@borsetti.com>
10
+
11
+ This project includes data from https://github.com/mwgg/Airports Copyright
12
+ (c) 2014 mwgg
13
+
14
+ Permission is hereby granted, free of charge, to any person obtaining a copy
15
+ of this software and associated documentation files (the "Software"), to deal
16
+ in the Software without restriction, including without limitation the rights
17
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18
+ copies of the Software, and to permit persons to whom the Software is
19
+ furnished to do so, subject to the following conditions:
20
+
21
+ The above copyright notice and this permission notice shall be included in all
22
+ copies or substantial portions of the Software.
23
+
24
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30
+ SOFTWARE.
31
+
32
+ Project-URL: Documentation, https://github.com/mborsetti/airportsdata/blob/main/README.rst
33
+ Project-URL: Repository, https://github.com/mborsetti/airportsdata/
34
+ Project-URL: Database (csv), https://github.com/mborsetti/airportsdata/blob/main/airportsdata/airports.csv
35
+ Project-URL: Changelog, https://github.com/mborsetti/airportsdata/blob/main/CHANGELOG.rst
36
+ Project-URL: Issues, https://github.com/mborsetti/airportsdata/issues
37
+ Project-URL: CI, https://github.com/mborsetti/airportsdata/actions
38
+ Keywords: airports,aerodromes,ICAO,IATA
39
+ Classifier: Development Status :: 5 - Production/Stable
40
+ Classifier: Programming Language :: Python
41
+ Classifier: Programming Language :: Python :: 3
42
+ Classifier: Programming Language :: Python :: 3.10
43
+ Classifier: Programming Language :: Python :: 3.11
44
+ Classifier: Programming Language :: Python :: 3.12
45
+ Classifier: Programming Language :: Python :: 3.13
46
+ Classifier: Operating System :: OS Independent
47
+ Classifier: Topic :: Database
48
+ Classifier: Intended Audience :: Developers
49
+ Classifier: Typing :: Typed
50
+ Requires-Python: >=3.9
51
+ Description-Content-Type: text/x-rst
52
+ License-File: LICENSE
53
+ Dynamic: license-file
54
+
55
+ ========================
56
+ airportsdata |downloads|
57
+ ========================
58
+
59
+ .. |ICAO| replace:: 28,224
60
+
61
+ .. |IATA| replace:: 7,861
62
+
63
+ .. |LID| replace:: 12,609
64
+
65
+ .. |pyversion| image:: https://img.shields.io/pypi/v/airportsdata.svg
66
+ :target: https://pypi.org/project/airportsdata/
67
+ :alt: pypi version
68
+ .. |support| image:: https://img.shields.io/pypi/pyversions/airportsdata.svg
69
+ :target: https://pypi.org/project/airportsdata/
70
+ :alt: supported Python version
71
+ .. |pypi_version| image:: https://img.shields.io/pypi/v/airportsdata.svg?label=
72
+ :target: https://pypi.org/project/airportsdata/
73
+ :alt: PyPI version
74
+ .. |format| image:: https://img.shields.io/pypi/format/airportsdata.svg
75
+ :target: https://pypi.org/project/airportsdata/
76
+ :alt: Kit format
77
+ .. |downloads| image:: https://static.pepy.tech/badge/airportsdata
78
+ :target: https://www.pepy.tech/project/airportsdata
79
+ :alt: PyPI downloads
80
+ .. |license| image:: https://img.shields.io/pypi/l/airportsdata.svg
81
+ :target: https://pypi.org/project/airportsdata/
82
+ :alt: license
83
+ .. |issues| image:: https://img.shields.io/github/issues-raw/mborsetti/airportsdata
84
+ :target: https://github.com/mborsetti/airportsdata/issues
85
+ :alt: issues
86
+ .. |CI| image:: https://github.com/mborsetti/airportsdata/actions/workflows/ci-cd.yaml/badge.svg?event=push
87
+ :target: https://github.com/mborsetti/airportsdata/actions
88
+ :alt: CI testing status
89
+ .. |coveralls| image:: https://coveralls.io/repos/github/mborsetti/airportsdata/badge.svg?branch=main
90
+ :target: https://coveralls.io/github/mborsetti/airportsdata?branch=main
91
+ :alt: code coverage by Coveralls
92
+ .. |status| image:: https://img.shields.io/pypi/status/airportsdata.svg
93
+ :target: https://pypi.org/project/airportsdata/
94
+ :alt: Package stability
95
+ .. |security| image:: https://img.shields.io/badge/security-bandit-yellow.svg
96
+ :target: https://github.com/PyCQA/bandit
97
+ :alt: Security Status
98
+
99
+ Extensive database of location and timezone data for nearly every operational airport and landing strip in the world,
100
+ with |ICAO| entries.
101
+
102
+ Each entry consists of the following data:
103
+
104
+ * ``icao``: ICAO 4-letter Location Indicator (Doc 7910) or (if none) an internal Pseudo-ICAO Identifier [#]_ (|ICAO|
105
+ entries);
106
+ * ``iata``: IATA 3-letter Location Code (|IATA| entries) or an empty string [#]_;
107
+ * ``name``: Official name (diacritized latin script);
108
+ * ``city``: City (diacritized latin script), ideally using the local language or English;
109
+ * ``subd``: Subdivision (e.g. state, province, region, etc.), ideally using the local-language or English names of
110
+ `ISO 3166-2 <https://en.wikipedia.org/wiki/ISO_3166-2#Current_codes>`__;
111
+ * ``country``: `ISO 3166-1 <https://en.wikipedia.org/wiki/ISO_3166-1#Current_codes>`__ alpha-2 country code
112
+ (plus ``XK`` for Kosovo);
113
+ * ``elevation``: MSL elevation of the highest point of the landing area, in feet (warning: it is often wrong);
114
+ * ``lat``: Latitude (decimal) of the `airport reference point
115
+ <https://en.wikipedia.org/wiki/Airport_reference_point>`__ (max 5 or 6 decimal digits);
116
+ * ``lon``: Longitude (decimal) of the `airport reference point
117
+ <https://en.wikipedia.org/wiki/Airport_reference_point>`__ (max 5 or 6 decimal digits);
118
+ * ``tz``: Timezone expressed as a `tz database name <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>`__
119
+ (IANA-compliant);
120
+ * ``lid``: U.S. FAA Location Identifier (|LID| entries), or an empty string.
121
+
122
+ .. [#] See `here <https://github.com/mborsetti/airportsdata/blob/main/README_identifiers.rst>`__ for an explanation on
123
+ how the Pseudo-ICAO Identifier is generated for airports and seaplane bases without an ICAO 4-letter Location
124
+ Indicator.
125
+
126
+ .. [#] IATA Multi Airport Cities (MAC) are not not airports and therfore not included, but we provide a database and a
127
+ Python function that returns the above data for all the airports of a IATA MAC. Please see documentation `here
128
+ <https://github.com/mborsetti/airportsdata/blob/main/README_IATA.rst>`__.
129
+
130
+ Best efforts are placed to review all contributions for accuracy, but accuracy cannot be guaranteed nor should be
131
+ expected by users.
132
+
133
+ Important notes:
134
+
135
+ * Timezone was originally sourced from `TimeZoneDB <https://timezonedb.com>`__;
136
+ * No historical data (closed airports are removed);
137
+ * No heliports without a IATA code;
138
+ * No sea bases without a IATA code;
139
+ * No surface transportation stations, even if they have an official IATA code.
140
+
141
+ Please report any issues you may find `here
142
+ <https://github.com/mborsetti/airportsdata/blob/main/CONTRIBUTING.rst>`__.
143
+
144
+ This project is a fork of https://github.com/mwgg/Airports. All new data submitted in this fork have been validated
145
+ against national `Aeronautical Information Publications (AIP) or equivalent
146
+ <https://github.com/mborsetti/airportsdata/blob/main/README_AIP.rst>`__ (or
147
+ ARINC database) and `IATA <https://www.iata.org/en/publications/directories/code-search/>`__ or
148
+ `https://www.ch-aviation.com/airports/{IATA}` before publishing.
149
+
150
+ Raw data
151
+ ========
152
+
153
+ A CSV (comma separated values) file, with headers and encoded in UTF-8, is downloadable from GitHub `here
154
+ <https://github.com/mborsetti/airportsdata/raw/main/airportsdata/airports.csv>`__.
155
+
156
+ Python
157
+ ======
158
+ |pyversion| |support| |format| |status| |security| |CI| |coveralls| |issues|
159
+
160
+ Install from `PyPi <https://pypi.org/project/airportsdata/>`__ using pip:
161
+
162
+ .. code-block:: bash
163
+
164
+ pip install -U airportsdata
165
+
166
+ Once installed, to load the data into a dict:
167
+
168
+ .. code-block:: python
169
+
170
+ import airportsdata
171
+ airports = airportsdata.load() # key is the ICAO identifier (the default)
172
+ print(airports['KJFK'])
173
+
174
+ or
175
+
176
+ .. code-block:: python
177
+
178
+ import airportsdata
179
+ airports = airportsdata.load('IATA') # key is the IATA location code
180
+ print(airports['JFK'])
181
+
182
+ or
183
+
184
+ .. code-block:: python
185
+
186
+ import airportsdata
187
+ airports = airportsdata.load('LID') # key is the FAA LID
188
+ print(airports['01AA'])
189
+
190
+ Older Python versions are supported for 3 years after being obsoleted by a new major release (i.e. about 4 years
191
+ since their original release).
192
+
193
+ License |license|
194
+ =================
195
+
196
+ Released under the `MIT License <https://opensource.org/licenses/MIT>`__ (see license `here
197
+ <https://github.com/mborsetti/airportsdata/blob/main/LICENSE>`__).
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ airportsdata-20250811.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ airportsdata-20250811.dist-info/METADATA,sha256=afLhvquKvnEDFOsoWAq8S7O7NhGzzM_ZPYlL7nqfYO0,9069
3
+ airportsdata-20250811.dist-info/RECORD,,
4
+ airportsdata-20250811.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
5
+ airportsdata-20250811.dist-info/licenses/LICENSE,sha256=Gra_Nl4fahiDq2oveX52GyIQgbUqMyc9hsFX_VYhBr4,1191
6
+ airportsdata-20250811.dist-info/top_level.txt,sha256=pwno2lCh8ojWgA5MI3oKaEO0U96yNrH106dZIvhEbgE,13
7
+ airportsdata/__init__.py,sha256=1j_Gtn0v9EMgpUfWPet3YhRlyXV5sLrXDtIRJiy0mpE,4842
8
+ airportsdata/__pycache__/__init__.cpython-310.pyc,,
9
+ airportsdata/airports.csv,sha256=X1t27aP64VQRR077Ohk36mHraRsqv7kyhVP9yqwCwWw,3040500
10
+ airportsdata/iata_macs.csv,sha256=jgeMECojIRDZ5e6JcTYQF0NRzGOCdJK9YCf-nB0u3Iw,4061
11
+ airportsdata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2020- Mike Borsetti <mike@borsetti.com>
4
+
5
+ This project includes data from https://github.com/mwgg/Airports Copyright
6
+ (c) 2014 mwgg
7
+
8
+ Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ of this software and associated documentation files (the "Software"), to deal
10
+ in the Software without restriction, including without limitation the rights
11
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ copies of the Software, and to permit persons to whom the Software is
13
+ furnished to do so, subject to the following conditions:
14
+
15
+ The above copyright notice and this permission notice shall be included in all
16
+ copies or substantial portions of the Software.
17
+
18
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ SOFTWARE.
venv/lib/python3.10/site-packages/airportsdata-20250811.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ airportsdata
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/METADATA ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: botocore
3
+ Version: 1.34.162
4
+ Summary: Low-level, data-driven core of boto 3.
5
+ Home-page: https://github.com/boto/botocore
6
+ Author: Amazon Web Services
7
+ License: Apache License 2.0
8
+ Platform: UNKNOWN
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Intended Audience :: System Administrators
12
+ Classifier: Natural Language :: English
13
+ Classifier: License :: OSI Approved :: Apache Software License
14
+ Classifier: Programming Language :: Python
15
+ Classifier: Programming Language :: Python :: 3 :: Only
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Requires-Python: >= 3.8
23
+ License-File: LICENSE.txt
24
+ License-File: NOTICE
25
+ Requires-Dist: jmespath (<2.0.0,>=0.7.1)
26
+ Requires-Dist: python-dateutil (<3.0.0,>=2.1)
27
+ Requires-Dist: urllib3 (<1.27,>=1.25.4) ; python_version < "3.10"
28
+ Requires-Dist: urllib3 (!=2.2.0,<3,>=1.25.4) ; python_version >= "3.10"
29
+ Provides-Extra: crt
30
+ Requires-Dist: awscrt (==0.21.2) ; extra == 'crt'
31
+
32
+ botocore
33
+ ========
34
+
35
+ |Version| |Python| |License|
36
+
37
+ A low-level interface to a growing number of Amazon Web Services. The
38
+ botocore package is the foundation for the
39
+ `AWS CLI <https://github.com/aws/aws-cli>`__ as well as
40
+ `boto3 <https://github.com/boto/boto3>`__.
41
+
42
+ Botocore is maintained and published by `Amazon Web Services`_.
43
+
44
+ Notices
45
+ -------
46
+
47
+ On 2023-12-13, support was dropped for Python 3.7. This follows the
48
+ Python Software Foundation `end of support <https://www.python.org/dev/peps/pep-0537/#lifespan>`__
49
+ for the runtime which occurred on 2023-06-27.
50
+ For more information, see this `blog post <https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/>`__.
51
+
52
+ .. _`Amazon Web Services`: https://aws.amazon.com/what-is-aws/
53
+ .. |Python| image:: https://img.shields.io/pypi/pyversions/botocore.svg?style=flat
54
+ :target: https://pypi.python.org/pypi/botocore/
55
+ :alt: Python Versions
56
+ .. |Version| image:: http://img.shields.io/pypi/v/botocore.svg?style=flat
57
+ :target: https://pypi.python.org/pypi/botocore/
58
+ :alt: Package Version
59
+ .. |License| image:: http://img.shields.io/pypi/l/botocore.svg?style=flat
60
+ :target: https://github.com/boto/botocore/blob/develop/LICENSE.txt
61
+ :alt: License
62
+
63
+ Getting Started
64
+ ---------------
65
+ Assuming that you have Python and ``virtualenv`` installed, set up your environment and install the required dependencies like this or you can install the library using ``pip``:
66
+
67
+ .. code-block:: sh
68
+
69
+ $ git clone https://github.com/boto/botocore.git
70
+ $ cd botocore
71
+ $ virtualenv venv
72
+ ...
73
+ $ . venv/bin/activate
74
+ $ pip install -r requirements.txt
75
+ $ pip install -e .
76
+
77
+ .. code-block:: sh
78
+
79
+ $ pip install botocore
80
+
81
+ Using Botocore
82
+ ~~~~~~~~~~~~~~
83
+ After installing botocore
84
+
85
+ Next, set up credentials (in e.g. ``~/.aws/credentials``):
86
+
87
+ .. code-block:: ini
88
+
89
+ [default]
90
+ aws_access_key_id = YOUR_KEY
91
+ aws_secret_access_key = YOUR_SECRET
92
+
93
+ Then, set up a default region (in e.g. ``~/.aws/config``):
94
+
95
+ .. code-block:: ini
96
+
97
+ [default]
98
+ region=us-east-1
99
+
100
+ Other credentials configuration method can be found `here <https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html>`__
101
+
102
+ Then, from a Python interpreter:
103
+
104
+ .. code-block:: python
105
+
106
+ >>> import botocore.session
107
+ >>> session = botocore.session.get_session()
108
+ >>> client = session.create_client('ec2')
109
+ >>> print(client.describe_instances())
110
+
111
+
112
+ Getting Help
113
+ ------------
114
+
115
+ We use GitHub issues for tracking bugs and feature requests and have limited
116
+ bandwidth to address them. Please use these community resources for getting
117
+ help. Please note many of the same resources available for ``boto3`` are
118
+ applicable for ``botocore``:
119
+
120
+ * Ask a question on `Stack Overflow <https://stackoverflow.com/>`__ and tag it with `boto3 <https://stackoverflow.com/questions/tagged/boto3>`__
121
+ * Open a support ticket with `AWS Support <https://console.aws.amazon.com/support/home#/>`__
122
+ * If it turns out that you may have found a bug, please `open an issue <https://github.com/boto/botocore/issues/new/choose>`__
123
+
124
+
125
+ Contributing
126
+ ------------
127
+
128
+ We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING <https://github.com/boto/botocore/blob/develop/CONTRIBUTING.rst>`__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution.
129
+
130
+
131
+ Maintenance and Support for SDK Major Versions
132
+ ----------------------------------------------
133
+
134
+ Botocore was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle.
135
+
136
+ For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Reference Guide:
137
+
138
+ * `AWS SDKs and Tools Maintenance Policy <https://docs.aws.amazon.com/sdkref/latest/guide/maint-policy.html>`__
139
+ * `AWS SDKs and Tools Version Support Matrix <https://docs.aws.amazon.com/sdkref/latest/guide/version-support-matrix.html>`__
140
+
141
+
142
+ More Resources
143
+ --------------
144
+
145
+ * `NOTICE <https://github.com/boto/botocore/blob/develop/NOTICE>`__
146
+ * `Changelog <https://github.com/boto/botocore/blob/develop/CHANGELOG.rst>`__
147
+ * `License <https://github.com/boto/botocore/blob/develop/LICENSE.txt>`__
148
+
149
+
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/NOTICE ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Botocore
2
+ Copyright 2012-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
+
4
+ ----
5
+
6
+ Botocore includes vendorized parts of the requests python library for backwards compatibility.
7
+
8
+ Requests License
9
+ ================
10
+
11
+ Copyright 2013 Kenneth Reitz
12
+
13
+ Licensed under the Apache License, Version 2.0 (the "License");
14
+ you may not use this file except in compliance with the License.
15
+ You may obtain a copy of the License at
16
+
17
+ http://www.apache.org/licenses/LICENSE-2.0
18
+
19
+ Unless required by applicable law or agreed to in writing, software
20
+ distributed under the License is distributed on an "AS IS" BASIS,
21
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
+ See the License for the specific language governing permissions and
23
+ limitations under the License.
24
+
25
+ Botocore includes vendorized parts of the urllib3 library for backwards compatibility.
26
+
27
+ Urllib3 License
28
+ ===============
29
+
30
+ This is the MIT license: http://www.opensource.org/licenses/mit-license.php
31
+
32
+ Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt),
33
+ Modifications copyright 2012 Kenneth Reitz.
34
+
35
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this
36
+ software and associated documentation files (the "Software"), to deal in the Software
37
+ without restriction, including without limitation the rights to use, copy, modify, merge,
38
+ publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
39
+ to whom the Software is furnished to do so, subject to the following conditions:
40
+
41
+ The above copyright notice and this permission notice shall be included in all copies or
42
+ substantial portions of the Software.
43
+
44
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
45
+ INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
46
+ PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
47
+ FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
48
+ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
49
+ DEALINGS IN THE SOFTWARE.
50
+
51
+ Bundle of CA Root Certificates
52
+ ==============================
53
+
54
+ ***** BEGIN LICENSE BLOCK *****
55
+ This Source Code Form is subject to the terms of the
56
+ Mozilla Public License, v. 2.0. If a copy of the MPL
57
+ was not distributed with this file, You can obtain
58
+ one at http://mozilla.org/MPL/2.0/.
59
+
60
+ ***** END LICENSE BLOCK *****
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/RECORD ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/botocore-1.34.162.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ botocore
venv/lib/python3.10/site-packages/charset_normalizer/__init__.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Charset-Normalizer
3
+ ~~~~~~~~~~~~~~
4
+ The Real First Universal Charset Detector.
5
+ A library that helps you read text from an unknown charset encoding.
6
+ Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
7
+ All IANA character set names for which the Python core library provides codecs are supported.
8
+
9
+ Basic usage:
10
+ >>> from charset_normalizer import from_bytes
11
+ >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
12
+ >>> best_guess = results.best()
13
+ >>> str(best_guess)
14
+ 'Bсеки човек има право на образование. Oбразованието!'
15
+
16
+ Others methods and usages are available - see the full documentation
17
+ at <https://github.com/Ousret/charset_normalizer>.
18
+ :copyright: (c) 2021 by Ahmed TAHRI
19
+ :license: MIT, see LICENSE for more details.
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ import logging
25
+
26
+ from .api import from_bytes, from_fp, from_path, is_binary
27
+ from .legacy import detect
28
+ from .models import CharsetMatch, CharsetMatches
29
+ from .utils import set_logging_handler
30
+ from .version import VERSION, __version__
31
+
32
+ __all__ = (
33
+ "from_fp",
34
+ "from_path",
35
+ "from_bytes",
36
+ "is_binary",
37
+ "detect",
38
+ "CharsetMatch",
39
+ "CharsetMatches",
40
+ "__version__",
41
+ "VERSION",
42
+ "set_logging_handler",
43
+ )
44
+
45
+ # Attach a NullHandler to the top level logger by default
46
+ # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
47
+
48
+ logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
venv/lib/python3.10/site-packages/charset_normalizer/__main__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from .cli import cli_detect
4
+
5
+ if __name__ == "__main__":
6
+ cli_detect()
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (387 Bytes). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/api.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/cd.cpython-310.pyc ADDED
Binary file (9.8 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc ADDED
Binary file (32.6 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/legacy.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/md.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/utils.cpython-310.pyc ADDED
Binary file (9.06 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/version.cpython-310.pyc ADDED
Binary file (389 Bytes). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/api.py ADDED
@@ -0,0 +1,669 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from os import PathLike
5
+ from typing import BinaryIO
6
+
7
+ from .cd import (
8
+ coherence_ratio,
9
+ encoding_languages,
10
+ mb_encoding_languages,
11
+ merge_coherence_ratios,
12
+ )
13
+ from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
14
+ from .md import mess_ratio
15
+ from .models import CharsetMatch, CharsetMatches
16
+ from .utils import (
17
+ any_specified_encoding,
18
+ cut_sequence_chunks,
19
+ iana_name,
20
+ identify_sig_or_bom,
21
+ is_cp_similar,
22
+ is_multi_byte_encoding,
23
+ should_strip_sig_or_bom,
24
+ )
25
+
26
+ logger = logging.getLogger("charset_normalizer")
27
+ explain_handler = logging.StreamHandler()
28
+ explain_handler.setFormatter(
29
+ logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
30
+ )
31
+
32
+
33
+ def from_bytes(
34
+ sequences: bytes | bytearray,
35
+ steps: int = 5,
36
+ chunk_size: int = 512,
37
+ threshold: float = 0.2,
38
+ cp_isolation: list[str] | None = None,
39
+ cp_exclusion: list[str] | None = None,
40
+ preemptive_behaviour: bool = True,
41
+ explain: bool = False,
42
+ language_threshold: float = 0.1,
43
+ enable_fallback: bool = True,
44
+ ) -> CharsetMatches:
45
+ """
46
+ Given a raw bytes sequence, return the best possibles charset usable to render str objects.
47
+ If there is no results, it is a strong indicator that the source is binary/not text.
48
+ By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
49
+ And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
50
+
51
+ The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
52
+ but never take it for granted. Can improve the performance.
53
+
54
+ You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
55
+ purpose.
56
+
57
+ This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
58
+ By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
59
+ toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
60
+ Custom logging format and handler can be set manually.
61
+ """
62
+
63
+ if not isinstance(sequences, (bytearray, bytes)):
64
+ raise TypeError(
65
+ "Expected object of type bytes or bytearray, got: {}".format(
66
+ type(sequences)
67
+ )
68
+ )
69
+
70
+ if explain:
71
+ previous_logger_level: int = logger.level
72
+ logger.addHandler(explain_handler)
73
+ logger.setLevel(TRACE)
74
+
75
+ length: int = len(sequences)
76
+
77
+ if length == 0:
78
+ logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
79
+ if explain: # Defensive: ensure exit path clean handler
80
+ logger.removeHandler(explain_handler)
81
+ logger.setLevel(previous_logger_level or logging.WARNING)
82
+ return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
83
+
84
+ if cp_isolation is not None:
85
+ logger.log(
86
+ TRACE,
87
+ "cp_isolation is set. use this flag for debugging purpose. "
88
+ "limited list of encoding allowed : %s.",
89
+ ", ".join(cp_isolation),
90
+ )
91
+ cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
92
+ else:
93
+ cp_isolation = []
94
+
95
+ if cp_exclusion is not None:
96
+ logger.log(
97
+ TRACE,
98
+ "cp_exclusion is set. use this flag for debugging purpose. "
99
+ "limited list of encoding excluded : %s.",
100
+ ", ".join(cp_exclusion),
101
+ )
102
+ cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
103
+ else:
104
+ cp_exclusion = []
105
+
106
+ if length <= (chunk_size * steps):
107
+ logger.log(
108
+ TRACE,
109
+ "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
110
+ steps,
111
+ chunk_size,
112
+ length,
113
+ )
114
+ steps = 1
115
+ chunk_size = length
116
+
117
+ if steps > 1 and length / steps < chunk_size:
118
+ chunk_size = int(length / steps)
119
+
120
+ is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
121
+ is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
122
+
123
+ if is_too_small_sequence:
124
+ logger.log(
125
+ TRACE,
126
+ "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
127
+ length
128
+ ),
129
+ )
130
+ elif is_too_large_sequence:
131
+ logger.log(
132
+ TRACE,
133
+ "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
134
+ length
135
+ ),
136
+ )
137
+
138
+ prioritized_encodings: list[str] = []
139
+
140
+ specified_encoding: str | None = (
141
+ any_specified_encoding(sequences) if preemptive_behaviour else None
142
+ )
143
+
144
+ if specified_encoding is not None:
145
+ prioritized_encodings.append(specified_encoding)
146
+ logger.log(
147
+ TRACE,
148
+ "Detected declarative mark in sequence. Priority +1 given for %s.",
149
+ specified_encoding,
150
+ )
151
+
152
+ tested: set[str] = set()
153
+ tested_but_hard_failure: list[str] = []
154
+ tested_but_soft_failure: list[str] = []
155
+
156
+ fallback_ascii: CharsetMatch | None = None
157
+ fallback_u8: CharsetMatch | None = None
158
+ fallback_specified: CharsetMatch | None = None
159
+
160
+ results: CharsetMatches = CharsetMatches()
161
+
162
+ early_stop_results: CharsetMatches = CharsetMatches()
163
+
164
+ sig_encoding, sig_payload = identify_sig_or_bom(sequences)
165
+
166
+ if sig_encoding is not None:
167
+ prioritized_encodings.append(sig_encoding)
168
+ logger.log(
169
+ TRACE,
170
+ "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
171
+ len(sig_payload),
172
+ sig_encoding,
173
+ )
174
+
175
+ prioritized_encodings.append("ascii")
176
+
177
+ if "utf_8" not in prioritized_encodings:
178
+ prioritized_encodings.append("utf_8")
179
+
180
+ for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
181
+ if cp_isolation and encoding_iana not in cp_isolation:
182
+ continue
183
+
184
+ if cp_exclusion and encoding_iana in cp_exclusion:
185
+ continue
186
+
187
+ if encoding_iana in tested:
188
+ continue
189
+
190
+ tested.add(encoding_iana)
191
+
192
+ decoded_payload: str | None = None
193
+ bom_or_sig_available: bool = sig_encoding == encoding_iana
194
+ strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
195
+ encoding_iana
196
+ )
197
+
198
+ if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
199
+ logger.log(
200
+ TRACE,
201
+ "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
202
+ encoding_iana,
203
+ )
204
+ continue
205
+ if encoding_iana in {"utf_7"} and not bom_or_sig_available:
206
+ logger.log(
207
+ TRACE,
208
+ "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
209
+ encoding_iana,
210
+ )
211
+ continue
212
+
213
+ try:
214
+ is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
215
+ except (ModuleNotFoundError, ImportError):
216
+ logger.log(
217
+ TRACE,
218
+ "Encoding %s does not provide an IncrementalDecoder",
219
+ encoding_iana,
220
+ )
221
+ continue
222
+
223
+ try:
224
+ if is_too_large_sequence and is_multi_byte_decoder is False:
225
+ str(
226
+ (
227
+ sequences[: int(50e4)]
228
+ if strip_sig_or_bom is False
229
+ else sequences[len(sig_payload) : int(50e4)]
230
+ ),
231
+ encoding=encoding_iana,
232
+ )
233
+ else:
234
+ decoded_payload = str(
235
+ (
236
+ sequences
237
+ if strip_sig_or_bom is False
238
+ else sequences[len(sig_payload) :]
239
+ ),
240
+ encoding=encoding_iana,
241
+ )
242
+ except (UnicodeDecodeError, LookupError) as e:
243
+ if not isinstance(e, LookupError):
244
+ logger.log(
245
+ TRACE,
246
+ "Code page %s does not fit given bytes sequence at ALL. %s",
247
+ encoding_iana,
248
+ str(e),
249
+ )
250
+ tested_but_hard_failure.append(encoding_iana)
251
+ continue
252
+
253
+ similar_soft_failure_test: bool = False
254
+
255
+ for encoding_soft_failed in tested_but_soft_failure:
256
+ if is_cp_similar(encoding_iana, encoding_soft_failed):
257
+ similar_soft_failure_test = True
258
+ break
259
+
260
+ if similar_soft_failure_test:
261
+ logger.log(
262
+ TRACE,
263
+ "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
264
+ encoding_iana,
265
+ encoding_soft_failed,
266
+ )
267
+ continue
268
+
269
+ r_ = range(
270
+ 0 if not bom_or_sig_available else len(sig_payload),
271
+ length,
272
+ int(length / steps),
273
+ )
274
+
275
+ multi_byte_bonus: bool = (
276
+ is_multi_byte_decoder
277
+ and decoded_payload is not None
278
+ and len(decoded_payload) < length
279
+ )
280
+
281
+ if multi_byte_bonus:
282
+ logger.log(
283
+ TRACE,
284
+ "Code page %s is a multi byte encoding table and it appear that at least one character "
285
+ "was encoded using n-bytes.",
286
+ encoding_iana,
287
+ )
288
+
289
+ max_chunk_gave_up: int = int(len(r_) / 4)
290
+
291
+ max_chunk_gave_up = max(max_chunk_gave_up, 2)
292
+ early_stop_count: int = 0
293
+ lazy_str_hard_failure = False
294
+
295
+ md_chunks: list[str] = []
296
+ md_ratios = []
297
+
298
+ try:
299
+ for chunk in cut_sequence_chunks(
300
+ sequences,
301
+ encoding_iana,
302
+ r_,
303
+ chunk_size,
304
+ bom_or_sig_available,
305
+ strip_sig_or_bom,
306
+ sig_payload,
307
+ is_multi_byte_decoder,
308
+ decoded_payload,
309
+ ):
310
+ md_chunks.append(chunk)
311
+
312
+ md_ratios.append(
313
+ mess_ratio(
314
+ chunk,
315
+ threshold,
316
+ explain is True and 1 <= len(cp_isolation) <= 2,
317
+ )
318
+ )
319
+
320
+ if md_ratios[-1] >= threshold:
321
+ early_stop_count += 1
322
+
323
+ if (early_stop_count >= max_chunk_gave_up) or (
324
+ bom_or_sig_available and strip_sig_or_bom is False
325
+ ):
326
+ break
327
+ except (
328
+ UnicodeDecodeError
329
+ ) as e: # Lazy str loading may have missed something there
330
+ logger.log(
331
+ TRACE,
332
+ "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
333
+ encoding_iana,
334
+ str(e),
335
+ )
336
+ early_stop_count = max_chunk_gave_up
337
+ lazy_str_hard_failure = True
338
+
339
+ # We might want to check the sequence again with the whole content
340
+ # Only if initial MD tests passes
341
+ if (
342
+ not lazy_str_hard_failure
343
+ and is_too_large_sequence
344
+ and not is_multi_byte_decoder
345
+ ):
346
+ try:
347
+ sequences[int(50e3) :].decode(encoding_iana, errors="strict")
348
+ except UnicodeDecodeError as e:
349
+ logger.log(
350
+ TRACE,
351
+ "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
352
+ encoding_iana,
353
+ str(e),
354
+ )
355
+ tested_but_hard_failure.append(encoding_iana)
356
+ continue
357
+
358
+ mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
359
+ if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
360
+ tested_but_soft_failure.append(encoding_iana)
361
+ logger.log(
362
+ TRACE,
363
+ "%s was excluded because of initial chaos probing. Gave up %i time(s). "
364
+ "Computed mean chaos is %f %%.",
365
+ encoding_iana,
366
+ early_stop_count,
367
+ round(mean_mess_ratio * 100, ndigits=3),
368
+ )
369
+ # Preparing those fallbacks in case we got nothing.
370
+ if (
371
+ enable_fallback
372
+ and encoding_iana
373
+ in ["ascii", "utf_8", specified_encoding, "utf_16", "utf_32"]
374
+ and not lazy_str_hard_failure
375
+ ):
376
+ fallback_entry = CharsetMatch(
377
+ sequences,
378
+ encoding_iana,
379
+ threshold,
380
+ bom_or_sig_available,
381
+ [],
382
+ decoded_payload,
383
+ preemptive_declaration=specified_encoding,
384
+ )
385
+ if encoding_iana == specified_encoding:
386
+ fallback_specified = fallback_entry
387
+ elif encoding_iana == "ascii":
388
+ fallback_ascii = fallback_entry
389
+ else:
390
+ fallback_u8 = fallback_entry
391
+ continue
392
+
393
+ logger.log(
394
+ TRACE,
395
+ "%s passed initial chaos probing. Mean measured chaos is %f %%",
396
+ encoding_iana,
397
+ round(mean_mess_ratio * 100, ndigits=3),
398
+ )
399
+
400
+ if not is_multi_byte_decoder:
401
+ target_languages: list[str] = encoding_languages(encoding_iana)
402
+ else:
403
+ target_languages = mb_encoding_languages(encoding_iana)
404
+
405
+ if target_languages:
406
+ logger.log(
407
+ TRACE,
408
+ "{} should target any language(s) of {}".format(
409
+ encoding_iana, str(target_languages)
410
+ ),
411
+ )
412
+
413
+ cd_ratios = []
414
+
415
+ # We shall skip the CD when its about ASCII
416
+ # Most of the time its not relevant to run "language-detection" on it.
417
+ if encoding_iana != "ascii":
418
+ for chunk in md_chunks:
419
+ chunk_languages = coherence_ratio(
420
+ chunk,
421
+ language_threshold,
422
+ ",".join(target_languages) if target_languages else None,
423
+ )
424
+
425
+ cd_ratios.append(chunk_languages)
426
+
427
+ cd_ratios_merged = merge_coherence_ratios(cd_ratios)
428
+
429
+ if cd_ratios_merged:
430
+ logger.log(
431
+ TRACE,
432
+ "We detected language {} using {}".format(
433
+ cd_ratios_merged, encoding_iana
434
+ ),
435
+ )
436
+
437
+ current_match = CharsetMatch(
438
+ sequences,
439
+ encoding_iana,
440
+ mean_mess_ratio,
441
+ bom_or_sig_available,
442
+ cd_ratios_merged,
443
+ (
444
+ decoded_payload
445
+ if (
446
+ is_too_large_sequence is False
447
+ or encoding_iana in [specified_encoding, "ascii", "utf_8"]
448
+ )
449
+ else None
450
+ ),
451
+ preemptive_declaration=specified_encoding,
452
+ )
453
+
454
+ results.append(current_match)
455
+
456
+ if (
457
+ encoding_iana in [specified_encoding, "ascii", "utf_8"]
458
+ and mean_mess_ratio < 0.1
459
+ ):
460
+ # If md says nothing to worry about, then... stop immediately!
461
+ if mean_mess_ratio == 0.0:
462
+ logger.debug(
463
+ "Encoding detection: %s is most likely the one.",
464
+ current_match.encoding,
465
+ )
466
+ if explain: # Defensive: ensure exit path clean handler
467
+ logger.removeHandler(explain_handler)
468
+ logger.setLevel(previous_logger_level)
469
+ return CharsetMatches([current_match])
470
+
471
+ early_stop_results.append(current_match)
472
+
473
+ if (
474
+ len(early_stop_results)
475
+ and (specified_encoding is None or specified_encoding in tested)
476
+ and "ascii" in tested
477
+ and "utf_8" in tested
478
+ ):
479
+ probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment]
480
+ logger.debug(
481
+ "Encoding detection: %s is most likely the one.",
482
+ probable_result.encoding,
483
+ )
484
+ if explain: # Defensive: ensure exit path clean handler
485
+ logger.removeHandler(explain_handler)
486
+ logger.setLevel(previous_logger_level)
487
+
488
+ return CharsetMatches([probable_result])
489
+
490
+ if encoding_iana == sig_encoding:
491
+ logger.debug(
492
+ "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
493
+ "the beginning of the sequence.",
494
+ encoding_iana,
495
+ )
496
+ if explain: # Defensive: ensure exit path clean handler
497
+ logger.removeHandler(explain_handler)
498
+ logger.setLevel(previous_logger_level)
499
+ return CharsetMatches([results[encoding_iana]])
500
+
501
+ if len(results) == 0:
502
+ if fallback_u8 or fallback_ascii or fallback_specified:
503
+ logger.log(
504
+ TRACE,
505
+ "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
506
+ )
507
+
508
+ if fallback_specified:
509
+ logger.debug(
510
+ "Encoding detection: %s will be used as a fallback match",
511
+ fallback_specified.encoding,
512
+ )
513
+ results.append(fallback_specified)
514
+ elif (
515
+ (fallback_u8 and fallback_ascii is None)
516
+ or (
517
+ fallback_u8
518
+ and fallback_ascii
519
+ and fallback_u8.fingerprint != fallback_ascii.fingerprint
520
+ )
521
+ or (fallback_u8 is not None)
522
+ ):
523
+ logger.debug("Encoding detection: utf_8 will be used as a fallback match")
524
+ results.append(fallback_u8)
525
+ elif fallback_ascii:
526
+ logger.debug("Encoding detection: ascii will be used as a fallback match")
527
+ results.append(fallback_ascii)
528
+
529
+ if results:
530
+ logger.debug(
531
+ "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
532
+ results.best().encoding, # type: ignore
533
+ len(results) - 1,
534
+ )
535
+ else:
536
+ logger.debug("Encoding detection: Unable to determine any suitable charset.")
537
+
538
+ if explain:
539
+ logger.removeHandler(explain_handler)
540
+ logger.setLevel(previous_logger_level)
541
+
542
+ return results
543
+
544
+
545
+ def from_fp(
546
+ fp: BinaryIO,
547
+ steps: int = 5,
548
+ chunk_size: int = 512,
549
+ threshold: float = 0.20,
550
+ cp_isolation: list[str] | None = None,
551
+ cp_exclusion: list[str] | None = None,
552
+ preemptive_behaviour: bool = True,
553
+ explain: bool = False,
554
+ language_threshold: float = 0.1,
555
+ enable_fallback: bool = True,
556
+ ) -> CharsetMatches:
557
+ """
558
+ Same thing than the function from_bytes but using a file pointer that is already ready.
559
+ Will not close the file pointer.
560
+ """
561
+ return from_bytes(
562
+ fp.read(),
563
+ steps,
564
+ chunk_size,
565
+ threshold,
566
+ cp_isolation,
567
+ cp_exclusion,
568
+ preemptive_behaviour,
569
+ explain,
570
+ language_threshold,
571
+ enable_fallback,
572
+ )
573
+
574
+
575
+ def from_path(
576
+ path: str | bytes | PathLike, # type: ignore[type-arg]
577
+ steps: int = 5,
578
+ chunk_size: int = 512,
579
+ threshold: float = 0.20,
580
+ cp_isolation: list[str] | None = None,
581
+ cp_exclusion: list[str] | None = None,
582
+ preemptive_behaviour: bool = True,
583
+ explain: bool = False,
584
+ language_threshold: float = 0.1,
585
+ enable_fallback: bool = True,
586
+ ) -> CharsetMatches:
587
+ """
588
+ Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
589
+ Can raise IOError.
590
+ """
591
+ with open(path, "rb") as fp:
592
+ return from_fp(
593
+ fp,
594
+ steps,
595
+ chunk_size,
596
+ threshold,
597
+ cp_isolation,
598
+ cp_exclusion,
599
+ preemptive_behaviour,
600
+ explain,
601
+ language_threshold,
602
+ enable_fallback,
603
+ )
604
+
605
+
606
+ def is_binary(
607
+ fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg]
608
+ steps: int = 5,
609
+ chunk_size: int = 512,
610
+ threshold: float = 0.20,
611
+ cp_isolation: list[str] | None = None,
612
+ cp_exclusion: list[str] | None = None,
613
+ preemptive_behaviour: bool = True,
614
+ explain: bool = False,
615
+ language_threshold: float = 0.1,
616
+ enable_fallback: bool = False,
617
+ ) -> bool:
618
+ """
619
+ Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
620
+ Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
621
+ are disabled to be stricter around ASCII-compatible but unlikely to be a string.
622
+ """
623
+ if isinstance(fp_or_path_or_payload, (str, PathLike)):
624
+ guesses = from_path(
625
+ fp_or_path_or_payload,
626
+ steps=steps,
627
+ chunk_size=chunk_size,
628
+ threshold=threshold,
629
+ cp_isolation=cp_isolation,
630
+ cp_exclusion=cp_exclusion,
631
+ preemptive_behaviour=preemptive_behaviour,
632
+ explain=explain,
633
+ language_threshold=language_threshold,
634
+ enable_fallback=enable_fallback,
635
+ )
636
+ elif isinstance(
637
+ fp_or_path_or_payload,
638
+ (
639
+ bytes,
640
+ bytearray,
641
+ ),
642
+ ):
643
+ guesses = from_bytes(
644
+ fp_or_path_or_payload,
645
+ steps=steps,
646
+ chunk_size=chunk_size,
647
+ threshold=threshold,
648
+ cp_isolation=cp_isolation,
649
+ cp_exclusion=cp_exclusion,
650
+ preemptive_behaviour=preemptive_behaviour,
651
+ explain=explain,
652
+ language_threshold=language_threshold,
653
+ enable_fallback=enable_fallback,
654
+ )
655
+ else:
656
+ guesses = from_fp(
657
+ fp_or_path_or_payload,
658
+ steps=steps,
659
+ chunk_size=chunk_size,
660
+ threshold=threshold,
661
+ cp_isolation=cp_isolation,
662
+ cp_exclusion=cp_exclusion,
663
+ preemptive_behaviour=preemptive_behaviour,
664
+ explain=explain,
665
+ language_threshold=language_threshold,
666
+ enable_fallback=enable_fallback,
667
+ )
668
+
669
+ return not guesses
venv/lib/python3.10/site-packages/charset_normalizer/cd.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ from codecs import IncrementalDecoder
5
+ from collections import Counter
6
+ from functools import lru_cache
7
+ from typing import Counter as TypeCounter
8
+
9
+ from .constant import (
10
+ FREQUENCIES,
11
+ KO_NAMES,
12
+ LANGUAGE_SUPPORTED_COUNT,
13
+ TOO_SMALL_SEQUENCE,
14
+ ZH_NAMES,
15
+ )
16
+ from .md import is_suspiciously_successive_range
17
+ from .models import CoherenceMatches
18
+ from .utils import (
19
+ is_accentuated,
20
+ is_latin,
21
+ is_multi_byte_encoding,
22
+ is_unicode_range_secondary,
23
+ unicode_range,
24
+ )
25
+
26
+
27
+ def encoding_unicode_range(iana_name: str) -> list[str]:
28
+ """
29
+ Return associated unicode ranges in a single byte code page.
30
+ """
31
+ if is_multi_byte_encoding(iana_name):
32
+ raise OSError("Function not supported on multi-byte code page")
33
+
34
+ decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder
35
+
36
+ p: IncrementalDecoder = decoder(errors="ignore")
37
+ seen_ranges: dict[str, int] = {}
38
+ character_count: int = 0
39
+
40
+ for i in range(0x40, 0xFF):
41
+ chunk: str = p.decode(bytes([i]))
42
+
43
+ if chunk:
44
+ character_range: str | None = unicode_range(chunk)
45
+
46
+ if character_range is None:
47
+ continue
48
+
49
+ if is_unicode_range_secondary(character_range) is False:
50
+ if character_range not in seen_ranges:
51
+ seen_ranges[character_range] = 0
52
+ seen_ranges[character_range] += 1
53
+ character_count += 1
54
+
55
+ return sorted(
56
+ [
57
+ character_range
58
+ for character_range in seen_ranges
59
+ if seen_ranges[character_range] / character_count >= 0.15
60
+ ]
61
+ )
62
+
63
+
64
+ def unicode_range_languages(primary_range: str) -> list[str]:
65
+ """
66
+ Return inferred languages used with a unicode range.
67
+ """
68
+ languages: list[str] = []
69
+
70
+ for language, characters in FREQUENCIES.items():
71
+ for character in characters:
72
+ if unicode_range(character) == primary_range:
73
+ languages.append(language)
74
+ break
75
+
76
+ return languages
77
+
78
+
79
+ @lru_cache()
80
+ def encoding_languages(iana_name: str) -> list[str]:
81
+ """
82
+ Single-byte encoding language association. Some code page are heavily linked to particular language(s).
83
+ This function does the correspondence.
84
+ """
85
+ unicode_ranges: list[str] = encoding_unicode_range(iana_name)
86
+ primary_range: str | None = None
87
+
88
+ for specified_range in unicode_ranges:
89
+ if "Latin" not in specified_range:
90
+ primary_range = specified_range
91
+ break
92
+
93
+ if primary_range is None:
94
+ return ["Latin Based"]
95
+
96
+ return unicode_range_languages(primary_range)
97
+
98
+
99
+ @lru_cache()
100
+ def mb_encoding_languages(iana_name: str) -> list[str]:
101
+ """
102
+ Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
103
+ This function does the correspondence.
104
+ """
105
+ if (
106
+ iana_name.startswith("shift_")
107
+ or iana_name.startswith("iso2022_jp")
108
+ or iana_name.startswith("euc_j")
109
+ or iana_name == "cp932"
110
+ ):
111
+ return ["Japanese"]
112
+ if iana_name.startswith("gb") or iana_name in ZH_NAMES:
113
+ return ["Chinese"]
114
+ if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
115
+ return ["Korean"]
116
+
117
+ return []
118
+
119
+
120
+ @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
121
+ def get_target_features(language: str) -> tuple[bool, bool]:
122
+ """
123
+ Determine main aspects from a supported language if it contains accents and if is pure Latin.
124
+ """
125
+ target_have_accents: bool = False
126
+ target_pure_latin: bool = True
127
+
128
+ for character in FREQUENCIES[language]:
129
+ if not target_have_accents and is_accentuated(character):
130
+ target_have_accents = True
131
+ if target_pure_latin and is_latin(character) is False:
132
+ target_pure_latin = False
133
+
134
+ return target_have_accents, target_pure_latin
135
+
136
+
137
+ def alphabet_languages(
138
+ characters: list[str], ignore_non_latin: bool = False
139
+ ) -> list[str]:
140
+ """
141
+ Return associated languages associated to given characters.
142
+ """
143
+ languages: list[tuple[str, float]] = []
144
+
145
+ source_have_accents = any(is_accentuated(character) for character in characters)
146
+
147
+ for language, language_characters in FREQUENCIES.items():
148
+ target_have_accents, target_pure_latin = get_target_features(language)
149
+
150
+ if ignore_non_latin and target_pure_latin is False:
151
+ continue
152
+
153
+ if target_have_accents is False and source_have_accents:
154
+ continue
155
+
156
+ character_count: int = len(language_characters)
157
+
158
+ character_match_count: int = len(
159
+ [c for c in language_characters if c in characters]
160
+ )
161
+
162
+ ratio: float = character_match_count / character_count
163
+
164
+ if ratio >= 0.2:
165
+ languages.append((language, ratio))
166
+
167
+ languages = sorted(languages, key=lambda x: x[1], reverse=True)
168
+
169
+ return [compatible_language[0] for compatible_language in languages]
170
+
171
+
172
+ def characters_popularity_compare(
173
+ language: str, ordered_characters: list[str]
174
+ ) -> float:
175
+ """
176
+ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
177
+ The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
178
+ Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
179
+ """
180
+ if language not in FREQUENCIES:
181
+ raise ValueError(f"{language} not available")
182
+
183
+ character_approved_count: int = 0
184
+ FREQUENCIES_language_set = set(FREQUENCIES[language])
185
+
186
+ ordered_characters_count: int = len(ordered_characters)
187
+ target_language_characters_count: int = len(FREQUENCIES[language])
188
+
189
+ large_alphabet: bool = target_language_characters_count > 26
190
+
191
+ for character, character_rank in zip(
192
+ ordered_characters, range(0, ordered_characters_count)
193
+ ):
194
+ if character not in FREQUENCIES_language_set:
195
+ continue
196
+
197
+ character_rank_in_language: int = FREQUENCIES[language].index(character)
198
+ expected_projection_ratio: float = (
199
+ target_language_characters_count / ordered_characters_count
200
+ )
201
+ character_rank_projection: int = int(character_rank * expected_projection_ratio)
202
+
203
+ if (
204
+ large_alphabet is False
205
+ and abs(character_rank_projection - character_rank_in_language) > 4
206
+ ):
207
+ continue
208
+
209
+ if (
210
+ large_alphabet is True
211
+ and abs(character_rank_projection - character_rank_in_language)
212
+ < target_language_characters_count / 3
213
+ ):
214
+ character_approved_count += 1
215
+ continue
216
+
217
+ characters_before_source: list[str] = FREQUENCIES[language][
218
+ 0:character_rank_in_language
219
+ ]
220
+ characters_after_source: list[str] = FREQUENCIES[language][
221
+ character_rank_in_language:
222
+ ]
223
+ characters_before: list[str] = ordered_characters[0:character_rank]
224
+ characters_after: list[str] = ordered_characters[character_rank:]
225
+
226
+ before_match_count: int = len(
227
+ set(characters_before) & set(characters_before_source)
228
+ )
229
+
230
+ after_match_count: int = len(
231
+ set(characters_after) & set(characters_after_source)
232
+ )
233
+
234
+ if len(characters_before_source) == 0 and before_match_count <= 4:
235
+ character_approved_count += 1
236
+ continue
237
+
238
+ if len(characters_after_source) == 0 and after_match_count <= 4:
239
+ character_approved_count += 1
240
+ continue
241
+
242
+ if (
243
+ before_match_count / len(characters_before_source) >= 0.4
244
+ or after_match_count / len(characters_after_source) >= 0.4
245
+ ):
246
+ character_approved_count += 1
247
+ continue
248
+
249
+ return character_approved_count / len(ordered_characters)
250
+
251
+
252
+ def alpha_unicode_split(decoded_sequence: str) -> list[str]:
253
+ """
254
+ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
255
+ Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
256
+ One containing the latin letters and the other hebrew.
257
+ """
258
+ layers: dict[str, str] = {}
259
+
260
+ for character in decoded_sequence:
261
+ if character.isalpha() is False:
262
+ continue
263
+
264
+ character_range: str | None = unicode_range(character)
265
+
266
+ if character_range is None:
267
+ continue
268
+
269
+ layer_target_range: str | None = None
270
+
271
+ for discovered_range in layers:
272
+ if (
273
+ is_suspiciously_successive_range(discovered_range, character_range)
274
+ is False
275
+ ):
276
+ layer_target_range = discovered_range
277
+ break
278
+
279
+ if layer_target_range is None:
280
+ layer_target_range = character_range
281
+
282
+ if layer_target_range not in layers:
283
+ layers[layer_target_range] = character.lower()
284
+ continue
285
+
286
+ layers[layer_target_range] += character.lower()
287
+
288
+ return list(layers.values())
289
+
290
+
291
+ def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches:
292
+ """
293
+ This function merge results previously given by the function coherence_ratio.
294
+ The return type is the same as coherence_ratio.
295
+ """
296
+ per_language_ratios: dict[str, list[float]] = {}
297
+ for result in results:
298
+ for sub_result in result:
299
+ language, ratio = sub_result
300
+ if language not in per_language_ratios:
301
+ per_language_ratios[language] = [ratio]
302
+ continue
303
+ per_language_ratios[language].append(ratio)
304
+
305
+ merge = [
306
+ (
307
+ language,
308
+ round(
309
+ sum(per_language_ratios[language]) / len(per_language_ratios[language]),
310
+ 4,
311
+ ),
312
+ )
313
+ for language in per_language_ratios
314
+ ]
315
+
316
+ return sorted(merge, key=lambda x: x[1], reverse=True)
317
+
318
+
319
+ def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
320
+ """
321
+ We shall NOT return "English—" in CoherenceMatches because it is an alternative
322
+ of "English". This function only keeps the best match and remove the em-dash in it.
323
+ """
324
+ index_results: dict[str, list[float]] = dict()
325
+
326
+ for result in results:
327
+ language, ratio = result
328
+ no_em_name: str = language.replace("—", "")
329
+
330
+ if no_em_name not in index_results:
331
+ index_results[no_em_name] = []
332
+
333
+ index_results[no_em_name].append(ratio)
334
+
335
+ if any(len(index_results[e]) > 1 for e in index_results):
336
+ filtered_results: CoherenceMatches = []
337
+
338
+ for language in index_results:
339
+ filtered_results.append((language, max(index_results[language])))
340
+
341
+ return filtered_results
342
+
343
+ return results
344
+
345
+
346
+ @lru_cache(maxsize=2048)
347
+ def coherence_ratio(
348
+ decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None
349
+ ) -> CoherenceMatches:
350
+ """
351
+ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
352
+ A layer = Character extraction by alphabets/ranges.
353
+ """
354
+
355
+ results: list[tuple[str, float]] = []
356
+ ignore_non_latin: bool = False
357
+
358
+ sufficient_match_count: int = 0
359
+
360
+ lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
361
+ if "Latin Based" in lg_inclusion_list:
362
+ ignore_non_latin = True
363
+ lg_inclusion_list.remove("Latin Based")
364
+
365
+ for layer in alpha_unicode_split(decoded_sequence):
366
+ sequence_frequencies: TypeCounter[str] = Counter(layer)
367
+ most_common = sequence_frequencies.most_common()
368
+
369
+ character_count: int = sum(o for c, o in most_common)
370
+
371
+ if character_count <= TOO_SMALL_SEQUENCE:
372
+ continue
373
+
374
+ popular_character_ordered: list[str] = [c for c, o in most_common]
375
+
376
+ for language in lg_inclusion_list or alphabet_languages(
377
+ popular_character_ordered, ignore_non_latin
378
+ ):
379
+ ratio: float = characters_popularity_compare(
380
+ language, popular_character_ordered
381
+ )
382
+
383
+ if ratio < threshold:
384
+ continue
385
+ elif ratio >= 0.8:
386
+ sufficient_match_count += 1
387
+
388
+ results.append((language, round(ratio, 4)))
389
+
390
+ if sufficient_match_count >= 3:
391
+ break
392
+
393
+ return sorted(
394
+ filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
395
+ )
venv/lib/python3.10/site-packages/charset_normalizer/cli/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from .__main__ import cli_detect, query_yes_no
4
+
5
+ __all__ = (
6
+ "cli_detect",
7
+ "query_yes_no",
8
+ )
venv/lib/python3.10/site-packages/charset_normalizer/cli/__main__.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import sys
5
+ import typing
6
+ from json import dumps
7
+ from os.path import abspath, basename, dirname, join, realpath
8
+ from platform import python_version
9
+ from unicodedata import unidata_version
10
+
11
+ import charset_normalizer.md as md_module
12
+ from charset_normalizer import from_fp
13
+ from charset_normalizer.models import CliDetectionResult
14
+ from charset_normalizer.version import __version__
15
+
16
+
17
+ def query_yes_no(question: str, default: str = "yes") -> bool:
18
+ """Ask a yes/no question via input() and return their answer.
19
+
20
+ "question" is a string that is presented to the user.
21
+ "default" is the presumed answer if the user just hits <Enter>.
22
+ It must be "yes" (the default), "no" or None (meaning
23
+ an answer is required of the user).
24
+
25
+ The "answer" return value is True for "yes" or False for "no".
26
+
27
+ Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
28
+ """
29
+ valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
30
+ if default is None:
31
+ prompt = " [y/n] "
32
+ elif default == "yes":
33
+ prompt = " [Y/n] "
34
+ elif default == "no":
35
+ prompt = " [y/N] "
36
+ else:
37
+ raise ValueError("invalid default answer: '%s'" % default)
38
+
39
+ while True:
40
+ sys.stdout.write(question + prompt)
41
+ choice = input().lower()
42
+ if default is not None and choice == "":
43
+ return valid[default]
44
+ elif choice in valid:
45
+ return valid[choice]
46
+ else:
47
+ sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
48
+
49
+
50
+ class FileType:
51
+ """Factory for creating file object types
52
+
53
+ Instances of FileType are typically passed as type= arguments to the
54
+ ArgumentParser add_argument() method.
55
+
56
+ Keyword Arguments:
57
+ - mode -- A string indicating how the file is to be opened. Accepts the
58
+ same values as the builtin open() function.
59
+ - bufsize -- The file's desired buffer size. Accepts the same values as
60
+ the builtin open() function.
61
+ - encoding -- The file's encoding. Accepts the same values as the
62
+ builtin open() function.
63
+ - errors -- A string indicating how encoding and decoding errors are to
64
+ be handled. Accepts the same value as the builtin open() function.
65
+
66
+ Backported from CPython 3.12
67
+ """
68
+
69
+ def __init__(
70
+ self,
71
+ mode: str = "r",
72
+ bufsize: int = -1,
73
+ encoding: str | None = None,
74
+ errors: str | None = None,
75
+ ):
76
+ self._mode = mode
77
+ self._bufsize = bufsize
78
+ self._encoding = encoding
79
+ self._errors = errors
80
+
81
+ def __call__(self, string: str) -> typing.IO: # type: ignore[type-arg]
82
+ # the special argument "-" means sys.std{in,out}
83
+ if string == "-":
84
+ if "r" in self._mode:
85
+ return sys.stdin.buffer if "b" in self._mode else sys.stdin
86
+ elif any(c in self._mode for c in "wax"):
87
+ return sys.stdout.buffer if "b" in self._mode else sys.stdout
88
+ else:
89
+ msg = f'argument "-" with mode {self._mode}'
90
+ raise ValueError(msg)
91
+
92
+ # all other arguments are used as file names
93
+ try:
94
+ return open(string, self._mode, self._bufsize, self._encoding, self._errors)
95
+ except OSError as e:
96
+ message = f"can't open '{string}': {e}"
97
+ raise argparse.ArgumentTypeError(message)
98
+
99
+ def __repr__(self) -> str:
100
+ args = self._mode, self._bufsize
101
+ kwargs = [("encoding", self._encoding), ("errors", self._errors)]
102
+ args_str = ", ".join(
103
+ [repr(arg) for arg in args if arg != -1]
104
+ + [f"{kw}={arg!r}" for kw, arg in kwargs if arg is not None]
105
+ )
106
+ return f"{type(self).__name__}({args_str})"
107
+
108
+
109
+ def cli_detect(argv: list[str] | None = None) -> int:
110
+ """
111
+ CLI assistant using ARGV and ArgumentParser
112
+ :param argv:
113
+ :return: 0 if everything is fine, anything else equal trouble
114
+ """
115
+ parser = argparse.ArgumentParser(
116
+ description="The Real First Universal Charset Detector. "
117
+ "Discover originating encoding used on text file. "
118
+ "Normalize text to unicode."
119
+ )
120
+
121
+ parser.add_argument(
122
+ "files", type=FileType("rb"), nargs="+", help="File(s) to be analysed"
123
+ )
124
+ parser.add_argument(
125
+ "-v",
126
+ "--verbose",
127
+ action="store_true",
128
+ default=False,
129
+ dest="verbose",
130
+ help="Display complementary information about file if any. "
131
+ "Stdout will contain logs about the detection process.",
132
+ )
133
+ parser.add_argument(
134
+ "-a",
135
+ "--with-alternative",
136
+ action="store_true",
137
+ default=False,
138
+ dest="alternatives",
139
+ help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
140
+ )
141
+ parser.add_argument(
142
+ "-n",
143
+ "--normalize",
144
+ action="store_true",
145
+ default=False,
146
+ dest="normalize",
147
+ help="Permit to normalize input file. If not set, program does not write anything.",
148
+ )
149
+ parser.add_argument(
150
+ "-m",
151
+ "--minimal",
152
+ action="store_true",
153
+ default=False,
154
+ dest="minimal",
155
+ help="Only output the charset detected to STDOUT. Disabling JSON output.",
156
+ )
157
+ parser.add_argument(
158
+ "-r",
159
+ "--replace",
160
+ action="store_true",
161
+ default=False,
162
+ dest="replace",
163
+ help="Replace file when trying to normalize it instead of creating a new one.",
164
+ )
165
+ parser.add_argument(
166
+ "-f",
167
+ "--force",
168
+ action="store_true",
169
+ default=False,
170
+ dest="force",
171
+ help="Replace file without asking if you are sure, use this flag with caution.",
172
+ )
173
+ parser.add_argument(
174
+ "-i",
175
+ "--no-preemptive",
176
+ action="store_true",
177
+ default=False,
178
+ dest="no_preemptive",
179
+ help="Disable looking at a charset declaration to hint the detector.",
180
+ )
181
+ parser.add_argument(
182
+ "-t",
183
+ "--threshold",
184
+ action="store",
185
+ default=0.2,
186
+ type=float,
187
+ dest="threshold",
188
+ help="Define a custom maximum amount of noise allowed in decoded content. 0. <= noise <= 1.",
189
+ )
190
+ parser.add_argument(
191
+ "--version",
192
+ action="version",
193
+ version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format(
194
+ __version__,
195
+ python_version(),
196
+ unidata_version,
197
+ "OFF" if md_module.__file__.lower().endswith(".py") else "ON",
198
+ ),
199
+ help="Show version information and exit.",
200
+ )
201
+
202
+ args = parser.parse_args(argv)
203
+
204
+ if args.replace is True and args.normalize is False:
205
+ if args.files:
206
+ for my_file in args.files:
207
+ my_file.close()
208
+ print("Use --replace in addition of --normalize only.", file=sys.stderr)
209
+ return 1
210
+
211
+ if args.force is True and args.replace is False:
212
+ if args.files:
213
+ for my_file in args.files:
214
+ my_file.close()
215
+ print("Use --force in addition of --replace only.", file=sys.stderr)
216
+ return 1
217
+
218
+ if args.threshold < 0.0 or args.threshold > 1.0:
219
+ if args.files:
220
+ for my_file in args.files:
221
+ my_file.close()
222
+ print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
223
+ return 1
224
+
225
+ x_ = []
226
+
227
+ for my_file in args.files:
228
+ matches = from_fp(
229
+ my_file,
230
+ threshold=args.threshold,
231
+ explain=args.verbose,
232
+ preemptive_behaviour=args.no_preemptive is False,
233
+ )
234
+
235
+ best_guess = matches.best()
236
+
237
+ if best_guess is None:
238
+ print(
239
+ 'Unable to identify originating encoding for "{}". {}'.format(
240
+ my_file.name,
241
+ (
242
+ "Maybe try increasing maximum amount of chaos."
243
+ if args.threshold < 1.0
244
+ else ""
245
+ ),
246
+ ),
247
+ file=sys.stderr,
248
+ )
249
+ x_.append(
250
+ CliDetectionResult(
251
+ abspath(my_file.name),
252
+ None,
253
+ [],
254
+ [],
255
+ "Unknown",
256
+ [],
257
+ False,
258
+ 1.0,
259
+ 0.0,
260
+ None,
261
+ True,
262
+ )
263
+ )
264
+ else:
265
+ x_.append(
266
+ CliDetectionResult(
267
+ abspath(my_file.name),
268
+ best_guess.encoding,
269
+ best_guess.encoding_aliases,
270
+ [
271
+ cp
272
+ for cp in best_guess.could_be_from_charset
273
+ if cp != best_guess.encoding
274
+ ],
275
+ best_guess.language,
276
+ best_guess.alphabets,
277
+ best_guess.bom,
278
+ best_guess.percent_chaos,
279
+ best_guess.percent_coherence,
280
+ None,
281
+ True,
282
+ )
283
+ )
284
+
285
+ if len(matches) > 1 and args.alternatives:
286
+ for el in matches:
287
+ if el != best_guess:
288
+ x_.append(
289
+ CliDetectionResult(
290
+ abspath(my_file.name),
291
+ el.encoding,
292
+ el.encoding_aliases,
293
+ [
294
+ cp
295
+ for cp in el.could_be_from_charset
296
+ if cp != el.encoding
297
+ ],
298
+ el.language,
299
+ el.alphabets,
300
+ el.bom,
301
+ el.percent_chaos,
302
+ el.percent_coherence,
303
+ None,
304
+ False,
305
+ )
306
+ )
307
+
308
+ if args.normalize is True:
309
+ if best_guess.encoding.startswith("utf") is True:
310
+ print(
311
+ '"{}" file does not need to be normalized, as it already came from unicode.'.format(
312
+ my_file.name
313
+ ),
314
+ file=sys.stderr,
315
+ )
316
+ if my_file.closed is False:
317
+ my_file.close()
318
+ continue
319
+
320
+ dir_path = dirname(realpath(my_file.name))
321
+ file_name = basename(realpath(my_file.name))
322
+
323
+ o_: list[str] = file_name.split(".")
324
+
325
+ if args.replace is False:
326
+ o_.insert(-1, best_guess.encoding)
327
+ if my_file.closed is False:
328
+ my_file.close()
329
+ elif (
330
+ args.force is False
331
+ and query_yes_no(
332
+ 'Are you sure to normalize "{}" by replacing it ?'.format(
333
+ my_file.name
334
+ ),
335
+ "no",
336
+ )
337
+ is False
338
+ ):
339
+ if my_file.closed is False:
340
+ my_file.close()
341
+ continue
342
+
343
+ try:
344
+ x_[0].unicode_path = join(dir_path, ".".join(o_))
345
+
346
+ with open(x_[0].unicode_path, "wb") as fp:
347
+ fp.write(best_guess.output())
348
+ except OSError as e:
349
+ print(str(e), file=sys.stderr)
350
+ if my_file.closed is False:
351
+ my_file.close()
352
+ return 2
353
+
354
+ if my_file.closed is False:
355
+ my_file.close()
356
+
357
+ if args.minimal is False:
358
+ print(
359
+ dumps(
360
+ [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
361
+ ensure_ascii=True,
362
+ indent=4,
363
+ )
364
+ )
365
+ else:
366
+ for my_file in args.files:
367
+ print(
368
+ ", ".join(
369
+ [
370
+ el.encoding or "undefined"
371
+ for el in x_
372
+ if el.path == abspath(my_file.name)
373
+ ]
374
+ )
375
+ )
376
+
377
+ return 0
378
+
379
+
380
+ if __name__ == "__main__":
381
+ cli_detect()
venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (390 Bytes). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (9.4 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/constant.py ADDED
@@ -0,0 +1,2015 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
4
+ from encodings.aliases import aliases
5
+ from re import IGNORECASE
6
+ from re import compile as re_compile
7
+
8
+ # Contain for each eligible encoding a list of/item bytes SIG/BOM
9
+ ENCODING_MARKS: dict[str, bytes | list[bytes]] = {
10
+ "utf_8": BOM_UTF8,
11
+ "utf_7": [
12
+ b"\x2b\x2f\x76\x38",
13
+ b"\x2b\x2f\x76\x39",
14
+ b"\x2b\x2f\x76\x2b",
15
+ b"\x2b\x2f\x76\x2f",
16
+ b"\x2b\x2f\x76\x38\x2d",
17
+ ],
18
+ "gb18030": b"\x84\x31\x95\x33",
19
+ "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
20
+ "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
21
+ }
22
+
23
+ TOO_SMALL_SEQUENCE: int = 32
24
+ TOO_BIG_SEQUENCE: int = int(10e6)
25
+
26
+ UTF8_MAXIMAL_ALLOCATION: int = 1_112_064
27
+
28
+ # Up-to-date Unicode ucd/15.0.0
29
+ UNICODE_RANGES_COMBINED: dict[str, range] = {
30
+ "Control character": range(32),
31
+ "Basic Latin": range(32, 128),
32
+ "Latin-1 Supplement": range(128, 256),
33
+ "Latin Extended-A": range(256, 384),
34
+ "Latin Extended-B": range(384, 592),
35
+ "IPA Extensions": range(592, 688),
36
+ "Spacing Modifier Letters": range(688, 768),
37
+ "Combining Diacritical Marks": range(768, 880),
38
+ "Greek and Coptic": range(880, 1024),
39
+ "Cyrillic": range(1024, 1280),
40
+ "Cyrillic Supplement": range(1280, 1328),
41
+ "Armenian": range(1328, 1424),
42
+ "Hebrew": range(1424, 1536),
43
+ "Arabic": range(1536, 1792),
44
+ "Syriac": range(1792, 1872),
45
+ "Arabic Supplement": range(1872, 1920),
46
+ "Thaana": range(1920, 1984),
47
+ "NKo": range(1984, 2048),
48
+ "Samaritan": range(2048, 2112),
49
+ "Mandaic": range(2112, 2144),
50
+ "Syriac Supplement": range(2144, 2160),
51
+ "Arabic Extended-B": range(2160, 2208),
52
+ "Arabic Extended-A": range(2208, 2304),
53
+ "Devanagari": range(2304, 2432),
54
+ "Bengali": range(2432, 2560),
55
+ "Gurmukhi": range(2560, 2688),
56
+ "Gujarati": range(2688, 2816),
57
+ "Oriya": range(2816, 2944),
58
+ "Tamil": range(2944, 3072),
59
+ "Telugu": range(3072, 3200),
60
+ "Kannada": range(3200, 3328),
61
+ "Malayalam": range(3328, 3456),
62
+ "Sinhala": range(3456, 3584),
63
+ "Thai": range(3584, 3712),
64
+ "Lao": range(3712, 3840),
65
+ "Tibetan": range(3840, 4096),
66
+ "Myanmar": range(4096, 4256),
67
+ "Georgian": range(4256, 4352),
68
+ "Hangul Jamo": range(4352, 4608),
69
+ "Ethiopic": range(4608, 4992),
70
+ "Ethiopic Supplement": range(4992, 5024),
71
+ "Cherokee": range(5024, 5120),
72
+ "Unified Canadian Aboriginal Syllabics": range(5120, 5760),
73
+ "Ogham": range(5760, 5792),
74
+ "Runic": range(5792, 5888),
75
+ "Tagalog": range(5888, 5920),
76
+ "Hanunoo": range(5920, 5952),
77
+ "Buhid": range(5952, 5984),
78
+ "Tagbanwa": range(5984, 6016),
79
+ "Khmer": range(6016, 6144),
80
+ "Mongolian": range(6144, 6320),
81
+ "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400),
82
+ "Limbu": range(6400, 6480),
83
+ "Tai Le": range(6480, 6528),
84
+ "New Tai Lue": range(6528, 6624),
85
+ "Khmer Symbols": range(6624, 6656),
86
+ "Buginese": range(6656, 6688),
87
+ "Tai Tham": range(6688, 6832),
88
+ "Combining Diacritical Marks Extended": range(6832, 6912),
89
+ "Balinese": range(6912, 7040),
90
+ "Sundanese": range(7040, 7104),
91
+ "Batak": range(7104, 7168),
92
+ "Lepcha": range(7168, 7248),
93
+ "Ol Chiki": range(7248, 7296),
94
+ "Cyrillic Extended-C": range(7296, 7312),
95
+ "Georgian Extended": range(7312, 7360),
96
+ "Sundanese Supplement": range(7360, 7376),
97
+ "Vedic Extensions": range(7376, 7424),
98
+ "Phonetic Extensions": range(7424, 7552),
99
+ "Phonetic Extensions Supplement": range(7552, 7616),
100
+ "Combining Diacritical Marks Supplement": range(7616, 7680),
101
+ "Latin Extended Additional": range(7680, 7936),
102
+ "Greek Extended": range(7936, 8192),
103
+ "General Punctuation": range(8192, 8304),
104
+ "Superscripts and Subscripts": range(8304, 8352),
105
+ "Currency Symbols": range(8352, 8400),
106
+ "Combining Diacritical Marks for Symbols": range(8400, 8448),
107
+ "Letterlike Symbols": range(8448, 8528),
108
+ "Number Forms": range(8528, 8592),
109
+ "Arrows": range(8592, 8704),
110
+ "Mathematical Operators": range(8704, 8960),
111
+ "Miscellaneous Technical": range(8960, 9216),
112
+ "Control Pictures": range(9216, 9280),
113
+ "Optical Character Recognition": range(9280, 9312),
114
+ "Enclosed Alphanumerics": range(9312, 9472),
115
+ "Box Drawing": range(9472, 9600),
116
+ "Block Elements": range(9600, 9632),
117
+ "Geometric Shapes": range(9632, 9728),
118
+ "Miscellaneous Symbols": range(9728, 9984),
119
+ "Dingbats": range(9984, 10176),
120
+ "Miscellaneous Mathematical Symbols-A": range(10176, 10224),
121
+ "Supplemental Arrows-A": range(10224, 10240),
122
+ "Braille Patterns": range(10240, 10496),
123
+ "Supplemental Arrows-B": range(10496, 10624),
124
+ "Miscellaneous Mathematical Symbols-B": range(10624, 10752),
125
+ "Supplemental Mathematical Operators": range(10752, 11008),
126
+ "Miscellaneous Symbols and Arrows": range(11008, 11264),
127
+ "Glagolitic": range(11264, 11360),
128
+ "Latin Extended-C": range(11360, 11392),
129
+ "Coptic": range(11392, 11520),
130
+ "Georgian Supplement": range(11520, 11568),
131
+ "Tifinagh": range(11568, 11648),
132
+ "Ethiopic Extended": range(11648, 11744),
133
+ "Cyrillic Extended-A": range(11744, 11776),
134
+ "Supplemental Punctuation": range(11776, 11904),
135
+ "CJK Radicals Supplement": range(11904, 12032),
136
+ "Kangxi Radicals": range(12032, 12256),
137
+ "Ideographic Description Characters": range(12272, 12288),
138
+ "CJK Symbols and Punctuation": range(12288, 12352),
139
+ "Hiragana": range(12352, 12448),
140
+ "Katakana": range(12448, 12544),
141
+ "Bopomofo": range(12544, 12592),
142
+ "Hangul Compatibility Jamo": range(12592, 12688),
143
+ "Kanbun": range(12688, 12704),
144
+ "Bopomofo Extended": range(12704, 12736),
145
+ "CJK Strokes": range(12736, 12784),
146
+ "Katakana Phonetic Extensions": range(12784, 12800),
147
+ "Enclosed CJK Letters and Months": range(12800, 13056),
148
+ "CJK Compatibility": range(13056, 13312),
149
+ "CJK Unified Ideographs Extension A": range(13312, 19904),
150
+ "Yijing Hexagram Symbols": range(19904, 19968),
151
+ "CJK Unified Ideographs": range(19968, 40960),
152
+ "Yi Syllables": range(40960, 42128),
153
+ "Yi Radicals": range(42128, 42192),
154
+ "Lisu": range(42192, 42240),
155
+ "Vai": range(42240, 42560),
156
+ "Cyrillic Extended-B": range(42560, 42656),
157
+ "Bamum": range(42656, 42752),
158
+ "Modifier Tone Letters": range(42752, 42784),
159
+ "Latin Extended-D": range(42784, 43008),
160
+ "Syloti Nagri": range(43008, 43056),
161
+ "Common Indic Number Forms": range(43056, 43072),
162
+ "Phags-pa": range(43072, 43136),
163
+ "Saurashtra": range(43136, 43232),
164
+ "Devanagari Extended": range(43232, 43264),
165
+ "Kayah Li": range(43264, 43312),
166
+ "Rejang": range(43312, 43360),
167
+ "Hangul Jamo Extended-A": range(43360, 43392),
168
+ "Javanese": range(43392, 43488),
169
+ "Myanmar Extended-B": range(43488, 43520),
170
+ "Cham": range(43520, 43616),
171
+ "Myanmar Extended-A": range(43616, 43648),
172
+ "Tai Viet": range(43648, 43744),
173
+ "Meetei Mayek Extensions": range(43744, 43776),
174
+ "Ethiopic Extended-A": range(43776, 43824),
175
+ "Latin Extended-E": range(43824, 43888),
176
+ "Cherokee Supplement": range(43888, 43968),
177
+ "Meetei Mayek": range(43968, 44032),
178
+ "Hangul Syllables": range(44032, 55216),
179
+ "Hangul Jamo Extended-B": range(55216, 55296),
180
+ "High Surrogates": range(55296, 56192),
181
+ "High Private Use Surrogates": range(56192, 56320),
182
+ "Low Surrogates": range(56320, 57344),
183
+ "Private Use Area": range(57344, 63744),
184
+ "CJK Compatibility Ideographs": range(63744, 64256),
185
+ "Alphabetic Presentation Forms": range(64256, 64336),
186
+ "Arabic Presentation Forms-A": range(64336, 65024),
187
+ "Variation Selectors": range(65024, 65040),
188
+ "Vertical Forms": range(65040, 65056),
189
+ "Combining Half Marks": range(65056, 65072),
190
+ "CJK Compatibility Forms": range(65072, 65104),
191
+ "Small Form Variants": range(65104, 65136),
192
+ "Arabic Presentation Forms-B": range(65136, 65280),
193
+ "Halfwidth and Fullwidth Forms": range(65280, 65520),
194
+ "Specials": range(65520, 65536),
195
+ "Linear B Syllabary": range(65536, 65664),
196
+ "Linear B Ideograms": range(65664, 65792),
197
+ "Aegean Numbers": range(65792, 65856),
198
+ "Ancient Greek Numbers": range(65856, 65936),
199
+ "Ancient Symbols": range(65936, 66000),
200
+ "Phaistos Disc": range(66000, 66048),
201
+ "Lycian": range(66176, 66208),
202
+ "Carian": range(66208, 66272),
203
+ "Coptic Epact Numbers": range(66272, 66304),
204
+ "Old Italic": range(66304, 66352),
205
+ "Gothic": range(66352, 66384),
206
+ "Old Permic": range(66384, 66432),
207
+ "Ugaritic": range(66432, 66464),
208
+ "Old Persian": range(66464, 66528),
209
+ "Deseret": range(66560, 66640),
210
+ "Shavian": range(66640, 66688),
211
+ "Osmanya": range(66688, 66736),
212
+ "Osage": range(66736, 66816),
213
+ "Elbasan": range(66816, 66864),
214
+ "Caucasian Albanian": range(66864, 66928),
215
+ "Vithkuqi": range(66928, 67008),
216
+ "Linear A": range(67072, 67456),
217
+ "Latin Extended-F": range(67456, 67520),
218
+ "Cypriot Syllabary": range(67584, 67648),
219
+ "Imperial Aramaic": range(67648, 67680),
220
+ "Palmyrene": range(67680, 67712),
221
+ "Nabataean": range(67712, 67760),
222
+ "Hatran": range(67808, 67840),
223
+ "Phoenician": range(67840, 67872),
224
+ "Lydian": range(67872, 67904),
225
+ "Meroitic Hieroglyphs": range(67968, 68000),
226
+ "Meroitic Cursive": range(68000, 68096),
227
+ "Kharoshthi": range(68096, 68192),
228
+ "Old South Arabian": range(68192, 68224),
229
+ "Old North Arabian": range(68224, 68256),
230
+ "Manichaean": range(68288, 68352),
231
+ "Avestan": range(68352, 68416),
232
+ "Inscriptional Parthian": range(68416, 68448),
233
+ "Inscriptional Pahlavi": range(68448, 68480),
234
+ "Psalter Pahlavi": range(68480, 68528),
235
+ "Old Turkic": range(68608, 68688),
236
+ "Old Hungarian": range(68736, 68864),
237
+ "Hanifi Rohingya": range(68864, 68928),
238
+ "Rumi Numeral Symbols": range(69216, 69248),
239
+ "Yezidi": range(69248, 69312),
240
+ "Arabic Extended-C": range(69312, 69376),
241
+ "Old Sogdian": range(69376, 69424),
242
+ "Sogdian": range(69424, 69488),
243
+ "Old Uyghur": range(69488, 69552),
244
+ "Chorasmian": range(69552, 69600),
245
+ "Elymaic": range(69600, 69632),
246
+ "Brahmi": range(69632, 69760),
247
+ "Kaithi": range(69760, 69840),
248
+ "Sora Sompeng": range(69840, 69888),
249
+ "Chakma": range(69888, 69968),
250
+ "Mahajani": range(69968, 70016),
251
+ "Sharada": range(70016, 70112),
252
+ "Sinhala Archaic Numbers": range(70112, 70144),
253
+ "Khojki": range(70144, 70224),
254
+ "Multani": range(70272, 70320),
255
+ "Khudawadi": range(70320, 70400),
256
+ "Grantha": range(70400, 70528),
257
+ "Newa": range(70656, 70784),
258
+ "Tirhuta": range(70784, 70880),
259
+ "Siddham": range(71040, 71168),
260
+ "Modi": range(71168, 71264),
261
+ "Mongolian Supplement": range(71264, 71296),
262
+ "Takri": range(71296, 71376),
263
+ "Ahom": range(71424, 71504),
264
+ "Dogra": range(71680, 71760),
265
+ "Warang Citi": range(71840, 71936),
266
+ "Dives Akuru": range(71936, 72032),
267
+ "Nandinagari": range(72096, 72192),
268
+ "Zanabazar Square": range(72192, 72272),
269
+ "Soyombo": range(72272, 72368),
270
+ "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384),
271
+ "Pau Cin Hau": range(72384, 72448),
272
+ "Devanagari Extended-A": range(72448, 72544),
273
+ "Bhaiksuki": range(72704, 72816),
274
+ "Marchen": range(72816, 72896),
275
+ "Masaram Gondi": range(72960, 73056),
276
+ "Gunjala Gondi": range(73056, 73136),
277
+ "Makasar": range(73440, 73472),
278
+ "Kawi": range(73472, 73568),
279
+ "Lisu Supplement": range(73648, 73664),
280
+ "Tamil Supplement": range(73664, 73728),
281
+ "Cuneiform": range(73728, 74752),
282
+ "Cuneiform Numbers and Punctuation": range(74752, 74880),
283
+ "Early Dynastic Cuneiform": range(74880, 75088),
284
+ "Cypro-Minoan": range(77712, 77824),
285
+ "Egyptian Hieroglyphs": range(77824, 78896),
286
+ "Egyptian Hieroglyph Format Controls": range(78896, 78944),
287
+ "Anatolian Hieroglyphs": range(82944, 83584),
288
+ "Bamum Supplement": range(92160, 92736),
289
+ "Mro": range(92736, 92784),
290
+ "Tangsa": range(92784, 92880),
291
+ "Bassa Vah": range(92880, 92928),
292
+ "Pahawh Hmong": range(92928, 93072),
293
+ "Medefaidrin": range(93760, 93856),
294
+ "Miao": range(93952, 94112),
295
+ "Ideographic Symbols and Punctuation": range(94176, 94208),
296
+ "Tangut": range(94208, 100352),
297
+ "Tangut Components": range(100352, 101120),
298
+ "Khitan Small Script": range(101120, 101632),
299
+ "Tangut Supplement": range(101632, 101760),
300
+ "Kana Extended-B": range(110576, 110592),
301
+ "Kana Supplement": range(110592, 110848),
302
+ "Kana Extended-A": range(110848, 110896),
303
+ "Small Kana Extension": range(110896, 110960),
304
+ "Nushu": range(110960, 111360),
305
+ "Duployan": range(113664, 113824),
306
+ "Shorthand Format Controls": range(113824, 113840),
307
+ "Znamenny Musical Notation": range(118528, 118736),
308
+ "Byzantine Musical Symbols": range(118784, 119040),
309
+ "Musical Symbols": range(119040, 119296),
310
+ "Ancient Greek Musical Notation": range(119296, 119376),
311
+ "Kaktovik Numerals": range(119488, 119520),
312
+ "Mayan Numerals": range(119520, 119552),
313
+ "Tai Xuan Jing Symbols": range(119552, 119648),
314
+ "Counting Rod Numerals": range(119648, 119680),
315
+ "Mathematical Alphanumeric Symbols": range(119808, 120832),
316
+ "Sutton SignWriting": range(120832, 121520),
317
+ "Latin Extended-G": range(122624, 122880),
318
+ "Glagolitic Supplement": range(122880, 122928),
319
+ "Cyrillic Extended-D": range(122928, 123024),
320
+ "Nyiakeng Puachue Hmong": range(123136, 123216),
321
+ "Toto": range(123536, 123584),
322
+ "Wancho": range(123584, 123648),
323
+ "Nag Mundari": range(124112, 124160),
324
+ "Ethiopic Extended-B": range(124896, 124928),
325
+ "Mende Kikakui": range(124928, 125152),
326
+ "Adlam": range(125184, 125280),
327
+ "Indic Siyaq Numbers": range(126064, 126144),
328
+ "Ottoman Siyaq Numbers": range(126208, 126288),
329
+ "Arabic Mathematical Alphabetic Symbols": range(126464, 126720),
330
+ "Mahjong Tiles": range(126976, 127024),
331
+ "Domino Tiles": range(127024, 127136),
332
+ "Playing Cards": range(127136, 127232),
333
+ "Enclosed Alphanumeric Supplement": range(127232, 127488),
334
+ "Enclosed Ideographic Supplement": range(127488, 127744),
335
+ "Miscellaneous Symbols and Pictographs": range(127744, 128512),
336
+ "Emoticons range(Emoji)": range(128512, 128592),
337
+ "Ornamental Dingbats": range(128592, 128640),
338
+ "Transport and Map Symbols": range(128640, 128768),
339
+ "Alchemical Symbols": range(128768, 128896),
340
+ "Geometric Shapes Extended": range(128896, 129024),
341
+ "Supplemental Arrows-C": range(129024, 129280),
342
+ "Supplemental Symbols and Pictographs": range(129280, 129536),
343
+ "Chess Symbols": range(129536, 129648),
344
+ "Symbols and Pictographs Extended-A": range(129648, 129792),
345
+ "Symbols for Legacy Computing": range(129792, 130048),
346
+ "CJK Unified Ideographs Extension B": range(131072, 173792),
347
+ "CJK Unified Ideographs Extension C": range(173824, 177984),
348
+ "CJK Unified Ideographs Extension D": range(177984, 178208),
349
+ "CJK Unified Ideographs Extension E": range(178208, 183984),
350
+ "CJK Unified Ideographs Extension F": range(183984, 191472),
351
+ "CJK Compatibility Ideographs Supplement": range(194560, 195104),
352
+ "CJK Unified Ideographs Extension G": range(196608, 201552),
353
+ "CJK Unified Ideographs Extension H": range(201552, 205744),
354
+ "Tags": range(917504, 917632),
355
+ "Variation Selectors Supplement": range(917760, 918000),
356
+ "Supplementary Private Use Area-A": range(983040, 1048576),
357
+ "Supplementary Private Use Area-B": range(1048576, 1114112),
358
+ }
359
+
360
+
361
+ UNICODE_SECONDARY_RANGE_KEYWORD: list[str] = [
362
+ "Supplement",
363
+ "Extended",
364
+ "Extensions",
365
+ "Modifier",
366
+ "Marks",
367
+ "Punctuation",
368
+ "Symbols",
369
+ "Forms",
370
+ "Operators",
371
+ "Miscellaneous",
372
+ "Drawing",
373
+ "Block",
374
+ "Shapes",
375
+ "Supplemental",
376
+ "Tags",
377
+ ]
378
+
379
+ RE_POSSIBLE_ENCODING_INDICATION = re_compile(
380
+ r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
381
+ IGNORECASE,
382
+ )
383
+
384
+ IANA_NO_ALIASES = [
385
+ "cp720",
386
+ "cp737",
387
+ "cp856",
388
+ "cp874",
389
+ "cp875",
390
+ "cp1006",
391
+ "koi8_r",
392
+ "koi8_t",
393
+ "koi8_u",
394
+ ]
395
+
396
+ IANA_SUPPORTED: list[str] = sorted(
397
+ filter(
398
+ lambda x: x.endswith("_codec") is False
399
+ and x not in {"rot_13", "tactis", "mbcs"},
400
+ list(set(aliases.values())) + IANA_NO_ALIASES,
401
+ )
402
+ )
403
+
404
+ IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
405
+
406
+ # pre-computed code page that are similar using the function cp_similarity.
407
+ IANA_SUPPORTED_SIMILAR: dict[str, list[str]] = {
408
+ "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
409
+ "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
410
+ "cp1125": ["cp866"],
411
+ "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
412
+ "cp1250": ["iso8859_2"],
413
+ "cp1251": ["kz1048", "ptcp154"],
414
+ "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
415
+ "cp1253": ["iso8859_7"],
416
+ "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
417
+ "cp1257": ["iso8859_13"],
418
+ "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
419
+ "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
420
+ "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
421
+ "cp850": ["cp437", "cp857", "cp858", "cp865"],
422
+ "cp857": ["cp850", "cp858", "cp865"],
423
+ "cp858": ["cp437", "cp850", "cp857", "cp865"],
424
+ "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
425
+ "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
426
+ "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
427
+ "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
428
+ "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
429
+ "cp866": ["cp1125"],
430
+ "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
431
+ "iso8859_11": ["tis_620"],
432
+ "iso8859_13": ["cp1257"],
433
+ "iso8859_14": [
434
+ "iso8859_10",
435
+ "iso8859_15",
436
+ "iso8859_16",
437
+ "iso8859_3",
438
+ "iso8859_9",
439
+ "latin_1",
440
+ ],
441
+ "iso8859_15": [
442
+ "cp1252",
443
+ "cp1254",
444
+ "iso8859_10",
445
+ "iso8859_14",
446
+ "iso8859_16",
447
+ "iso8859_3",
448
+ "iso8859_9",
449
+ "latin_1",
450
+ ],
451
+ "iso8859_16": [
452
+ "iso8859_14",
453
+ "iso8859_15",
454
+ "iso8859_2",
455
+ "iso8859_3",
456
+ "iso8859_9",
457
+ "latin_1",
458
+ ],
459
+ "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
460
+ "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
461
+ "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
462
+ "iso8859_7": ["cp1253"],
463
+ "iso8859_9": [
464
+ "cp1252",
465
+ "cp1254",
466
+ "cp1258",
467
+ "iso8859_10",
468
+ "iso8859_14",
469
+ "iso8859_15",
470
+ "iso8859_16",
471
+ "iso8859_3",
472
+ "iso8859_4",
473
+ "latin_1",
474
+ ],
475
+ "kz1048": ["cp1251", "ptcp154"],
476
+ "latin_1": [
477
+ "cp1252",
478
+ "cp1254",
479
+ "cp1258",
480
+ "iso8859_10",
481
+ "iso8859_14",
482
+ "iso8859_15",
483
+ "iso8859_16",
484
+ "iso8859_3",
485
+ "iso8859_4",
486
+ "iso8859_9",
487
+ ],
488
+ "mac_iceland": ["mac_roman", "mac_turkish"],
489
+ "mac_roman": ["mac_iceland", "mac_turkish"],
490
+ "mac_turkish": ["mac_iceland", "mac_roman"],
491
+ "ptcp154": ["cp1251", "kz1048"],
492
+ "tis_620": ["iso8859_11"],
493
+ }
494
+
495
+
496
+ CHARDET_CORRESPONDENCE: dict[str, str] = {
497
+ "iso2022_kr": "ISO-2022-KR",
498
+ "iso2022_jp": "ISO-2022-JP",
499
+ "euc_kr": "EUC-KR",
500
+ "tis_620": "TIS-620",
501
+ "utf_32": "UTF-32",
502
+ "euc_jp": "EUC-JP",
503
+ "koi8_r": "KOI8-R",
504
+ "iso8859_1": "ISO-8859-1",
505
+ "iso8859_2": "ISO-8859-2",
506
+ "iso8859_5": "ISO-8859-5",
507
+ "iso8859_6": "ISO-8859-6",
508
+ "iso8859_7": "ISO-8859-7",
509
+ "iso8859_8": "ISO-8859-8",
510
+ "utf_16": "UTF-16",
511
+ "cp855": "IBM855",
512
+ "mac_cyrillic": "MacCyrillic",
513
+ "gb2312": "GB2312",
514
+ "gb18030": "GB18030",
515
+ "cp932": "CP932",
516
+ "cp866": "IBM866",
517
+ "utf_8": "utf-8",
518
+ "utf_8_sig": "UTF-8-SIG",
519
+ "shift_jis": "SHIFT_JIS",
520
+ "big5": "Big5",
521
+ "cp1250": "windows-1250",
522
+ "cp1251": "windows-1251",
523
+ "cp1252": "Windows-1252",
524
+ "cp1253": "windows-1253",
525
+ "cp1255": "windows-1255",
526
+ "cp1256": "windows-1256",
527
+ "cp1254": "Windows-1254",
528
+ "cp949": "CP949",
529
+ }
530
+
531
+
532
+ COMMON_SAFE_ASCII_CHARACTERS: set[str] = {
533
+ "<",
534
+ ">",
535
+ "=",
536
+ ":",
537
+ "/",
538
+ "&",
539
+ ";",
540
+ "{",
541
+ "}",
542
+ "[",
543
+ "]",
544
+ ",",
545
+ "|",
546
+ '"',
547
+ "-",
548
+ "(",
549
+ ")",
550
+ }
551
+
552
+ # Sample character sets — replace with full lists if needed
553
+ COMMON_CHINESE_CHARACTERS = "的一是在不了有和人这中大为上个国我以要他时来用们生到作地于出就分对成会可主发年动同工也能下过子说产种面而方后多定行学法所民得经十三之进着等部度家电力里如水化高自二理起小物现实加量都两体制机当使点从业本去把性好应开它合还因由其些然前外天政四日那社义事平形相全表间样与关各重新线内数正心反你明看原又么利比或但质气第向道命此变条只没结解问意建月公无系军很情者最立代想已通并提直题党程展五果料象员革位入常文总次品式活设及管特件长求老头基资边流路级少图山统接知较将组见计别她手角期根论运农指几九区强放决西被干做必战先回则任取据处队南给色光门即保治北造百规热领七海口东导器压志世金增争济阶油思术极交受联什认六共权收证改清己美再采转更单风切打白教速花带安场身车例真务具万每目至达走积示议声报斗完类八离华名确才科张信马节话米整空元况今集温传土许步群广石记需段研界拉林律叫且究观越织装影算低持音众书布复容儿须际商非验连断深难近矿千周委素技备半办青省列习响约支般史感劳便团往酸历市克何除消构府太准精值号率族维划选标写存候毛亲快效斯院查江型眼王按格养易置派层片始却专状育厂京识适属圆包火住调满县局照参红细引听该铁价严龙飞"
554
+
555
+ COMMON_JAPANESE_CHARACTERS = "日一国年大十二本中長出三時行見月分後前生五間上東四今金九入学高円子外八六下来気小七山話女北午百書先名川千水半男西電校語土木聞食車何南万毎白天母火右読友左休父雨"
556
+
557
+ COMMON_KOREAN_CHARACTERS = "一二三四五六七八九十百千萬上下左右中人女子大小山川日月火水木金土父母天地國名年時文校學生"
558
+
559
+ # Combine all into a set
560
+ COMMON_CJK_CHARACTERS = set(
561
+ "".join(
562
+ [
563
+ COMMON_CHINESE_CHARACTERS,
564
+ COMMON_JAPANESE_CHARACTERS,
565
+ COMMON_KOREAN_CHARACTERS,
566
+ ]
567
+ )
568
+ )
569
+
570
+ KO_NAMES: set[str] = {"johab", "cp949", "euc_kr"}
571
+ ZH_NAMES: set[str] = {"big5", "cp950", "big5hkscs", "hz"}
572
+
573
+ # Logging LEVEL below DEBUG
574
+ TRACE: int = 5
575
+
576
+
577
+ # Language label that contain the em dash "—"
578
+ # character are to be considered alternative seq to origin
579
+ FREQUENCIES: dict[str, list[str]] = {
580
+ "English": [
581
+ "e",
582
+ "a",
583
+ "t",
584
+ "i",
585
+ "o",
586
+ "n",
587
+ "s",
588
+ "r",
589
+ "h",
590
+ "l",
591
+ "d",
592
+ "c",
593
+ "u",
594
+ "m",
595
+ "f",
596
+ "p",
597
+ "g",
598
+ "w",
599
+ "y",
600
+ "b",
601
+ "v",
602
+ "k",
603
+ "x",
604
+ "j",
605
+ "z",
606
+ "q",
607
+ ],
608
+ "English—": [
609
+ "e",
610
+ "a",
611
+ "t",
612
+ "i",
613
+ "o",
614
+ "n",
615
+ "s",
616
+ "r",
617
+ "h",
618
+ "l",
619
+ "d",
620
+ "c",
621
+ "m",
622
+ "u",
623
+ "f",
624
+ "p",
625
+ "g",
626
+ "w",
627
+ "b",
628
+ "y",
629
+ "v",
630
+ "k",
631
+ "j",
632
+ "x",
633
+ "z",
634
+ "q",
635
+ ],
636
+ "German": [
637
+ "e",
638
+ "n",
639
+ "i",
640
+ "r",
641
+ "s",
642
+ "t",
643
+ "a",
644
+ "d",
645
+ "h",
646
+ "u",
647
+ "l",
648
+ "g",
649
+ "o",
650
+ "c",
651
+ "m",
652
+ "b",
653
+ "f",
654
+ "k",
655
+ "w",
656
+ "z",
657
+ "p",
658
+ "v",
659
+ "ü",
660
+ "ä",
661
+ "ö",
662
+ "j",
663
+ ],
664
+ "French": [
665
+ "e",
666
+ "a",
667
+ "s",
668
+ "n",
669
+ "i",
670
+ "t",
671
+ "r",
672
+ "l",
673
+ "u",
674
+ "o",
675
+ "d",
676
+ "c",
677
+ "p",
678
+ "m",
679
+ "é",
680
+ "v",
681
+ "g",
682
+ "f",
683
+ "b",
684
+ "h",
685
+ "q",
686
+ "à",
687
+ "x",
688
+ "è",
689
+ "y",
690
+ "j",
691
+ ],
692
+ "Dutch": [
693
+ "e",
694
+ "n",
695
+ "a",
696
+ "i",
697
+ "r",
698
+ "t",
699
+ "o",
700
+ "d",
701
+ "s",
702
+ "l",
703
+ "g",
704
+ "h",
705
+ "v",
706
+ "m",
707
+ "u",
708
+ "k",
709
+ "c",
710
+ "p",
711
+ "b",
712
+ "w",
713
+ "j",
714
+ "z",
715
+ "f",
716
+ "y",
717
+ "x",
718
+ "ë",
719
+ ],
720
+ "Italian": [
721
+ "e",
722
+ "i",
723
+ "a",
724
+ "o",
725
+ "n",
726
+ "l",
727
+ "t",
728
+ "r",
729
+ "s",
730
+ "c",
731
+ "d",
732
+ "u",
733
+ "p",
734
+ "m",
735
+ "g",
736
+ "v",
737
+ "f",
738
+ "b",
739
+ "z",
740
+ "h",
741
+ "q",
742
+ "è",
743
+ "à",
744
+ "k",
745
+ "y",
746
+ "ò",
747
+ ],
748
+ "Polish": [
749
+ "a",
750
+ "i",
751
+ "o",
752
+ "e",
753
+ "n",
754
+ "r",
755
+ "z",
756
+ "w",
757
+ "s",
758
+ "c",
759
+ "t",
760
+ "k",
761
+ "y",
762
+ "d",
763
+ "p",
764
+ "m",
765
+ "u",
766
+ "l",
767
+ "j",
768
+ "ł",
769
+ "g",
770
+ "b",
771
+ "h",
772
+ "ą",
773
+ "ę",
774
+ "ó",
775
+ ],
776
+ "Spanish": [
777
+ "e",
778
+ "a",
779
+ "o",
780
+ "n",
781
+ "s",
782
+ "r",
783
+ "i",
784
+ "l",
785
+ "d",
786
+ "t",
787
+ "c",
788
+ "u",
789
+ "m",
790
+ "p",
791
+ "b",
792
+ "g",
793
+ "v",
794
+ "f",
795
+ "y",
796
+ "ó",
797
+ "h",
798
+ "q",
799
+ "í",
800
+ "j",
801
+ "z",
802
+ "á",
803
+ ],
804
+ "Russian": [
805
+ "о",
806
+ "а",
807
+ "е",
808
+ "и",
809
+ "н",
810
+ "с",
811
+ "т",
812
+ "р",
813
+ "в",
814
+ "л",
815
+ "к",
816
+ "м",
817
+ "д",
818
+ "п",
819
+ "у",
820
+ "г",
821
+ "я",
822
+ "ы",
823
+ "з",
824
+ "б",
825
+ "й",
826
+ "ь",
827
+ "ч",
828
+ "х",
829
+ "ж",
830
+ "ц",
831
+ ],
832
+ # Jap-Kanji
833
+ "Japanese": [
834
+ "人",
835
+ "一",
836
+ "大",
837
+ "亅",
838
+ "丁",
839
+ "丨",
840
+ "竹",
841
+ "笑",
842
+ "口",
843
+ "日",
844
+ "今",
845
+ "二",
846
+ "彳",
847
+ "行",
848
+ "十",
849
+ "土",
850
+ "丶",
851
+ "寸",
852
+ "寺",
853
+ "時",
854
+ "乙",
855
+ "丿",
856
+ "乂",
857
+ "气",
858
+ "気",
859
+ "冂",
860
+ "巾",
861
+ "亠",
862
+ "市",
863
+ "目",
864
+ "儿",
865
+ "見",
866
+ "八",
867
+ "小",
868
+ "凵",
869
+ "県",
870
+ "月",
871
+ "彐",
872
+ "門",
873
+ "間",
874
+ "木",
875
+ "東",
876
+ "山",
877
+ "出",
878
+ "本",
879
+ "中",
880
+ "刀",
881
+ "分",
882
+ "耳",
883
+ "又",
884
+ "取",
885
+ "最",
886
+ "言",
887
+ "田",
888
+ "心",
889
+ "思",
890
+ "刂",
891
+ "前",
892
+ "京",
893
+ "尹",
894
+ "事",
895
+ "生",
896
+ "厶",
897
+ "云",
898
+ "会",
899
+ "未",
900
+ "来",
901
+ "白",
902
+ "冫",
903
+ "楽",
904
+ "灬",
905
+ "馬",
906
+ "尸",
907
+ "尺",
908
+ "駅",
909
+ "明",
910
+ "耂",
911
+ "者",
912
+ "了",
913
+ "阝",
914
+ "都",
915
+ "高",
916
+ "卜",
917
+ "占",
918
+ "厂",
919
+ "广",
920
+ "店",
921
+ "子",
922
+ "申",
923
+ "奄",
924
+ "亻",
925
+ "俺",
926
+ "上",
927
+ "方",
928
+ "冖",
929
+ "学",
930
+ "衣",
931
+ "艮",
932
+ "食",
933
+ "自",
934
+ ],
935
+ # Jap-Katakana
936
+ "Japanese—": [
937
+ "ー",
938
+ "ン",
939
+ "ス",
940
+ "・",
941
+ "ル",
942
+ "ト",
943
+ "リ",
944
+ "イ",
945
+ "ア",
946
+ "ラ",
947
+ "ッ",
948
+ "ク",
949
+ "ド",
950
+ "シ",
951
+ "レ",
952
+ "ジ",
953
+ "タ",
954
+ "フ",
955
+ "ロ",
956
+ "カ",
957
+ "テ",
958
+ "マ",
959
+ "ィ",
960
+ "グ",
961
+ "バ",
962
+ "ム",
963
+ "プ",
964
+ "オ",
965
+ "コ",
966
+ "デ",
967
+ "ニ",
968
+ "ウ",
969
+ "メ",
970
+ "サ",
971
+ "ビ",
972
+ "ナ",
973
+ "ブ",
974
+ "ャ",
975
+ "エ",
976
+ "ュ",
977
+ "チ",
978
+ "キ",
979
+ "ズ",
980
+ "ダ",
981
+ "パ",
982
+ "ミ",
983
+ "ェ",
984
+ "ョ",
985
+ "ハ",
986
+ "セ",
987
+ "ベ",
988
+ "ガ",
989
+ "モ",
990
+ "ツ",
991
+ "ネ",
992
+ "ボ",
993
+ "ソ",
994
+ "ノ",
995
+ "ァ",
996
+ "ヴ",
997
+ "ワ",
998
+ "ポ",
999
+ "ペ",
1000
+ "ピ",
1001
+ "ケ",
1002
+ "ゴ",
1003
+ "ギ",
1004
+ "ザ",
1005
+ "ホ",
1006
+ "ゲ",
1007
+ "ォ",
1008
+ "ヤ",
1009
+ "ヒ",
1010
+ "ユ",
1011
+ "ヨ",
1012
+ "ヘ",
1013
+ "ゼ",
1014
+ "ヌ",
1015
+ "ゥ",
1016
+ "ゾ",
1017
+ "ヶ",
1018
+ "ヂ",
1019
+ "ヲ",
1020
+ "ヅ",
1021
+ "ヵ",
1022
+ "ヱ",
1023
+ "ヰ",
1024
+ "ヮ",
1025
+ "ヽ",
1026
+ "゠",
1027
+ "ヾ",
1028
+ "ヷ",
1029
+ "ヿ",
1030
+ "ヸ",
1031
+ "ヹ",
1032
+ "ヺ",
1033
+ ],
1034
+ # Jap-Hiragana
1035
+ "Japanese——": [
1036
+ "の",
1037
+ "に",
1038
+ "る",
1039
+ "た",
1040
+ "と",
1041
+ "は",
1042
+ "し",
1043
+ "い",
1044
+ "を",
1045
+ "で",
1046
+ "て",
1047
+ "が",
1048
+ "な",
1049
+ "れ",
1050
+ "か",
1051
+ "ら",
1052
+ "さ",
1053
+ "っ",
1054
+ "り",
1055
+ "す",
1056
+ "あ",
1057
+ "も",
1058
+ "こ",
1059
+ "ま",
1060
+ "う",
1061
+ "く",
1062
+ "よ",
1063
+ "き",
1064
+ "ん",
1065
+ "め",
1066
+ "お",
1067
+ "け",
1068
+ "そ",
1069
+ "つ",
1070
+ "だ",
1071
+ "や",
1072
+ "え",
1073
+ "ど",
1074
+ "わ",
1075
+ "ち",
1076
+ "み",
1077
+ "せ",
1078
+ "じ",
1079
+ "ば",
1080
+ "へ",
1081
+ "び",
1082
+ "ず",
1083
+ "ろ",
1084
+ "ほ",
1085
+ "げ",
1086
+ "む",
1087
+ "べ",
1088
+ "ひ",
1089
+ "ょ",
1090
+ "ゆ",
1091
+ "ぶ",
1092
+ "ご",
1093
+ "ゃ",
1094
+ "ね",
1095
+ "ふ",
1096
+ "ぐ",
1097
+ "ぎ",
1098
+ "ぼ",
1099
+ "ゅ",
1100
+ "づ",
1101
+ "ざ",
1102
+ "ぞ",
1103
+ "ぬ",
1104
+ "ぜ",
1105
+ "ぱ",
1106
+ "ぽ",
1107
+ "ぷ",
1108
+ "ぴ",
1109
+ "ぃ",
1110
+ "ぁ",
1111
+ "ぇ",
1112
+ "ぺ",
1113
+ "ゞ",
1114
+ "ぢ",
1115
+ "ぉ",
1116
+ "ぅ",
1117
+ "ゐ",
1118
+ "ゝ",
1119
+ "ゑ",
1120
+ "゛",
1121
+ "゜",
1122
+ "ゎ",
1123
+ "ゔ",
1124
+ "゚",
1125
+ "ゟ",
1126
+ "゙",
1127
+ "ゕ",
1128
+ "ゖ",
1129
+ ],
1130
+ "Portuguese": [
1131
+ "a",
1132
+ "e",
1133
+ "o",
1134
+ "s",
1135
+ "i",
1136
+ "r",
1137
+ "d",
1138
+ "n",
1139
+ "t",
1140
+ "m",
1141
+ "u",
1142
+ "c",
1143
+ "l",
1144
+ "p",
1145
+ "g",
1146
+ "v",
1147
+ "b",
1148
+ "f",
1149
+ "h",
1150
+ "ã",
1151
+ "q",
1152
+ "é",
1153
+ "ç",
1154
+ "á",
1155
+ "z",
1156
+ "í",
1157
+ ],
1158
+ "Swedish": [
1159
+ "e",
1160
+ "a",
1161
+ "n",
1162
+ "r",
1163
+ "t",
1164
+ "s",
1165
+ "i",
1166
+ "l",
1167
+ "d",
1168
+ "o",
1169
+ "m",
1170
+ "k",
1171
+ "g",
1172
+ "v",
1173
+ "h",
1174
+ "f",
1175
+ "u",
1176
+ "p",
1177
+ "ä",
1178
+ "c",
1179
+ "b",
1180
+ "ö",
1181
+ "å",
1182
+ "y",
1183
+ "j",
1184
+ "x",
1185
+ ],
1186
+ "Chinese": [
1187
+ "的",
1188
+ "一",
1189
+ "是",
1190
+ "不",
1191
+ "了",
1192
+ "在",
1193
+ "人",
1194
+ "有",
1195
+ "我",
1196
+ "他",
1197
+ "这",
1198
+ "个",
1199
+ "们",
1200
+ "中",
1201
+ "来",
1202
+ "上",
1203
+ "大",
1204
+ "为",
1205
+ "和",
1206
+ "国",
1207
+ "地",
1208
+ "到",
1209
+ "以",
1210
+ "说",
1211
+ "时",
1212
+ "要",
1213
+ "就",
1214
+ "出",
1215
+ "会",
1216
+ "可",
1217
+ "也",
1218
+ "你",
1219
+ "对",
1220
+ "生",
1221
+ "能",
1222
+ "而",
1223
+ "子",
1224
+ "那",
1225
+ "得",
1226
+ "于",
1227
+ "着",
1228
+ "下",
1229
+ "自",
1230
+ "之",
1231
+ "年",
1232
+ "过",
1233
+ "发",
1234
+ "后",
1235
+ "作",
1236
+ "里",
1237
+ "用",
1238
+ "道",
1239
+ "行",
1240
+ "所",
1241
+ "然",
1242
+ "家",
1243
+ "种",
1244
+ "事",
1245
+ "成",
1246
+ "方",
1247
+ "多",
1248
+ "经",
1249
+ "么",
1250
+ "去",
1251
+ "法",
1252
+ "学",
1253
+ "如",
1254
+ "都",
1255
+ "同",
1256
+ "现",
1257
+ "当",
1258
+ "没",
1259
+ "动",
1260
+ "面",
1261
+ "起",
1262
+ "看",
1263
+ "定",
1264
+ "天",
1265
+ "分",
1266
+ "还",
1267
+ "进",
1268
+ "好",
1269
+ "小",
1270
+ "部",
1271
+ "其",
1272
+ "些",
1273
+ "主",
1274
+ "样",
1275
+ "理",
1276
+ "心",
1277
+ "她",
1278
+ "本",
1279
+ "前",
1280
+ "开",
1281
+ "但",
1282
+ "因",
1283
+ "只",
1284
+ "从",
1285
+ "想",
1286
+ "实",
1287
+ ],
1288
+ "Ukrainian": [
1289
+ "о",
1290
+ "а",
1291
+ "н",
1292
+ "і",
1293
+ "и",
1294
+ "р",
1295
+ "в",
1296
+ "т",
1297
+ "е",
1298
+ "с",
1299
+ "к",
1300
+ "л",
1301
+ "у",
1302
+ "д",
1303
+ "м",
1304
+ "п",
1305
+ "з",
1306
+ "я",
1307
+ "ь",
1308
+ "б",
1309
+ "г",
1310
+ "й",
1311
+ "ч",
1312
+ "х",
1313
+ "ц",
1314
+ "ї",
1315
+ ],
1316
+ "Norwegian": [
1317
+ "e",
1318
+ "r",
1319
+ "n",
1320
+ "t",
1321
+ "a",
1322
+ "s",
1323
+ "i",
1324
+ "o",
1325
+ "l",
1326
+ "d",
1327
+ "g",
1328
+ "k",
1329
+ "m",
1330
+ "v",
1331
+ "f",
1332
+ "p",
1333
+ "u",
1334
+ "b",
1335
+ "h",
1336
+ "å",
1337
+ "y",
1338
+ "j",
1339
+ "ø",
1340
+ "c",
1341
+ "æ",
1342
+ "w",
1343
+ ],
1344
+ "Finnish": [
1345
+ "a",
1346
+ "i",
1347
+ "n",
1348
+ "t",
1349
+ "e",
1350
+ "s",
1351
+ "l",
1352
+ "o",
1353
+ "u",
1354
+ "k",
1355
+ "ä",
1356
+ "m",
1357
+ "r",
1358
+ "v",
1359
+ "j",
1360
+ "h",
1361
+ "p",
1362
+ "y",
1363
+ "d",
1364
+ "ö",
1365
+ "g",
1366
+ "c",
1367
+ "b",
1368
+ "f",
1369
+ "w",
1370
+ "z",
1371
+ ],
1372
+ "Vietnamese": [
1373
+ "n",
1374
+ "h",
1375
+ "t",
1376
+ "i",
1377
+ "c",
1378
+ "g",
1379
+ "a",
1380
+ "o",
1381
+ "u",
1382
+ "m",
1383
+ "l",
1384
+ "r",
1385
+ "à",
1386
+ "đ",
1387
+ "s",
1388
+ "e",
1389
+ "v",
1390
+ "p",
1391
+ "b",
1392
+ "y",
1393
+ "ư",
1394
+ "d",
1395
+ "á",
1396
+ "k",
1397
+ "ộ",
1398
+ "ế",
1399
+ ],
1400
+ "Czech": [
1401
+ "o",
1402
+ "e",
1403
+ "a",
1404
+ "n",
1405
+ "t",
1406
+ "s",
1407
+ "i",
1408
+ "l",
1409
+ "v",
1410
+ "r",
1411
+ "k",
1412
+ "d",
1413
+ "u",
1414
+ "m",
1415
+ "p",
1416
+ "í",
1417
+ "c",
1418
+ "h",
1419
+ "z",
1420
+ "á",
1421
+ "y",
1422
+ "j",
1423
+ "b",
1424
+ "ě",
1425
+ "é",
1426
+ "ř",
1427
+ ],
1428
+ "Hungarian": [
1429
+ "e",
1430
+ "a",
1431
+ "t",
1432
+ "l",
1433
+ "s",
1434
+ "n",
1435
+ "k",
1436
+ "r",
1437
+ "i",
1438
+ "o",
1439
+ "z",
1440
+ "á",
1441
+ "é",
1442
+ "g",
1443
+ "m",
1444
+ "b",
1445
+ "y",
1446
+ "v",
1447
+ "d",
1448
+ "h",
1449
+ "u",
1450
+ "p",
1451
+ "j",
1452
+ "ö",
1453
+ "f",
1454
+ "c",
1455
+ ],
1456
+ "Korean": [
1457
+ "이",
1458
+ "다",
1459
+ "에",
1460
+ "의",
1461
+ "는",
1462
+ "로",
1463
+ "하",
1464
+ "을",
1465
+ "가",
1466
+ "고",
1467
+ "지",
1468
+ "서",
1469
+ "한",
1470
+ "은",
1471
+ "기",
1472
+ "으",
1473
+ "년",
1474
+ "대",
1475
+ "사",
1476
+ "시",
1477
+ "를",
1478
+ "리",
1479
+ "도",
1480
+ "인",
1481
+ "스",
1482
+ "일",
1483
+ ],
1484
+ "Indonesian": [
1485
+ "a",
1486
+ "n",
1487
+ "e",
1488
+ "i",
1489
+ "r",
1490
+ "t",
1491
+ "u",
1492
+ "s",
1493
+ "d",
1494
+ "k",
1495
+ "m",
1496
+ "l",
1497
+ "g",
1498
+ "p",
1499
+ "b",
1500
+ "o",
1501
+ "h",
1502
+ "y",
1503
+ "j",
1504
+ "c",
1505
+ "w",
1506
+ "f",
1507
+ "v",
1508
+ "z",
1509
+ "x",
1510
+ "q",
1511
+ ],
1512
+ "Turkish": [
1513
+ "a",
1514
+ "e",
1515
+ "i",
1516
+ "n",
1517
+ "r",
1518
+ "l",
1519
+ "ı",
1520
+ "k",
1521
+ "d",
1522
+ "t",
1523
+ "s",
1524
+ "m",
1525
+ "y",
1526
+ "u",
1527
+ "o",
1528
+ "b",
1529
+ "ü",
1530
+ "ş",
1531
+ "v",
1532
+ "g",
1533
+ "z",
1534
+ "h",
1535
+ "c",
1536
+ "p",
1537
+ "ç",
1538
+ "ğ",
1539
+ ],
1540
+ "Romanian": [
1541
+ "e",
1542
+ "i",
1543
+ "a",
1544
+ "r",
1545
+ "n",
1546
+ "t",
1547
+ "u",
1548
+ "l",
1549
+ "o",
1550
+ "c",
1551
+ "s",
1552
+ "d",
1553
+ "p",
1554
+ "m",
1555
+ "ă",
1556
+ "f",
1557
+ "v",
1558
+ "î",
1559
+ "g",
1560
+ "b",
1561
+ "ș",
1562
+ "ț",
1563
+ "z",
1564
+ "h",
1565
+ "â",
1566
+ "j",
1567
+ ],
1568
+ "Farsi": [
1569
+ "ا",
1570
+ "ی",
1571
+ "ر",
1572
+ "د",
1573
+ "ن",
1574
+ "ه",
1575
+ "و",
1576
+ "م",
1577
+ "ت",
1578
+ "ب",
1579
+ "س",
1580
+ "ل",
1581
+ "ک",
1582
+ "ش",
1583
+ "ز",
1584
+ "ف",
1585
+ "گ",
1586
+ "ع",
1587
+ "خ",
1588
+ "ق",
1589
+ "ج",
1590
+ "آ",
1591
+ "پ",
1592
+ "ح",
1593
+ "ط",
1594
+ "ص",
1595
+ ],
1596
+ "Arabic": [
1597
+ "ا",
1598
+ "ل",
1599
+ "ي",
1600
+ "م",
1601
+ "و",
1602
+ "ن",
1603
+ "ر",
1604
+ "ت",
1605
+ "ب",
1606
+ "ة",
1607
+ "ع",
1608
+ "د",
1609
+ "س",
1610
+ "ف",
1611
+ "ه",
1612
+ "ك",
1613
+ "ق",
1614
+ "أ",
1615
+ "ح",
1616
+ "ج",
1617
+ "ش",
1618
+ "ط",
1619
+ "ص",
1620
+ "ى",
1621
+ "خ",
1622
+ "إ",
1623
+ ],
1624
+ "Danish": [
1625
+ "e",
1626
+ "r",
1627
+ "n",
1628
+ "t",
1629
+ "a",
1630
+ "i",
1631
+ "s",
1632
+ "d",
1633
+ "l",
1634
+ "o",
1635
+ "g",
1636
+ "m",
1637
+ "k",
1638
+ "f",
1639
+ "v",
1640
+ "u",
1641
+ "b",
1642
+ "h",
1643
+ "p",
1644
+ "å",
1645
+ "y",
1646
+ "ø",
1647
+ "æ",
1648
+ "c",
1649
+ "j",
1650
+ "w",
1651
+ ],
1652
+ "Serbian": [
1653
+ "а",
1654
+ "и",
1655
+ "о",
1656
+ "е",
1657
+ "н",
1658
+ "р",
1659
+ "с",
1660
+ "у",
1661
+ "т",
1662
+ "к",
1663
+ "ј",
1664
+ "в",
1665
+ "д",
1666
+ "м",
1667
+ "п",
1668
+ "л",
1669
+ "г",
1670
+ "з",
1671
+ "б",
1672
+ "a",
1673
+ "i",
1674
+ "e",
1675
+ "o",
1676
+ "n",
1677
+ "ц",
1678
+ "ш",
1679
+ ],
1680
+ "Lithuanian": [
1681
+ "i",
1682
+ "a",
1683
+ "s",
1684
+ "o",
1685
+ "r",
1686
+ "e",
1687
+ "t",
1688
+ "n",
1689
+ "u",
1690
+ "k",
1691
+ "m",
1692
+ "l",
1693
+ "p",
1694
+ "v",
1695
+ "d",
1696
+ "j",
1697
+ "g",
1698
+ "ė",
1699
+ "b",
1700
+ "y",
1701
+ "ų",
1702
+ "š",
1703
+ "ž",
1704
+ "c",
1705
+ "ą",
1706
+ "į",
1707
+ ],
1708
+ "Slovene": [
1709
+ "e",
1710
+ "a",
1711
+ "i",
1712
+ "o",
1713
+ "n",
1714
+ "r",
1715
+ "s",
1716
+ "l",
1717
+ "t",
1718
+ "j",
1719
+ "v",
1720
+ "k",
1721
+ "d",
1722
+ "p",
1723
+ "m",
1724
+ "u",
1725
+ "z",
1726
+ "b",
1727
+ "g",
1728
+ "h",
1729
+ "č",
1730
+ "c",
1731
+ "š",
1732
+ "ž",
1733
+ "f",
1734
+ "y",
1735
+ ],
1736
+ "Slovak": [
1737
+ "o",
1738
+ "a",
1739
+ "e",
1740
+ "n",
1741
+ "i",
1742
+ "r",
1743
+ "v",
1744
+ "t",
1745
+ "s",
1746
+ "l",
1747
+ "k",
1748
+ "d",
1749
+ "m",
1750
+ "p",
1751
+ "u",
1752
+ "c",
1753
+ "h",
1754
+ "j",
1755
+ "b",
1756
+ "z",
1757
+ "á",
1758
+ "y",
1759
+ "ý",
1760
+ "í",
1761
+ "č",
1762
+ "é",
1763
+ ],
1764
+ "Hebrew": [
1765
+ "י",
1766
+ "ו",
1767
+ "ה",
1768
+ "ל",
1769
+ "ר",
1770
+ "ב",
1771
+ "ת",
1772
+ "מ",
1773
+ "א",
1774
+ "ש",
1775
+ "נ",
1776
+ "ע",
1777
+ "ם",
1778
+ "ד",
1779
+ "ק",
1780
+ "ח",
1781
+ "פ",
1782
+ "ס",
1783
+ "כ",
1784
+ "ג",
1785
+ "ט",
1786
+ "צ",
1787
+ "ן",
1788
+ "ז",
1789
+ "ך",
1790
+ ],
1791
+ "Bulgarian": [
1792
+ "а",
1793
+ "и",
1794
+ "о",
1795
+ "е",
1796
+ "н",
1797
+ "т",
1798
+ "р",
1799
+ "с",
1800
+ "в",
1801
+ "л",
1802
+ "к",
1803
+ "д",
1804
+ "п",
1805
+ "м",
1806
+ "з",
1807
+ "г",
1808
+ "я",
1809
+ "ъ",
1810
+ "у",
1811
+ "б",
1812
+ "ч",
1813
+ "ц",
1814
+ "й",
1815
+ "ж",
1816
+ "щ",
1817
+ "х",
1818
+ ],
1819
+ "Croatian": [
1820
+ "a",
1821
+ "i",
1822
+ "o",
1823
+ "e",
1824
+ "n",
1825
+ "r",
1826
+ "j",
1827
+ "s",
1828
+ "t",
1829
+ "u",
1830
+ "k",
1831
+ "l",
1832
+ "v",
1833
+ "d",
1834
+ "m",
1835
+ "p",
1836
+ "g",
1837
+ "z",
1838
+ "b",
1839
+ "c",
1840
+ "č",
1841
+ "h",
1842
+ "š",
1843
+ "ž",
1844
+ "ć",
1845
+ "f",
1846
+ ],
1847
+ "Hindi": [
1848
+ "क",
1849
+ "र",
1850
+ "स",
1851
+ "न",
1852
+ "त",
1853
+ "म",
1854
+ "ह",
1855
+ "प",
1856
+ "य",
1857
+ "ल",
1858
+ "व",
1859
+ "ज",
1860
+ "द",
1861
+ "ग",
1862
+ "ब",
1863
+ "श",
1864
+ "ट",
1865
+ "अ",
1866
+ "ए",
1867
+ "थ",
1868
+ "भ",
1869
+ "ड",
1870
+ "च",
1871
+ "ध",
1872
+ "ष",
1873
+ "इ",
1874
+ ],
1875
+ "Estonian": [
1876
+ "a",
1877
+ "i",
1878
+ "e",
1879
+ "s",
1880
+ "t",
1881
+ "l",
1882
+ "u",
1883
+ "n",
1884
+ "o",
1885
+ "k",
1886
+ "r",
1887
+ "d",
1888
+ "m",
1889
+ "v",
1890
+ "g",
1891
+ "p",
1892
+ "j",
1893
+ "h",
1894
+ "ä",
1895
+ "b",
1896
+ "õ",
1897
+ "ü",
1898
+ "f",
1899
+ "c",
1900
+ "ö",
1901
+ "y",
1902
+ ],
1903
+ "Thai": [
1904
+ "า",
1905
+ "น",
1906
+ "ร",
1907
+ "อ",
1908
+ "ก",
1909
+ "เ",
1910
+ "ง",
1911
+ "ม",
1912
+ "ย",
1913
+ "ล",
1914
+ "ว",
1915
+ "ด",
1916
+ "ท",
1917
+ "ส",
1918
+ "ต",
1919
+ "ะ",
1920
+ "ป",
1921
+ "บ",
1922
+ "ค",
1923
+ "ห",
1924
+ "แ",
1925
+ "จ",
1926
+ "พ",
1927
+ "ช",
1928
+ "ข",
1929
+ "ใ",
1930
+ ],
1931
+ "Greek": [
1932
+ "α",
1933
+ "τ",
1934
+ "ο",
1935
+ "ι",
1936
+ "ε",
1937
+ "ν",
1938
+ "ρ",
1939
+ "σ",
1940
+ "κ",
1941
+ "η",
1942
+ "π",
1943
+ "ς",
1944
+ "υ",
1945
+ "μ",
1946
+ "λ",
1947
+ "ί",
1948
+ "ό",
1949
+ "ά",
1950
+ "γ",
1951
+ "έ",
1952
+ "δ",
1953
+ "ή",
1954
+ "ω",
1955
+ "χ",
1956
+ "θ",
1957
+ "ύ",
1958
+ ],
1959
+ "Tamil": [
1960
+ "க",
1961
+ "த",
1962
+ "ப",
1963
+ "ட",
1964
+ "ர",
1965
+ "ம",
1966
+ "ல",
1967
+ "ன",
1968
+ "வ",
1969
+ "ற",
1970
+ "ய",
1971
+ "ள",
1972
+ "ச",
1973
+ "ந",
1974
+ "இ",
1975
+ "ண",
1976
+ "அ",
1977
+ "ஆ",
1978
+ "ழ",
1979
+ "ங",
1980
+ "எ",
1981
+ "உ",
1982
+ "ஒ",
1983
+ "ஸ",
1984
+ ],
1985
+ "Kazakh": [
1986
+ "а",
1987
+ "ы",
1988
+ "е",
1989
+ "н",
1990
+ "т",
1991
+ "р",
1992
+ "л",
1993
+ "і",
1994
+ "д",
1995
+ "с",
1996
+ "м",
1997
+ "қ",
1998
+ "к",
1999
+ "о",
2000
+ "б",
2001
+ "и",
2002
+ "у",
2003
+ "ғ",
2004
+ "ж",
2005
+ "ң",
2006
+ "з",
2007
+ "ш",
2008
+ "й",
2009
+ "п",
2010
+ "г",
2011
+ "ө",
2012
+ ],
2013
+ }
2014
+
2015
+ LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
venv/lib/python3.10/site-packages/charset_normalizer/legacy.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any
4
+ from warnings import warn
5
+
6
+ from .api import from_bytes
7
+ from .constant import CHARDET_CORRESPONDENCE, TOO_SMALL_SEQUENCE
8
+
9
+ # TODO: remove this check when dropping Python 3.7 support
10
+ if TYPE_CHECKING:
11
+ from typing_extensions import TypedDict
12
+
13
+ class ResultDict(TypedDict):
14
+ encoding: str | None
15
+ language: str
16
+ confidence: float | None
17
+
18
+
19
+ def detect(
20
+ byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
21
+ ) -> ResultDict:
22
+ """
23
+ chardet legacy method
24
+ Detect the encoding of the given byte string. It should be mostly backward-compatible.
25
+ Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it)
26
+ This function is deprecated and should be used to migrate your project easily, consult the documentation for
27
+ further information. Not planned for removal.
28
+
29
+ :param byte_str: The byte sequence to examine.
30
+ :param should_rename_legacy: Should we rename legacy encodings
31
+ to their more modern equivalents?
32
+ """
33
+ if len(kwargs):
34
+ warn(
35
+ f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
36
+ )
37
+
38
+ if not isinstance(byte_str, (bytearray, bytes)):
39
+ raise TypeError( # pragma: nocover
40
+ f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
41
+ )
42
+
43
+ if isinstance(byte_str, bytearray):
44
+ byte_str = bytes(byte_str)
45
+
46
+ r = from_bytes(byte_str).best()
47
+
48
+ encoding = r.encoding if r is not None else None
49
+ language = r.language if r is not None and r.language != "Unknown" else ""
50
+ confidence = 1.0 - r.chaos if r is not None else None
51
+
52
+ # automatically lower confidence
53
+ # on small bytes samples.
54
+ # https://github.com/jawah/charset_normalizer/issues/391
55
+ if (
56
+ confidence is not None
57
+ and confidence >= 0.9
58
+ and encoding
59
+ not in {
60
+ "utf_8",
61
+ "ascii",
62
+ }
63
+ and r.bom is False # type: ignore[union-attr]
64
+ and len(byte_str) < TOO_SMALL_SEQUENCE
65
+ ):
66
+ confidence -= 0.2
67
+
68
+ # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
69
+ # but chardet does return 'utf-8-sig' and it is a valid codec name.
70
+ if r is not None and encoding == "utf_8" and r.bom:
71
+ encoding += "_sig"
72
+
73
+ if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
74
+ encoding = CHARDET_CORRESPONDENCE[encoding]
75
+
76
+ return {
77
+ "encoding": encoding,
78
+ "language": language,
79
+ "confidence": confidence,
80
+ }
venv/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (15.9 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/md.py ADDED
@@ -0,0 +1,635 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from functools import lru_cache
4
+ from logging import getLogger
5
+
6
+ from .constant import (
7
+ COMMON_SAFE_ASCII_CHARACTERS,
8
+ TRACE,
9
+ UNICODE_SECONDARY_RANGE_KEYWORD,
10
+ )
11
+ from .utils import (
12
+ is_accentuated,
13
+ is_arabic,
14
+ is_arabic_isolated_form,
15
+ is_case_variable,
16
+ is_cjk,
17
+ is_emoticon,
18
+ is_hangul,
19
+ is_hiragana,
20
+ is_katakana,
21
+ is_latin,
22
+ is_punctuation,
23
+ is_separator,
24
+ is_symbol,
25
+ is_thai,
26
+ is_unprintable,
27
+ remove_accent,
28
+ unicode_range,
29
+ is_cjk_uncommon,
30
+ )
31
+
32
+
33
+ class MessDetectorPlugin:
34
+ """
35
+ Base abstract class used for mess detection plugins.
36
+ All detectors MUST extend and implement given methods.
37
+ """
38
+
39
+ def eligible(self, character: str) -> bool:
40
+ """
41
+ Determine if given character should be fed in.
42
+ """
43
+ raise NotImplementedError # pragma: nocover
44
+
45
+ def feed(self, character: str) -> None:
46
+ """
47
+ The main routine to be executed upon character.
48
+ Insert the logic in witch the text would be considered chaotic.
49
+ """
50
+ raise NotImplementedError # pragma: nocover
51
+
52
+ def reset(self) -> None: # pragma: no cover
53
+ """
54
+ Permit to reset the plugin to the initial state.
55
+ """
56
+ raise NotImplementedError
57
+
58
+ @property
59
+ def ratio(self) -> float:
60
+ """
61
+ Compute the chaos ratio based on what your feed() has seen.
62
+ Must NOT be lower than 0.; No restriction gt 0.
63
+ """
64
+ raise NotImplementedError # pragma: nocover
65
+
66
+
67
+ class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
68
+ def __init__(self) -> None:
69
+ self._punctuation_count: int = 0
70
+ self._symbol_count: int = 0
71
+ self._character_count: int = 0
72
+
73
+ self._last_printable_char: str | None = None
74
+ self._frenzy_symbol_in_word: bool = False
75
+
76
+ def eligible(self, character: str) -> bool:
77
+ return character.isprintable()
78
+
79
+ def feed(self, character: str) -> None:
80
+ self._character_count += 1
81
+
82
+ if (
83
+ character != self._last_printable_char
84
+ and character not in COMMON_SAFE_ASCII_CHARACTERS
85
+ ):
86
+ if is_punctuation(character):
87
+ self._punctuation_count += 1
88
+ elif (
89
+ character.isdigit() is False
90
+ and is_symbol(character)
91
+ and is_emoticon(character) is False
92
+ ):
93
+ self._symbol_count += 2
94
+
95
+ self._last_printable_char = character
96
+
97
+ def reset(self) -> None: # Abstract
98
+ self._punctuation_count = 0
99
+ self._character_count = 0
100
+ self._symbol_count = 0
101
+
102
+ @property
103
+ def ratio(self) -> float:
104
+ if self._character_count == 0:
105
+ return 0.0
106
+
107
+ ratio_of_punctuation: float = (
108
+ self._punctuation_count + self._symbol_count
109
+ ) / self._character_count
110
+
111
+ return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
112
+
113
+
114
+ class TooManyAccentuatedPlugin(MessDetectorPlugin):
115
+ def __init__(self) -> None:
116
+ self._character_count: int = 0
117
+ self._accentuated_count: int = 0
118
+
119
+ def eligible(self, character: str) -> bool:
120
+ return character.isalpha()
121
+
122
+ def feed(self, character: str) -> None:
123
+ self._character_count += 1
124
+
125
+ if is_accentuated(character):
126
+ self._accentuated_count += 1
127
+
128
+ def reset(self) -> None: # Abstract
129
+ self._character_count = 0
130
+ self._accentuated_count = 0
131
+
132
+ @property
133
+ def ratio(self) -> float:
134
+ if self._character_count < 8:
135
+ return 0.0
136
+
137
+ ratio_of_accentuation: float = self._accentuated_count / self._character_count
138
+ return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
139
+
140
+
141
+ class UnprintablePlugin(MessDetectorPlugin):
142
+ def __init__(self) -> None:
143
+ self._unprintable_count: int = 0
144
+ self._character_count: int = 0
145
+
146
+ def eligible(self, character: str) -> bool:
147
+ return True
148
+
149
+ def feed(self, character: str) -> None:
150
+ if is_unprintable(character):
151
+ self._unprintable_count += 1
152
+ self._character_count += 1
153
+
154
+ def reset(self) -> None: # Abstract
155
+ self._unprintable_count = 0
156
+
157
+ @property
158
+ def ratio(self) -> float:
159
+ if self._character_count == 0:
160
+ return 0.0
161
+
162
+ return (self._unprintable_count * 8) / self._character_count
163
+
164
+
165
+ class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
166
+ def __init__(self) -> None:
167
+ self._successive_count: int = 0
168
+ self._character_count: int = 0
169
+
170
+ self._last_latin_character: str | None = None
171
+
172
+ def eligible(self, character: str) -> bool:
173
+ return character.isalpha() and is_latin(character)
174
+
175
+ def feed(self, character: str) -> None:
176
+ self._character_count += 1
177
+ if (
178
+ self._last_latin_character is not None
179
+ and is_accentuated(character)
180
+ and is_accentuated(self._last_latin_character)
181
+ ):
182
+ if character.isupper() and self._last_latin_character.isupper():
183
+ self._successive_count += 1
184
+ # Worse if its the same char duplicated with different accent.
185
+ if remove_accent(character) == remove_accent(self._last_latin_character):
186
+ self._successive_count += 1
187
+ self._last_latin_character = character
188
+
189
+ def reset(self) -> None: # Abstract
190
+ self._successive_count = 0
191
+ self._character_count = 0
192
+ self._last_latin_character = None
193
+
194
+ @property
195
+ def ratio(self) -> float:
196
+ if self._character_count == 0:
197
+ return 0.0
198
+
199
+ return (self._successive_count * 2) / self._character_count
200
+
201
+
202
+ class SuspiciousRange(MessDetectorPlugin):
203
+ def __init__(self) -> None:
204
+ self._suspicious_successive_range_count: int = 0
205
+ self._character_count: int = 0
206
+ self._last_printable_seen: str | None = None
207
+
208
+ def eligible(self, character: str) -> bool:
209
+ return character.isprintable()
210
+
211
+ def feed(self, character: str) -> None:
212
+ self._character_count += 1
213
+
214
+ if (
215
+ character.isspace()
216
+ or is_punctuation(character)
217
+ or character in COMMON_SAFE_ASCII_CHARACTERS
218
+ ):
219
+ self._last_printable_seen = None
220
+ return
221
+
222
+ if self._last_printable_seen is None:
223
+ self._last_printable_seen = character
224
+ return
225
+
226
+ unicode_range_a: str | None = unicode_range(self._last_printable_seen)
227
+ unicode_range_b: str | None = unicode_range(character)
228
+
229
+ if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
230
+ self._suspicious_successive_range_count += 1
231
+
232
+ self._last_printable_seen = character
233
+
234
+ def reset(self) -> None: # Abstract
235
+ self._character_count = 0
236
+ self._suspicious_successive_range_count = 0
237
+ self._last_printable_seen = None
238
+
239
+ @property
240
+ def ratio(self) -> float:
241
+ if self._character_count <= 13:
242
+ return 0.0
243
+
244
+ ratio_of_suspicious_range_usage: float = (
245
+ self._suspicious_successive_range_count * 2
246
+ ) / self._character_count
247
+
248
+ return ratio_of_suspicious_range_usage
249
+
250
+
251
+ class SuperWeirdWordPlugin(MessDetectorPlugin):
252
+ def __init__(self) -> None:
253
+ self._word_count: int = 0
254
+ self._bad_word_count: int = 0
255
+ self._foreign_long_count: int = 0
256
+
257
+ self._is_current_word_bad: bool = False
258
+ self._foreign_long_watch: bool = False
259
+
260
+ self._character_count: int = 0
261
+ self._bad_character_count: int = 0
262
+
263
+ self._buffer: str = ""
264
+ self._buffer_accent_count: int = 0
265
+ self._buffer_glyph_count: int = 0
266
+
267
+ def eligible(self, character: str) -> bool:
268
+ return True
269
+
270
+ def feed(self, character: str) -> None:
271
+ if character.isalpha():
272
+ self._buffer += character
273
+ if is_accentuated(character):
274
+ self._buffer_accent_count += 1
275
+ if (
276
+ self._foreign_long_watch is False
277
+ and (is_latin(character) is False or is_accentuated(character))
278
+ and is_cjk(character) is False
279
+ and is_hangul(character) is False
280
+ and is_katakana(character) is False
281
+ and is_hiragana(character) is False
282
+ and is_thai(character) is False
283
+ ):
284
+ self._foreign_long_watch = True
285
+ if (
286
+ is_cjk(character)
287
+ or is_hangul(character)
288
+ or is_katakana(character)
289
+ or is_hiragana(character)
290
+ or is_thai(character)
291
+ ):
292
+ self._buffer_glyph_count += 1
293
+ return
294
+ if not self._buffer:
295
+ return
296
+ if (
297
+ character.isspace() or is_punctuation(character) or is_separator(character)
298
+ ) and self._buffer:
299
+ self._word_count += 1
300
+ buffer_length: int = len(self._buffer)
301
+
302
+ self._character_count += buffer_length
303
+
304
+ if buffer_length >= 4:
305
+ if self._buffer_accent_count / buffer_length >= 0.5:
306
+ self._is_current_word_bad = True
307
+ # Word/Buffer ending with an upper case accentuated letter are so rare,
308
+ # that we will consider them all as suspicious. Same weight as foreign_long suspicious.
309
+ elif (
310
+ is_accentuated(self._buffer[-1])
311
+ and self._buffer[-1].isupper()
312
+ and all(_.isupper() for _ in self._buffer) is False
313
+ ):
314
+ self._foreign_long_count += 1
315
+ self._is_current_word_bad = True
316
+ elif self._buffer_glyph_count == 1:
317
+ self._is_current_word_bad = True
318
+ self._foreign_long_count += 1
319
+ if buffer_length >= 24 and self._foreign_long_watch:
320
+ camel_case_dst = [
321
+ i
322
+ for c, i in zip(self._buffer, range(0, buffer_length))
323
+ if c.isupper()
324
+ ]
325
+ probable_camel_cased: bool = False
326
+
327
+ if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3):
328
+ probable_camel_cased = True
329
+
330
+ if not probable_camel_cased:
331
+ self._foreign_long_count += 1
332
+ self._is_current_word_bad = True
333
+
334
+ if self._is_current_word_bad:
335
+ self._bad_word_count += 1
336
+ self._bad_character_count += len(self._buffer)
337
+ self._is_current_word_bad = False
338
+
339
+ self._foreign_long_watch = False
340
+ self._buffer = ""
341
+ self._buffer_accent_count = 0
342
+ self._buffer_glyph_count = 0
343
+ elif (
344
+ character not in {"<", ">", "-", "=", "~", "|", "_"}
345
+ and character.isdigit() is False
346
+ and is_symbol(character)
347
+ ):
348
+ self._is_current_word_bad = True
349
+ self._buffer += character
350
+
351
+ def reset(self) -> None: # Abstract
352
+ self._buffer = ""
353
+ self._is_current_word_bad = False
354
+ self._foreign_long_watch = False
355
+ self._bad_word_count = 0
356
+ self._word_count = 0
357
+ self._character_count = 0
358
+ self._bad_character_count = 0
359
+ self._foreign_long_count = 0
360
+
361
+ @property
362
+ def ratio(self) -> float:
363
+ if self._word_count <= 10 and self._foreign_long_count == 0:
364
+ return 0.0
365
+
366
+ return self._bad_character_count / self._character_count
367
+
368
+
369
+ class CjkUncommonPlugin(MessDetectorPlugin):
370
+ """
371
+ Detect messy CJK text that probably means nothing.
372
+ """
373
+
374
+ def __init__(self) -> None:
375
+ self._character_count: int = 0
376
+ self._uncommon_count: int = 0
377
+
378
+ def eligible(self, character: str) -> bool:
379
+ return is_cjk(character)
380
+
381
+ def feed(self, character: str) -> None:
382
+ self._character_count += 1
383
+
384
+ if is_cjk_uncommon(character):
385
+ self._uncommon_count += 1
386
+ return
387
+
388
+ def reset(self) -> None: # Abstract
389
+ self._character_count = 0
390
+ self._uncommon_count = 0
391
+
392
+ @property
393
+ def ratio(self) -> float:
394
+ if self._character_count < 8:
395
+ return 0.0
396
+
397
+ uncommon_form_usage: float = self._uncommon_count / self._character_count
398
+
399
+ # we can be pretty sure it's garbage when uncommon characters are widely
400
+ # used. otherwise it could just be traditional chinese for example.
401
+ return uncommon_form_usage / 10 if uncommon_form_usage > 0.5 else 0.0
402
+
403
+
404
+ class ArchaicUpperLowerPlugin(MessDetectorPlugin):
405
+ def __init__(self) -> None:
406
+ self._buf: bool = False
407
+
408
+ self._character_count_since_last_sep: int = 0
409
+
410
+ self._successive_upper_lower_count: int = 0
411
+ self._successive_upper_lower_count_final: int = 0
412
+
413
+ self._character_count: int = 0
414
+
415
+ self._last_alpha_seen: str | None = None
416
+ self._current_ascii_only: bool = True
417
+
418
+ def eligible(self, character: str) -> bool:
419
+ return True
420
+
421
+ def feed(self, character: str) -> None:
422
+ is_concerned = character.isalpha() and is_case_variable(character)
423
+ chunk_sep = is_concerned is False
424
+
425
+ if chunk_sep and self._character_count_since_last_sep > 0:
426
+ if (
427
+ self._character_count_since_last_sep <= 64
428
+ and character.isdigit() is False
429
+ and self._current_ascii_only is False
430
+ ):
431
+ self._successive_upper_lower_count_final += (
432
+ self._successive_upper_lower_count
433
+ )
434
+
435
+ self._successive_upper_lower_count = 0
436
+ self._character_count_since_last_sep = 0
437
+ self._last_alpha_seen = None
438
+ self._buf = False
439
+ self._character_count += 1
440
+ self._current_ascii_only = True
441
+
442
+ return
443
+
444
+ if self._current_ascii_only is True and character.isascii() is False:
445
+ self._current_ascii_only = False
446
+
447
+ if self._last_alpha_seen is not None:
448
+ if (character.isupper() and self._last_alpha_seen.islower()) or (
449
+ character.islower() and self._last_alpha_seen.isupper()
450
+ ):
451
+ if self._buf is True:
452
+ self._successive_upper_lower_count += 2
453
+ self._buf = False
454
+ else:
455
+ self._buf = True
456
+ else:
457
+ self._buf = False
458
+
459
+ self._character_count += 1
460
+ self._character_count_since_last_sep += 1
461
+ self._last_alpha_seen = character
462
+
463
+ def reset(self) -> None: # Abstract
464
+ self._character_count = 0
465
+ self._character_count_since_last_sep = 0
466
+ self._successive_upper_lower_count = 0
467
+ self._successive_upper_lower_count_final = 0
468
+ self._last_alpha_seen = None
469
+ self._buf = False
470
+ self._current_ascii_only = True
471
+
472
+ @property
473
+ def ratio(self) -> float:
474
+ if self._character_count == 0:
475
+ return 0.0
476
+
477
+ return self._successive_upper_lower_count_final / self._character_count
478
+
479
+
480
+ class ArabicIsolatedFormPlugin(MessDetectorPlugin):
481
+ def __init__(self) -> None:
482
+ self._character_count: int = 0
483
+ self._isolated_form_count: int = 0
484
+
485
+ def reset(self) -> None: # Abstract
486
+ self._character_count = 0
487
+ self._isolated_form_count = 0
488
+
489
+ def eligible(self, character: str) -> bool:
490
+ return is_arabic(character)
491
+
492
+ def feed(self, character: str) -> None:
493
+ self._character_count += 1
494
+
495
+ if is_arabic_isolated_form(character):
496
+ self._isolated_form_count += 1
497
+
498
+ @property
499
+ def ratio(self) -> float:
500
+ if self._character_count < 8:
501
+ return 0.0
502
+
503
+ isolated_form_usage: float = self._isolated_form_count / self._character_count
504
+
505
+ return isolated_form_usage
506
+
507
+
508
+ @lru_cache(maxsize=1024)
509
+ def is_suspiciously_successive_range(
510
+ unicode_range_a: str | None, unicode_range_b: str | None
511
+ ) -> bool:
512
+ """
513
+ Determine if two Unicode range seen next to each other can be considered as suspicious.
514
+ """
515
+ if unicode_range_a is None or unicode_range_b is None:
516
+ return True
517
+
518
+ if unicode_range_a == unicode_range_b:
519
+ return False
520
+
521
+ if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
522
+ return False
523
+
524
+ if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
525
+ return False
526
+
527
+ # Latin characters can be accompanied with a combining diacritical mark
528
+ # eg. Vietnamese.
529
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
530
+ "Combining" in unicode_range_a or "Combining" in unicode_range_b
531
+ ):
532
+ return False
533
+
534
+ keywords_range_a, keywords_range_b = (
535
+ unicode_range_a.split(" "),
536
+ unicode_range_b.split(" "),
537
+ )
538
+
539
+ for el in keywords_range_a:
540
+ if el in UNICODE_SECONDARY_RANGE_KEYWORD:
541
+ continue
542
+ if el in keywords_range_b:
543
+ return False
544
+
545
+ # Japanese Exception
546
+ range_a_jp_chars, range_b_jp_chars = (
547
+ unicode_range_a
548
+ in (
549
+ "Hiragana",
550
+ "Katakana",
551
+ ),
552
+ unicode_range_b in ("Hiragana", "Katakana"),
553
+ )
554
+ if (range_a_jp_chars or range_b_jp_chars) and (
555
+ "CJK" in unicode_range_a or "CJK" in unicode_range_b
556
+ ):
557
+ return False
558
+ if range_a_jp_chars and range_b_jp_chars:
559
+ return False
560
+
561
+ if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
562
+ if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
563
+ return False
564
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
565
+ return False
566
+
567
+ # Chinese/Japanese use dedicated range for punctuation and/or separators.
568
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
569
+ unicode_range_a in ["Katakana", "Hiragana"]
570
+ and unicode_range_b in ["Katakana", "Hiragana"]
571
+ ):
572
+ if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
573
+ return False
574
+ if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
575
+ return False
576
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
577
+ return False
578
+
579
+ return True
580
+
581
+
582
+ @lru_cache(maxsize=2048)
583
+ def mess_ratio(
584
+ decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
585
+ ) -> float:
586
+ """
587
+ Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier.
588
+ """
589
+
590
+ detectors: list[MessDetectorPlugin] = [
591
+ md_class() for md_class in MessDetectorPlugin.__subclasses__()
592
+ ]
593
+
594
+ length: int = len(decoded_sequence) + 1
595
+
596
+ mean_mess_ratio: float = 0.0
597
+
598
+ if length < 512:
599
+ intermediary_mean_mess_ratio_calc: int = 32
600
+ elif length <= 1024:
601
+ intermediary_mean_mess_ratio_calc = 64
602
+ else:
603
+ intermediary_mean_mess_ratio_calc = 128
604
+
605
+ for character, index in zip(decoded_sequence + "\n", range(length)):
606
+ for detector in detectors:
607
+ if detector.eligible(character):
608
+ detector.feed(character)
609
+
610
+ if (
611
+ index > 0 and index % intermediary_mean_mess_ratio_calc == 0
612
+ ) or index == length - 1:
613
+ mean_mess_ratio = sum(dt.ratio for dt in detectors)
614
+
615
+ if mean_mess_ratio >= maximum_threshold:
616
+ break
617
+
618
+ if debug:
619
+ logger = getLogger("charset_normalizer")
620
+
621
+ logger.log(
622
+ TRACE,
623
+ "Mess-detector extended-analysis start. "
624
+ f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} "
625
+ f"maximum_threshold={maximum_threshold}",
626
+ )
627
+
628
+ if len(decoded_sequence) > 16:
629
+ logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
630
+ logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
631
+
632
+ for dt in detectors:
633
+ logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
634
+
635
+ return round(mean_mess_ratio, 3)
venv/lib/python3.10/site-packages/charset_normalizer/models.py ADDED
@@ -0,0 +1,360 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from encodings.aliases import aliases
4
+ from hashlib import sha256
5
+ from json import dumps
6
+ from re import sub
7
+ from typing import Any, Iterator, List, Tuple
8
+
9
+ from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE
10
+ from .utils import iana_name, is_multi_byte_encoding, unicode_range
11
+
12
+
13
class CharsetMatch:
    """A single decoding hypothesis: raw payload + guessed encoding + quality metrics."""

    def __init__(
        self,
        payload: bytes,
        guessed_encoding: str,
        mean_mess_ratio: float,
        has_sig_or_bom: bool,
        languages: CoherenceMatches,
        decoded_payload: str | None = None,
        preemptive_declaration: str | None = None,
    ):
        self._payload: bytes = payload

        self._encoding: str = guessed_encoding
        self._mean_mess_ratio: float = mean_mess_ratio
        self._languages: CoherenceMatches = languages
        self._has_sig_or_bom: bool = has_sig_or_bom
        self._unicode_ranges: list[str] | None = None  # lazy cache for `alphabets`

        self._leaves: list[CharsetMatch] = []  # equivalent matches (same decoded output)
        self._mean_coherence_ratio: float = 0.0

        self._output_payload: bytes | None = None  # cache for output()
        self._output_encoding: str | None = None

        self._string: str | None = decoded_payload  # lazy cache for str(self)

        self._preemptive_declaration: str | None = preemptive_declaration

    def __eq__(self, other: object) -> bool:
        # A plain string compares against the normalized encoding name.
        if not isinstance(other, CharsetMatch):
            if isinstance(other, str):
                return iana_name(other) == self.encoding
            return False
        return self.encoding == other.encoding and self.fingerprint == other.fingerprint

    def __lt__(self, other: object) -> bool:
        """
        Implemented to make sorted available upon CharsetMatches items.
        """
        if not isinstance(other, CharsetMatch):
            raise ValueError

        chaos_difference: float = abs(self.chaos - other.chaos)
        coherence_difference: float = abs(self.coherence - other.coherence)

        # Below 1% difference --> Use Coherence
        if chaos_difference < 0.01 and coherence_difference > 0.02:
            return self.coherence > other.coherence
        elif chaos_difference < 0.01 and coherence_difference <= 0.02:
            # When having a difficult decision, use the result that decoded as many multi-byte as possible.
            # preserve RAM usage!
            if len(self._payload) >= TOO_BIG_SEQUENCE:
                return self.chaos < other.chaos
            return self.multi_byte_usage > other.multi_byte_usage

        return self.chaos < other.chaos

    @property
    def multi_byte_usage(self) -> float:
        # Fraction of raw bytes that were consumed by multi-byte sequences.
        return 1.0 - (len(str(self)) / len(self.raw))

    def __str__(self) -> str:
        # Lazy Str Loading
        if self._string is None:
            self._string = str(self._payload, self._encoding, "strict")
        return self._string

    def __repr__(self) -> str:
        return f"<CharsetMatch '{self.encoding}' bytes({self.fingerprint})>"

    def add_submatch(self, other: CharsetMatch) -> None:
        # Record another encoding that produced the exact same decoded output.
        if not isinstance(other, CharsetMatch) or other == self:
            raise ValueError(
                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
                    other.__class__
                )
            )

        other._string = None  # Unload RAM usage; dirty trick.
        self._leaves.append(other)

    @property
    def encoding(self) -> str:
        return self._encoding

    @property
    def encoding_aliases(self) -> list[str]:
        """
        Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
        """
        also_known_as: list[str] = []
        for u, p in aliases.items():
            if self.encoding == u:
                also_known_as.append(p)
            elif self.encoding == p:
                also_known_as.append(u)
        return also_known_as

    @property
    def bom(self) -> bool:
        return self._has_sig_or_bom

    @property
    def byte_order_mark(self) -> bool:
        # Alias of `bom` kept for readability/BC.
        return self._has_sig_or_bom

    @property
    def languages(self) -> list[str]:
        """
        Return the complete list of possible languages found in decoded sequence.
        Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
        """
        return [e[0] for e in self._languages]

    @property
    def language(self) -> str:
        """
        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
        "Unknown".
        """
        if not self._languages:
            # Trying to infer the language based on the given encoding
            # Its either English or we should not pronounce ourselves in certain cases.
            if "ascii" in self.could_be_from_charset:
                return "English"

            # doing it there to avoid circular import
            from charset_normalizer.cd import encoding_languages, mb_encoding_languages

            languages = (
                mb_encoding_languages(self.encoding)
                if is_multi_byte_encoding(self.encoding)
                else encoding_languages(self.encoding)
            )

            if len(languages) == 0 or "Latin Based" in languages:
                return "Unknown"

            return languages[0]

        return self._languages[0][0]

    @property
    def chaos(self) -> float:
        # Mess ratio: 0.0 (clean) .. 1.0 (garbage).
        return self._mean_mess_ratio

    @property
    def coherence(self) -> float:
        if not self._languages:
            return 0.0
        return self._languages[0][1]

    @property
    def percent_chaos(self) -> float:
        return round(self.chaos * 100, ndigits=3)

    @property
    def percent_coherence(self) -> float:
        return round(self.coherence * 100, ndigits=3)

    @property
    def raw(self) -> bytes:
        """
        Original untouched bytes.
        """
        return self._payload

    @property
    def submatch(self) -> list[CharsetMatch]:
        return self._leaves

    @property
    def has_submatch(self) -> bool:
        return len(self._leaves) > 0

    @property
    def alphabets(self) -> list[str]:
        # Cached on first access; depends only on the (immutable) decoded string.
        if self._unicode_ranges is not None:
            return self._unicode_ranges
        # list detected ranges
        detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)]
        # filter and sort
        self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
        return self._unicode_ranges

    @property
    def could_be_from_charset(self) -> list[str]:
        """
        The complete list of encoding that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in property 'encoding'.
        """
        return [self._encoding] + [m.encoding for m in self._leaves]

    def output(self, encoding: str = "utf_8") -> bytes:
        """
        Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
        Any errors will be simply ignored by the encoder NOT replaced.
        """
        if self._output_encoding is None or self._output_encoding != encoding:
            self._output_encoding = encoding
            decoded_string = str(self)
            if (
                self._preemptive_declaration is not None
                and self._preemptive_declaration.lower()
                not in ["utf-8", "utf8", "utf_8"]
            ):
                # Rewrite the embedded charset declaration (first 8 KiB only) so
                # the re-encoded document announces the target encoding.
                patched_header = sub(
                    RE_POSSIBLE_ENCODING_INDICATION,
                    lambda m: m.string[m.span()[0] : m.span()[1]].replace(
                        m.groups()[0],
                        iana_name(self._output_encoding).replace("_", "-"),  # type: ignore[arg-type]
                    ),
                    decoded_string[:8192],
                    count=1,
                )

                decoded_string = patched_header + decoded_string[8192:]

            self._output_payload = decoded_string.encode(encoding, "replace")

        return self._output_payload  # type: ignore

    @property
    def fingerprint(self) -> str:
        """
        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
        """
        return sha256(self.output()).hexdigest()
243
+
244
+
245
class CharsetMatches:
    """
    Container with every CharsetMatch items ordered by default from most probable to the less one.
    Act like a list(iterable) but does not implements all related methods.
    """

    def __init__(self, results: list[CharsetMatch] | None = None):
        # Internal storage stays permanently sorted: best candidate first.
        self._results: list[CharsetMatch] = sorted(results) if results else []

    def __iter__(self) -> Iterator[CharsetMatch]:
        return iter(self._results)

    def __getitem__(self, item: int | str) -> CharsetMatch:
        """
        Retrieve a single item either by its position or encoding name (alias may be used here).
        Raise KeyError upon invalid index or encoding not present in results.
        """
        if isinstance(item, int):
            return self._results[item]
        if not isinstance(item, str):
            raise KeyError
        wanted = iana_name(item, False)
        for candidate in self._results:
            if wanted in candidate.could_be_from_charset:
                return candidate
        raise KeyError

    def __len__(self) -> int:
        return len(self._results)

    def __bool__(self) -> bool:
        return bool(self._results)

    def append(self, item: CharsetMatch) -> None:
        """
        Insert a single match. Will be inserted accordingly to preserve sort.
        Can be inserted as a submatch.
        """
        if not isinstance(item, CharsetMatch):
            raise ValueError(
                "Cannot append instance '{}' to CharsetMatches".format(
                    str(item.__class__)
                )
            )
        # Submatch factoring is skipped on very large payloads to conserve RAM.
        if len(item.raw) < TOO_BIG_SEQUENCE:
            for existing in self._results:
                if existing.fingerprint == item.fingerprint and existing.chaos == item.chaos:
                    existing.add_submatch(item)
                    return
        self._results.append(item)
        self._results.sort()

    def best(self) -> CharsetMatch | None:
        """
        Simply return the first match. Strict equivalent to matches[0].
        """
        return self._results[0] if self._results else None

    def first(self) -> CharsetMatch | None:
        """
        Redundant method, call the method best(). Kept for BC reasons.
        """
        return self.best()
310
+
311
+
312
# (language name, coherence ratio) pair and its list form, as consumed by
# CharsetMatch and produced by the coherence-detection module.
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
314
+
315
+
316
class CliDetectionResult:
    """JSON-serializable result record for the CLI detector (one per analyzed file)."""

    def __init__(
        self,
        path: str,
        encoding: str | None,
        encoding_aliases: list[str],
        alternative_encodings: list[str],
        language: str,
        alphabets: list[str],
        has_sig_or_bom: bool,
        chaos: float,
        coherence: float,
        unicode_path: str | None,
        is_preferred: bool,
    ):
        self.path: str = path
        self.unicode_path: str | None = unicode_path
        self.encoding: str | None = encoding
        self.encoding_aliases: list[str] = encoding_aliases
        self.alternative_encodings: list[str] = alternative_encodings
        self.language: str = language
        self.alphabets: list[str] = alphabets
        self.has_sig_or_bom: bool = has_sig_or_bom
        self.chaos: float = chaos
        self.coherence: float = coherence
        self.is_preferred: bool = is_preferred

    @property
    def __dict__(self) -> dict[str, Any]:  # type: ignore
        # Field order below defines the CLI's stable JSON output order.
        field_names = (
            "path",
            "encoding",
            "encoding_aliases",
            "alternative_encodings",
            "language",
            "alphabets",
            "has_sig_or_bom",
            "chaos",
            "coherence",
            "unicode_path",
            "is_preferred",
        )
        return {field: getattr(self, field) for field in field_names}

    def to_json(self) -> str:
        """Render the record as pretty-printed, ASCII-safe JSON."""
        return dumps(self.__dict__, ensure_ascii=True, indent=4)
venv/lib/python3.10/site-packages/charset_normalizer/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/charset_normalizer/utils.py ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import logging
5
+ import unicodedata
6
+ from codecs import IncrementalDecoder
7
+ from encodings.aliases import aliases
8
+ from functools import lru_cache
9
+ from re import findall
10
+ from typing import Generator
11
+
12
+ from _multibytecodec import ( # type: ignore[import-not-found,import]
13
+ MultibyteIncrementalDecoder,
14
+ )
15
+
16
+ from .constant import (
17
+ ENCODING_MARKS,
18
+ IANA_SUPPORTED_SIMILAR,
19
+ RE_POSSIBLE_ENCODING_INDICATION,
20
+ UNICODE_RANGES_COMBINED,
21
+ UNICODE_SECONDARY_RANGE_KEYWORD,
22
+ UTF8_MAXIMAL_ALLOCATION,
23
+ COMMON_CJK_CHARACTERS,
24
+ )
25
+
26
+
27
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
    """True when the character's Unicode name mentions a known diacritic."""
    try:
        description = unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
    diacritic_markers = (
        "WITH GRAVE",
        "WITH ACUTE",
        "WITH CEDILLA",
        "WITH DIAERESIS",
        "WITH CIRCUMFLEX",
        "WITH TILDE",
        "WITH MACRON",
        "WITH RING ABOVE",
    )
    return any(marker in description for marker in diacritic_markers)
43
+
44
+
45
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
    """Return the base character from the Unicode decomposition, if one exists."""
    decomposed = unicodedata.decomposition(character)
    if not decomposed:
        return character

    # First element of the decomposition is the base code point in hex.
    base_code_point = decomposed.split(" ")[0]
    return chr(int(base_code_point, 16))
54
+
55
+
56
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> str | None:
    """
    Retrieve the Unicode range official name from a single character.
    """
    code_point = ord(character)
    return next(
        (
            range_name
            for range_name, ord_range in UNICODE_RANGES_COMBINED.items()
            if code_point in ord_range
        ),
        None,
    )
68
+
69
+
70
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
    """True when the Unicode name marks the character as Latin script."""
    try:
        # unicodedata.name raises ValueError for unnamed code points.
        return "LATIN" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
77
+
78
+
79
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
    """True for characters in a Unicode punctuation category or range."""
    if "P" in unicodedata.category(character):
        return True

    character_range = unicode_range(character)
    return character_range is not None and "Punctuation" in character_range
92
+
93
+
94
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
    """True for symbol/number categories or symbol-like 'Forms' ranges."""
    category = unicodedata.category(character)
    if "S" in category or "N" in category:
        return True

    character_range = unicode_range(character)
    if character_range is None:
        return False

    return "Forms" in character_range and category != "Lo"
107
+
108
+
109
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
    """True when the character lives in an emoticon/pictograph Unicode range."""
    character_range = unicode_range(character)
    if character_range is None:
        return False
    return any(keyword in character_range for keyword in ("Emoticons", "Pictographs"))
117
+
118
+
119
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
    """True for whitespace and common word/field separator characters."""
    if character.isspace():
        return True
    if character in {"|", "+", "<", ">"}:
        return True

    category = unicodedata.category(character)
    return "Z" in category or category in {"Po", "Pd", "Pc"}
127
+
128
+
129
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
    """True when the character is cased (exactly one of lower/upper holds)."""
    has_lower = character.islower()
    has_upper = character.isupper()
    return has_lower != has_upper
132
+
133
+
134
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
    """True when the Unicode name marks the character as a CJK ideograph."""
    try:
        return "CJK" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
142
+
143
+
144
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
    """True when the Unicode name marks the character as Hiragana."""
    try:
        return "HIRAGANA" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
152
+
153
+
154
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
    """True when the Unicode name marks the character as Katakana."""
    try:
        return "KATAKANA" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
162
+
163
+
164
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
    """True when the Unicode name marks the character as Hangul."""
    try:
        return "HANGUL" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
172
+
173
+
174
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
    """True when the Unicode name marks the character as Thai."""
    try:
        return "THAI" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
182
+
183
+
184
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic(character: str) -> bool:
    """True when the Unicode name marks the character as Arabic."""
    try:
        return "ARABIC" in unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
192
+
193
+
194
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic_isolated_form(character: str) -> bool:
    """True for Arabic presentation-form characters in their isolated form."""
    try:
        name = unicodedata.name(character)
    except ValueError:  # Defensive: unicode database outdated?
        return False
    return "ARABIC" in name and "ISOLATED FORM" in name
202
+
203
+
204
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk_uncommon(character: str) -> bool:
    # True when the character is NOT in the precomputed set of frequent CJK
    # characters; used to flag improbable decodings producing rare ideographs.
    return character not in COMMON_CJK_CHARACTERS
207
+
208
+
209
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
    """True when the named range matches any secondary-range keyword."""
    for keyword in UNICODE_SECONDARY_RANGE_KEYWORD:
        if keyword in range_name:
            return True
    return False
212
+
213
+
214
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_unprintable(character: str) -> bool:
    """True for characters that are neither printable nor whitespace."""
    if character.isspace() or character.isprintable():
        return False
    # \x1a is the ASCII substitute character; \ufeff (zero-width no-break space,
    # Arabic Presentation Forms-B) is not acknowledged as space by Python.
    return character not in ("\x1a", "\ufeff")
223
+
224
+
225
def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
    """
    Extract using ASCII-only decoder any specified encoding in the first n-bytes.
    """
    if not isinstance(sequence, bytes):
        raise TypeError

    # Only scan the head of the payload, decoded leniently as ASCII.
    zone = sequence[: min(len(sequence), search_zone)].decode("ascii", errors="ignore")

    for declared in findall(RE_POSSIBLE_ENCODING_INDICATION, zone):
        normalized = declared.lower().replace("-", "_")

        # Map the declared name onto Python's canonical codec name.
        for encoding_alias, encoding_iana in aliases.items():
            if normalized in (encoding_alias, encoding_iana):
                return encoding_iana

    return None
255
+
256
+
257
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
    """
    Verify is a specific encoding is a multi byte one based on it IANA name
    """
    known_multi_byte = {
        "utf_8",
        "utf_8_sig",
        "utf_16",
        "utf_16_be",
        "utf_16_le",
        "utf_32",
        "utf_32_le",
        "utf_32_be",
        "utf_7",
    }
    if name in known_multi_byte:
        return True

    # Everything else is multi-byte only when its incremental decoder is backed
    # by the C-level multibyte codec machinery (the CJK codecs).
    decoder = importlib.import_module(f"encodings.{name}").IncrementalDecoder
    return issubclass(decoder, MultibyteIncrementalDecoder)
276
+
277
+
278
def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
    """
    Identify and extract SIG/BOM in given sequence.
    """
    for iana_encoding, marks in ENCODING_MARKS.items():
        # A mark entry may be a single bytes value or a list of candidates.
        candidate_marks = [marks] if isinstance(marks, bytes) else marks

        for mark in candidate_marks:
            if sequence.startswith(mark):
                return iana_encoding, mark

    return None, b""
294
+
295
+
296
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
    """UTF-16/UTF-32 decoders consume the BOM themselves; all others need it stripped."""
    return iana_encoding not in ("utf_16", "utf_32")
298
+
299
+
300
def iana_name(cp_name: str, strict: bool = True) -> str:
    """Returns the Python normalized encoding name (Not the IANA official name)."""
    normalized = cp_name.lower().replace("-", "_")

    # Match against both alias keys and canonical codec names.
    for encoding_alias, encoding_iana in aliases.items():
        if normalized in (encoding_alias, encoding_iana):
            return encoding_iana

    if strict:
        raise ValueError(f"Unable to retrieve IANA for '{normalized}'")

    return normalized
315
+
316
+
317
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
    """Ratio of single-byte values that both 8-bit codecs decode identically."""
    # Only meaningful for single-byte code pages.
    if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
        return 0.0

    decoder_cls_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder
    decoder_cls_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder

    id_a: IncrementalDecoder = decoder_cls_a(errors="ignore")
    id_b: IncrementalDecoder = decoder_cls_b(errors="ignore")

    matches = 0
    for byte_value in range(255):
        probe = bytes([byte_value])
        if id_a.decode(probe) == id_b.decode(probe):
            matches += 1

    # NOTE(review): scans 255 values (0-254) yet divides by 254 — kept as-is
    # because IANA_SUPPORTED_SIMILAR was generated with this exact formula.
    return matches / 254
335
+
336
+
337
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
    """
    Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
    """
    return iana_name_b in IANA_SUPPORTED_SIMILAR.get(iana_name_a, ())
346
+
347
+
348
def set_logging_handler(
    name: str = "charset_normalizer",
    level: int = logging.INFO,
    format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
    """Attach a fresh StreamHandler with the given format to the named logger."""
    target_logger = logging.getLogger(name)
    target_logger.setLevel(level)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format_string))
    target_logger.addHandler(stream_handler)
359
+
360
+
361
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: str | None = None,
) -> Generator[str, None, None]:
    """Yield decoded chunks of *sequences* at the given offsets.

    Single-byte codecs with a pre-decoded payload are sliced from the string
    directly; otherwise each byte window is decoded, and for multi-byte codecs
    the window start is nudged backwards (up to 3 bytes) when the cut appears
    to have split a multi-byte sequence.
    """
    # Fast path: slice the already-decoded string (1 byte == 1 char offsets).
    if decoded_payload and is_multi_byte_decoder is False:
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            # Skip windows reaching too far past the end of the payload.
            if chunk_end > len(sequences) + 8:
                continue

            cut_sequence = sequences[i : i + chunk_size]

            # Re-prepend the BOM/SIG so the decoder sees a well-formed stream.
            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence

            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )

            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                chunk_partial_size_chk: int = min(chunk_size, 16)

                # If the chunk's head is not found in the reference decode, the
                # window likely split a multi-byte sequence: shift start back.
                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]

                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence

                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")

                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break

            yield chunk
venv/lib/python3.10/site-packages/charset_normalizer/version.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Expose version
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ __version__ = "3.4.3"
8
+ VERSION = __version__.split(".")
venv/lib/python3.10/site-packages/click_option_group/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ click-option-group
3
+ ~~~~~~~~~~~~~~~~~~
4
+
5
+ Option groups missing in Click
6
+
7
+ :copyright: © 2019-2020 by Eugene Prilepin
8
+ :license: BSD, see LICENSE for more details.
9
+ """
10
+
11
+ from ._core import (
12
+ AllOptionGroup,
13
+ GroupedOption,
14
+ MutuallyExclusiveOptionGroup,
15
+ OptionGroup,
16
+ RequiredAllOptionGroup,
17
+ RequiredAnyOptionGroup,
18
+ RequiredMutuallyExclusiveOptionGroup,
19
+ )
20
+ from ._decorators import optgroup
21
+ from ._version import __version__
22
+
23
+ __all__ = [
24
+ "__version__",
25
+ "optgroup",
26
+ "GroupedOption",
27
+ "OptionGroup",
28
+ "RequiredAnyOptionGroup",
29
+ "AllOptionGroup",
30
+ "RequiredAllOptionGroup",
31
+ "MutuallyExclusiveOptionGroup",
32
+ "RequiredMutuallyExclusiveOptionGroup",
33
+ ]
venv/lib/python3.10/site-packages/click_option_group/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (831 Bytes). View file
 
venv/lib/python3.10/site-packages/click_option_group/__pycache__/_core.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
venv/lib/python3.10/site-packages/click_option_group/__pycache__/_decorators.cpython-310.pyc ADDED
Binary file (8.06 kB). View file
 
venv/lib/python3.10/site-packages/click_option_group/__pycache__/_helpers.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
venv/lib/python3.10/site-packages/click_option_group/__pycache__/_version.cpython-310.pyc ADDED
Binary file (630 Bytes). View file
 
venv/lib/python3.10/site-packages/click_option_group/_core.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import inspect
3
+ import weakref
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ List,
9
+ Mapping,
10
+ Optional,
11
+ Sequence,
12
+ Set,
13
+ Tuple,
14
+ Union,
15
+ )
16
+
17
+ import click
18
+ from click.core import augment_usage_errors
19
+
20
+ from ._helpers import (
21
+ get_callback_and_params,
22
+ get_fake_option_name,
23
+ raise_mixing_decorators_error,
24
+ resolve_wrappers,
25
+ )
26
+
27
+ FC = Union[Callable, click.Command]
28
+
29
+
30
class GroupedOption(click.Option):
    """Represents grouped (related) optional values

    The class should be used only with `OptionGroup` class for creating grouped options.

    :param param_decls: option declaration tuple
    :param group: `OptionGroup` instance (the group for this option)
    :param attrs: additional option attributes
    """

    def __init__(
        self,
        param_decls: Optional[Sequence[str]] = None,
        *,
        group: "OptionGroup",
        **attrs: Any,
    ):
        super().__init__(param_decls, **attrs)

        # NOTE(review): the forbidden-attribute check runs AFTER super().__init__
        # has already consumed `attrs` — confirm this ordering is intended.
        for attr in group.forbidden_option_attrs:
            if attr in attrs:
                msg = f"'{attr}' attribute is not allowed for '{type(group).__name__}' option `{self.name}'."
                raise TypeError(msg)

        self.__group = group

    @property
    def group(self) -> "OptionGroup":
        """Returns the reference to the group for this option

        :return: `OptionGroup` the group instance for this option
        """
        return self.__group

    def handle_parse_result(
        self,
        ctx: click.Context,
        opts: Mapping[str, Any],
        args: List[str],
    ) -> Tuple[Any, List[str]]:
        # Let the group run its cross-option validation first, then defer to click.
        with augment_usage_errors(ctx, param=self):
            if not ctx.resilient_parsing:
                self.group.handle_parse_result(self, ctx, opts)
        return super().handle_parse_result(ctx, opts, args)

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
        help_record = super().get_help_record(ctx)
        if help_record is None:
            # this happens if the option is hidden
            return help_record

        opts, opt_help = help_record

        # Indent the option line one level so it nests under the group title.
        formatter = ctx.make_formatter()
        with formatter.indentation():
            indent = " " * formatter.current_indent
            return f"{indent}{opts}", opt_help
87
+
88
+
89
class _GroupTitleFakeOption(click.Option):
    """The helper `Option` class to display option group title in help"""

    def __init__(
        self,
        param_decls: Optional[Sequence[str]] = None,
        *,
        group: "OptionGroup",
        **attrs: Any,
    ) -> None:
        self.__group = group
        # Hidden + expose_value=False: never parsed, only rendered in --help.
        super().__init__(param_decls, hidden=True, expose_value=False, help=group.help, **attrs)

        # We remove parsed opts for the fake options just in case.
        # For example it is workaround for correct click-repl autocomplete
        self.opts = []
        self.secondary_opts = []

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
        # Delegate to the group so its title/help pair is rendered instead.
        return self.__group.get_help_record(ctx)
109
+
110
+
111
class OptionGroup:
    """Option group manages grouped (related) options

    The class is used for creating the groups of options. The class can be used as a base class to implement
    specific behavior for grouped options.

    :param name: the group name. If it is not set the default group name will be used
    :param help: the group help text or None
    """

    def __init__(
        self,
        name: Optional[str] = None,
        *,
        hidden: bool = False,
        help: Optional[str] = None,
    ) -> None:
        self._name = name if name else ""
        # cleandoc normalizes the indentation of triple-quoted help text.
        self._help = inspect.cleandoc(help if help else "")
        self._hidden = hidden

        # Maps command callback -> {option name -> GroupedOption}. Weak values
        # keep this registry from outliving the options themselves.
        self._options: Mapping[Any, Any] = collections.defaultdict(weakref.WeakValueDictionary)
        # Maps command callback -> the hidden _GroupTitleFakeOption for this group.
        self._group_title_options = weakref.WeakValueDictionary()

    @property
    def name(self) -> str:
        """Returns the group name or empty string if it was not set

        :return: group name
        """
        return self._name

    @property
    def help(self) -> str:
        """Returns the group help or empty string if it was not set

        :return: group help
        """
        return self._help

    @property
    def name_extra(self) -> List[str]:
        """Returns extra name attributes for the group"""
        return []

    @property
    def forbidden_option_attrs(self) -> List[str]:
        """Returns the list of forbidden option attributes for the group"""
        return []

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
        """Returns the help record for the group

        :param ctx: Click Context object
        :return: the tuple of two fields: `(name, help)`, or None if nothing should be shown
        """
        # A group whose options are all hidden renders no title at all.
        if all(o.hidden for o in self.get_options(ctx).values()):
            return None

        name = self.name
        help_ = self.help if self.help else ""

        # Behavior subclasses contribute markers such as "required_any".
        extra = ", ".join(self.name_extra)
        if extra:
            extra = f"[{extra}]"

        if name:
            name = f"{name}: {extra}"
        elif extra:
            name = f"{extra}:"

        if not name and not help_:
            return None

        return name, help_

    def option(self, *param_decls: str, **attrs: Any) -> Callable:
        """Decorator attaches a grouped option to the command

        The decorator is used for adding options to the group and to the Click-command
        """

        def decorator(func: FC) -> FC:
            option_attrs = attrs.copy()
            option_attrs.setdefault("cls", GroupedOption)
            if self._hidden:
                # A hidden group hides each of its options unless overridden.
                option_attrs.setdefault("hidden", self._hidden)

            if not issubclass(option_attrs["cls"], GroupedOption):
                msg = "'cls' argument must be a subclass of 'GroupedOption' class."
                raise TypeError(msg)

            self._check_mixing_decorators(func)
            func = click.option(*param_decls, group=self, **option_attrs)(func)
            self._option_memo(func)

            # Add the fake invisible option to use for print nice title help for grouped options
            self._add_title_fake_option(func)

            return func

        return decorator

    def get_options(self, ctx: click.Context) -> Dict[str, GroupedOption]:
        """Returns the dictionary with group options"""
        return self._options.get(resolve_wrappers(ctx.command.callback), {})

    def get_option_names(self, ctx: click.Context) -> List[str]:
        """Returns the list with option names ordered by addition in the group"""
        # Decorators apply bottom-up, so reverse to restore declaration order.
        return list(reversed(list(self.get_options(ctx))))

    def get_error_hint(self, ctx: click.Context, option_names: Optional[Set[str]] = None) -> str:
        """Builds a multi-line hint listing the group's options, optionally filtered by name."""
        options = self.get_options(ctx)
        text = ""

        for name, opt in reversed(list(options.items())):
            if option_names and name not in option_names:
                continue
            text += f" {opt.get_error_hint(ctx)}\n"

        if text:
            # Drop the trailing newline.
            text = text[:-1]

        return text

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        """The method should be used for adding specific behavior and relation for options in the group"""

    def _check_mixing_decorators(self, func: Callable) -> None:
        # Reject a regular click.option placed between grouped-option decorators.
        func, params = get_callback_and_params(func)

        if not params or func not in self._options:
            return

        last_param = params[-1]
        title_option = self._group_title_options[func]
        options = self._options[func]

        if last_param.name != title_option.name and last_param.name not in options:
            raise_mixing_decorators_error(last_param, func)

    def _add_title_fake_option(self, func: FC) -> None:
        callback, params = get_callback_and_params(func)

        if callback not in self._group_title_options:
            # First grouped option for this callback: create the title option.
            func = click.option(get_fake_option_name(), group=self, cls=_GroupTitleFakeOption)(func)

            _, params = get_callback_and_params(func)
            self._group_title_options[callback] = params[-1]

        title_option = self._group_title_options[callback]
        last_option = params[-1]

        if title_option.name != last_option.name:
            # Hold title fake option on the top of the option group
            title_index = params.index(title_option)
            params[-1], params[title_index] = params[title_index], params[-1]

    def _option_memo(self, func: Callable) -> None:
        # Register the most recently added option under its resolved callback.
        func, params = get_callback_and_params(func)
        option = params[-1]
        self._options[func][option.name] = option

    def _group_name_str(self) -> str:
        # Human-readable group reference used in error messages.
        return f"'{self.name}'" if self.name else "the"
276
+
277
+
278
class RequiredAnyOptionGroup(OptionGroup):
    """Option group with required any options of this group

    `RequiredAnyOptionGroup` defines the behavior: At least one option from the group must be set.
    """

    @property
    def forbidden_option_attrs(self) -> List[str]:
        # Per-option `required` conflicts with the group-level constraint.
        return ["required"]

    @property
    def name_extra(self) -> List[str]:
        return [*super().name_extra, "required_any"]

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        if option.name in opts:
            # This option was provided, so "at least one" is already satisfied.
            return

        group_options = self.get_options(ctx)

        if all(opt.hidden for opt in group_options.values()):
            # A required-any group made of only hidden options can never be satisfied.
            cls_name = self.__class__.__name__
            group_name = self._group_name_str()

            msg = f"Need at least one non-hidden option in {group_name} option group ({cls_name})."
            raise TypeError(msg)

        if not set(group_options).intersection(opts):
            group_name = self._group_name_str()
            option_info = self.get_error_hint(ctx)

            msg = f"At least one of the following options from {group_name} option group is required:\n{option_info}"
            raise click.UsageError(msg, ctx=ctx)
314
+
315
+
316
class RequiredAllOptionGroup(OptionGroup):
    """Option group with required all options of this group

    `RequiredAllOptionGroup` defines the behavior: All options from the group must be set.
    """

    @property
    def forbidden_option_attrs(self) -> List[str]:
        # `required` is implied for every option; `hidden` would make the
        # mandatory options undiscoverable.
        return ["required", "hidden"]

    @property
    def name_extra(self) -> List[str]:
        return [*super().name_extra, "required_all"]

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        # Every option of the group must be present among the parsed opts.
        missing_names = {name for name in self.get_options(ctx) if name not in opts}

        if missing_names:
            group_name = self._group_name_str()
            option_info = self.get_error_hint(ctx, missing_names)

            msg = f"Missing required options from {group_name} option group:\n{option_info}"
            raise click.UsageError(msg, ctx=ctx)
343
+
344
+
345
class MutuallyExclusiveOptionGroup(OptionGroup):
    """Option group with mutually exclusive behavior for grouped options

    `MutuallyExclusiveOptionGroup` defines the behavior:
    - Only one or none option from the group must be set
    """

    @property
    def forbidden_option_attrs(self) -> List[str]:
        # A per-option `required` would contradict "one or none".
        return ["required"]

    @property
    def name_extra(self) -> List[str]:
        return [*super().name_extra, "mutually_exclusive"]

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        given_names = set(self.get_options(ctx)).intersection(opts)

        if len(given_names) <= 1:
            # Zero or one option supplied: the constraint holds.
            return

        group_name = self._group_name_str()
        option_info = self.get_error_hint(ctx, given_names)

        msg = f"Mutually exclusive options from {group_name} option group cannot be used at the same time:\n{option_info}"
        raise click.UsageError(msg, ctx=ctx)
374
+
375
+
376
class RequiredMutuallyExclusiveOptionGroup(MutuallyExclusiveOptionGroup):
    """Option group with required and mutually exclusive behavior for grouped options

    `RequiredMutuallyExclusiveOptionGroup` defines the behavior:
    - Only one required option from the group must be set
    """

    @property
    def name_extra(self) -> List[str]:
        return [*super().name_extra, "required"]

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        # First enforce the mutual-exclusion constraint from the parent class.
        super().handle_parse_result(option, ctx, opts)

        if any(name in opts for name in self.get_option_names(ctx)):
            # Exactly one option was supplied (more than one is rejected above).
            return

        group_name = self._group_name_str()
        option_info = self.get_error_hint(ctx)

        msg = (
            f"Missing one of the required mutually exclusive options from {group_name} option group:\n{option_info}"
        )
        raise click.UsageError(msg, ctx=ctx)
404
+
405
+
406
class AllOptionGroup(OptionGroup):
    """Option group with required all/none options of this group

    `AllOptionGroup` defines the behavior:
    - All options from the group must be set or None must be set
    """

    @property
    def forbidden_option_attrs(self) -> List[str]:
        # `required`/`hidden` would conflict with the all-or-none contract.
        return ["required", "hidden"]

    @property
    def name_extra(self) -> List[str]:
        return [*super().name_extra, "all_or_none"]

    def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None:
        all_names = set(self.get_options(ctx))
        given_names = all_names.intersection(opts)

        # Valid states: no option of the group supplied, or every one of them.
        if given_names and given_names != all_names:
            group_name = self._group_name_str()
            option_info = self.get_error_hint(ctx)

            msg = f"All options from {group_name} option group should be specified or none should be specified. Missing required options:\n{option_info}"
            raise click.UsageError(msg, ctx=ctx)
venv/lib/python3.10/site-packages/click_option_group/_decorators.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import inspect
3
+ import warnings
4
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Type, TypeVar
5
+
6
+ import click
7
+
8
+ from ._core import OptionGroup
9
+ from ._helpers import (
10
+ get_callback_and_params,
11
+ raise_mixing_decorators_error,
12
+ )
13
+
14
# Generic helpers for annotating decorator signatures.
T = TypeVar("T")
F = TypeVar("F", bound=Callable)

# A decorator takes a callable and returns a callable of the same type.
Decorator = Callable[[F], F]
18
+
19
+
20
class OptionStackItem(NamedTuple):
    """One pending grouped option recorded by ``optgroup.option`` until a group decorator attaches it."""

    # Positional declarations passed to ``optgroup.option`` (e.g. ``("--foo",)``).
    param_decls: Tuple[str, ...]
    # Keyword attributes passed to ``optgroup.option``.
    attrs: Dict[str, Any]
    # Number of (non-placeholder) click parameters present when this option was
    # declared; used to detect regular decorators mixed between grouped ones.
    param_count: int
24
+
25
+
26
class _NotAttachedOption(click.Option):
    """The helper class to catch grouped options which were not attached to the group

    Raises TypeError if not attached options exist.
    """

    def __init__(self, param_decls=None, *, all_not_attached_options, **attrs):
        # Eager and visible so parsing always reaches handle_parse_result,
        # which then reports every dangling grouped option at once.
        super().__init__(param_decls, expose_value=False, hidden=False, is_eager=True, **attrs)
        # Shared registry owned by _OptGroup: callback -> list of placeholders.
        self._all_not_attached_options = all_not_attached_options

    def handle_parse_result(self, ctx, opts, args):
        # Build a hint listing all grouped options lacking a group decorator;
        # the stored list is in reverse decorator order, hence reversed().
        options_error_hint = ""
        for option in reversed(self._all_not_attached_options[ctx.command.callback]):
            options_error_hint += f" {option.get_error_hint(ctx)}\n"
        options_error_hint = options_error_hint[:-1]

        msg = f"Missing option group decorator in '{ctx.command.name}' command for the following grouped options:\n{options_error_hint}\n"
        raise TypeError(msg)
44
+
45
+
46
class _OptGroup:
    """A helper class to manage creating groups and group options via decorators

    The class provides two decorator-methods: `group`/`__call__` and `option`.
    These decorators should be used for adding grouped options. The class has a
    single global instance `optgroup` that should be used in most cases.

    The example of usage::

        ...
        @optgroup('Group 1', help='option group 1')
        @optgroup.option('--foo')
        @optgroup.option('--bar')
        @optgroup.group('Group 2', help='option group 2')
        @optgroup.option('--spam')
        ...
    """

    def __init__(self) -> None:
        # Pending grouped options per callback, collected by `option` until a
        # `group` decorator consumes them.
        self._decorating_state: Dict[Callable, List[OptionStackItem]] = collections.defaultdict(list)
        # Placeholder options per callback that raise at parse time if the user
        # forgot the enclosing group decorator.
        self._not_attached_options: Dict[Callable, List[click.Option]] = collections.defaultdict(list)
        # Frame offset for warning line numbers; `__call__` adds one extra frame.
        self._outer_frame_index = 1

    def __call__(
        self,
        name: Optional[str] = None,
        *,
        help: Optional[str] = None,
        cls: Optional[Type[OptionGroup]] = None,
        **attrs,
    ):
        """Creates a new group and collects its options

        Creates the option group and registers all grouped options
        which were added by `option` decorator.

        :param name: Group name or None for default name
        :param help: Group help or None for empty help
        :param cls: Option group class that should be inherited from `OptionGroup` class
        :param attrs: Additional parameters of option group class
        """
        try:
            # Calling through __call__ adds one stack frame, so point the
            # warning machinery one frame further out while delegating.
            self._outer_frame_index = 2
            return self.group(name, help=help, cls=cls, **attrs)
        finally:
            self._outer_frame_index = 1

    def group(
        self,
        name: Optional[str] = None,
        *,
        help: Optional[str] = None,
        cls: Optional[Type[OptionGroup]] = None,
        **attrs,
    ) -> Decorator:
        """The decorator creates a new group and collects its options

        Creates the option group and registers all grouped options
        which were added by `option` decorator.

        :param name: Group name or None for default name
        :param help: Group help or None for empty help
        :param cls: Option group class that should be inherited from `OptionGroup` class
        :param attrs: Additional parameters of option group class
        """

        if not cls:
            cls = OptionGroup
        elif not issubclass(cls, OptionGroup):
            msg = "'cls' must be a subclass of 'OptionGroup' class."
            raise TypeError(msg)

        def decorator(func):
            callback, params = get_callback_and_params(func)

            if callback not in self._decorating_state:
                # No `option` decorators were collected for this callback:
                # warn (with the caller's line number) and add nothing.
                frame = inspect.getouterframes(inspect.currentframe())[self._outer_frame_index]
                lineno = frame.lineno

                with_name = f' "{name}"' if name else ""
                warnings.warn(
                    (
                        f"The empty option group{with_name} was found (line {lineno}) "
                        f'for "{callback.__name__}". The group will not be added.'
                    ),
                    RuntimeWarning,
                    stacklevel=2,
                )
                return func

            option_stack = self._decorating_state.pop(callback)

            # Drop the placeholder options now that a group decorator was found.
            # NOTE(review): comprehension used only for its side effect; a plain
            # for loop would be clearer.
            [params.remove(opt) for opt in self._not_attached_options.pop(callback)]
            self._check_mixing_decorators(callback, option_stack, self._filter_not_attached(params))

            attrs["help"] = help

            try:
                option_group = cls(name, **attrs)
            except TypeError as err:
                # Re-raise with a friendlier constructor name in the message.
                message = str(err).replace("__init__()", f"'{cls.__name__}' constructor")
                raise TypeError(message) from err

            # Attach every collected option to the freshly created group.
            for item in option_stack:
                func = option_group.option(*item.param_decls, **item.attrs)(func)

            return func

        return decorator

    def option(self, *param_decls, **attrs) -> Decorator:
        """The decorator adds a new option to the group

        The decorator is lazy. It adds option decls and attrs.
        All options will be registered by `group` decorator.

        :param param_decls: option declaration tuple
        :param attrs: additional option attributes and parameters
        """

        def decorator(func):
            callback, params = get_callback_and_params(func)

            option_stack = self._decorating_state[callback]
            # Placeholders must not count towards the mixing check.
            params = self._filter_not_attached(params)

            self._check_mixing_decorators(callback, option_stack, params)
            self._add_not_attached_option(func, param_decls)
            option_stack.append(OptionStackItem(param_decls, attrs, len(params)))

            return func

        return decorator

    def help_option(self, *param_decls, **attrs) -> Decorator:
        """This decorator adds a help option to the group, which prints
        the command's help text and exits.
        """
        if not param_decls:
            param_decls = ("--help",)

        attrs.setdefault("is_flag", True)
        attrs.setdefault("is_eager", True)
        attrs.setdefault("expose_value", False)
        attrs.setdefault("help", "Show this message and exit.")

        if "callback" not in attrs:

            def callback(ctx, _, value):
                # Standard click help-option behavior: print help and exit.
                if not value or ctx.resilient_parsing:
                    return
                click.echo(ctx.get_help(), color=ctx.color)
                ctx.exit()

            attrs["callback"] = callback

        return self.option(*param_decls, **attrs)

    def _add_not_attached_option(self, func, param_decls) -> None:
        # Attach a placeholder option that raises at parse time if the user
        # forgot the enclosing group decorator.
        click.option(
            *param_decls,
            all_not_attached_options=self._not_attached_options,
            cls=_NotAttachedOption,
        )(func)

        callback, params = get_callback_and_params(func)
        self._not_attached_options[callback].append(params[-1])

    @staticmethod
    def _filter_not_attached(options: List[T]) -> List[T]:
        # Hide the placeholder options from mixing checks and counts.
        return [opt for opt in options if not isinstance(opt, _NotAttachedOption)]

    @staticmethod
    def _check_mixing_decorators(callback, options_stack, params):
        if options_stack:
            last_state = options_stack[-1]

            # A new parameter appeared since the last grouped option was added,
            # meaning a regular decorator was mixed in between.
            if len(params) > last_state.param_count:
                raise_mixing_decorators_error(params[-1], callback)
225
+
226
+
227
# Module-level singleton used by client code: `from click_option_group import optgroup`.
optgroup = _OptGroup()
"""Provides decorators for creating option groups and adding grouped options

Decorators:
- `group` is used for creating an option group
- `option` is used for adding options to a group

Example::

    @optgroup.group('Group 1', help='option group 1')
    @optgroup.option('--foo')
    @optgroup.option('--bar')
    @optgroup.group('Group 2', help='option group 2')
    @optgroup.option('--spam')
"""
venv/lib/python3.10/site-packages/click_option_group/_helpers.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import string
3
+ from typing import Callable, List, NoReturn, Tuple, TypeVar
4
+
5
+ import click
6
+
7
# Generic callable type for the wrapper-resolving helper below.
F = TypeVar("F", bound=Callable)

# Length of the random lowercase suffix in generated fake option names.
FAKE_OPT_NAME_LEN = 30
10
+
11
+
12
def get_callback_and_params(func) -> Tuple[Callable, List[click.Option]]:
    """Return the callback function and its parameter list.

    :param func: decorated function or click Command
    :return: (callback, params)
    """
    if isinstance(func, click.Command):
        callback, params = func.callback, func.params
    else:
        # A plain decorated function keeps pending options on __click_params__.
        callback, params = func, getattr(func, "__click_params__", [])

    return resolve_wrappers(callback), params
26
+
27
+
28
def get_fake_option_name(name_len: int = FAKE_OPT_NAME_LEN, prefix: str = "fake") -> str:
    """Generate a random hidden option name such as ``--fake-abc...`` for group title options."""
    suffix = "".join(random.choices(string.ascii_lowercase, k=name_len))
    return f"--{prefix}-{suffix}"
30
+
31
+
32
def raise_mixing_decorators_error(wrong_option: click.Option, callback: Callable) -> NoReturn:
    """Raise ``TypeError`` for a regular parameter placed between grouped-option decorators."""
    # Prefer the declared opts for the hint; fall back to the parameter name.
    if wrong_option.opts:
        error_hint = wrong_option.opts
    else:
        error_hint = [wrong_option.name]

    msg = f"Grouped options must not be mixed with regular parameters while adding by decorator. Check decorator position for {error_hint} option in '{callback.__name__}'."
    raise TypeError(msg)
37
+
38
+
39
def resolve_wrappers(f: F) -> F:
    """Get the underlying function behind any level of function wrappers.

    Follows the ``__wrapped__`` chain (set by :func:`functools.wraps`)
    iteratively instead of recursively, so very deep wrapper chains cannot
    hit the interpreter recursion limit.

    :param f: a possibly wrapped callable
    :return: the innermost callable with no ``__wrapped__`` attribute
    """
    while hasattr(f, "__wrapped__"):
        f = f.__wrapped__
    return f
venv/lib/python3.10/site-packages/click_option_group/_version.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # file generated by setuptools-scm
2
+ # don't change, don't track in version control
3
+
4
+ __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
5
+
6
+ TYPE_CHECKING = False
7
+ if TYPE_CHECKING:
8
+ from typing import Tuple
9
+ from typing import Union
10
+
11
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
12
+ else:
13
+ VERSION_TUPLE = object
14
+
15
+ version: str
16
+ __version__: str
17
+ __version_tuple__: VERSION_TUPLE
18
+ version_tuple: VERSION_TUPLE
19
+
20
+ __version__ = version = '0.5.7'
21
+ __version_tuple__ = version_tuple = (0, 5, 7)
venv/lib/python3.10/site-packages/click_option_group/py.typed ADDED
File without changes