Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- wemm/lib/python3.10/site-packages/Crypto/IO/PKCS8.pyi +17 -0
- wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/PKCS8.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.py +799 -0
- wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.pyi +3 -0
- wemm/lib/python3.10/site-packages/Crypto/Math/__init__.py +0 -0
- wemm/lib/python3.10/site-packages/Crypto/Signature/PKCS1_PSS.pyi +28 -0
- wemm/lib/python3.10/site-packages/Crypto/Signature/pss.pyi +30 -0
- wemm/lib/python3.10/site-packages/Crypto/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER +1 -0
- wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE +21 -0
- wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz +3 -0
- wemm/lib/python3.10/site-packages/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json.gz +3 -0
- wemm/lib/python3.10/site-packages/tokenizers/__init__.pyi +1200 -0
- wemm/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi +271 -0
- wemm/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py +122 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py +150 -0
- wemm/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py +196 -0
- wemm/lib/python3.10/site-packages/tokenizers/models/__init__.py +8 -0
- wemm/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi +595 -0
- wemm/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi +607 -0
- wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.py +9 -0
- wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi +342 -0
- wemm/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so +3 -0
- wemm/lib/python3.10/site-packages/tokenizers/tools/__init__.py +1 -0
- wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css +170 -0
- wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer.py +403 -0
- wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.py +8 -0
- wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi +156 -0
- wemm/lib/python3.10/site-packages/torchvision/__init__.py +114 -0
- wemm/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py +58 -0
- wemm/lib/python3.10/site-packages/torchvision/_utils.py +32 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/__init__.py +12 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_datapoint.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_image.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_mask.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/_datapoint.py +259 -0
- wemm/lib/python3.10/site-packages/torchvision/datapoints/_image.py +260 -0
.gitattributes
CHANGED
|
@@ -183,3 +183,4 @@ parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
|
| 183 |
parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
| 184 |
parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
|
| 185 |
parrot/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 183 |
parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
| 184 |
parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
|
| 185 |
parrot/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
|
| 186 |
+
wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
wemm/lib/python3.10/site-packages/Crypto/IO/PKCS8.pyi
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple, Optional, Union, Callable
|
| 2 |
+
from typing_extensions import NotRequired
|
| 3 |
+
|
| 4 |
+
from Crypto.Util.asn1 import DerObject
|
| 5 |
+
from Crypto.IO._PBES import ProtParams
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def wrap(private_key: bytes,
|
| 9 |
+
key_oid: str,
|
| 10 |
+
passphrase: Union[bytes, str] = ...,
|
| 11 |
+
protection: str = ...,
|
| 12 |
+
prot_params: Optional[ProtParams] = ...,
|
| 13 |
+
key_params: Optional[DerObject] = ...,
|
| 14 |
+
randfunc: Optional[Callable[[int], str]] = ...) -> bytes: ...
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def unwrap(p8_private_key: bytes, passphrase: Optional[Union[bytes, str]] = ...) -> Tuple[str, bytes, Optional[bytes]]: ...
|
wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/PKCS8.cpython-310.pyc
ADDED
|
Binary file (4.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.py
ADDED
|
@@ -0,0 +1,799 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ===================================================================
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# Redistribution and use in source and binary forms, with or without
|
| 7 |
+
# modification, are permitted provided that the following conditions
|
| 8 |
+
# are met:
|
| 9 |
+
#
|
| 10 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 11 |
+
# notice, this list of conditions and the following disclaimer.
|
| 12 |
+
# 2. Redistributions in binary form must reproduce the above copyright
|
| 13 |
+
# notice, this list of conditions and the following disclaimer in
|
| 14 |
+
# the documentation and/or other materials provided with the
|
| 15 |
+
# distribution.
|
| 16 |
+
#
|
| 17 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 18 |
+
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 19 |
+
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
| 20 |
+
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
| 21 |
+
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
| 22 |
+
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
| 23 |
+
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
| 24 |
+
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 25 |
+
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
| 26 |
+
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
| 27 |
+
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 28 |
+
# POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
# ===================================================================
|
| 30 |
+
|
| 31 |
+
import sys
|
| 32 |
+
import struct
|
| 33 |
+
|
| 34 |
+
from Crypto.Util.py3compat import is_native_int
|
| 35 |
+
|
| 36 |
+
from Crypto.Util._raw_api import (backend, load_lib,
|
| 37 |
+
c_ulong, c_size_t, c_uint8_ptr)
|
| 38 |
+
|
| 39 |
+
from ._IntegerBase import IntegerBase
|
| 40 |
+
|
| 41 |
+
gmp_defs = """typedef unsigned long UNIX_ULONG;
|
| 42 |
+
typedef struct { int a; int b; void *c; } MPZ;
|
| 43 |
+
typedef MPZ mpz_t[1];
|
| 44 |
+
typedef UNIX_ULONG mp_bitcnt_t;
|
| 45 |
+
|
| 46 |
+
void __gmpz_init (mpz_t x);
|
| 47 |
+
void __gmpz_init_set (mpz_t rop, const mpz_t op);
|
| 48 |
+
void __gmpz_init_set_ui (mpz_t rop, UNIX_ULONG op);
|
| 49 |
+
|
| 50 |
+
UNIX_ULONG __gmpz_get_ui (const mpz_t op);
|
| 51 |
+
void __gmpz_set (mpz_t rop, const mpz_t op);
|
| 52 |
+
void __gmpz_set_ui (mpz_t rop, UNIX_ULONG op);
|
| 53 |
+
void __gmpz_add (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 54 |
+
void __gmpz_add_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2);
|
| 55 |
+
void __gmpz_sub_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2);
|
| 56 |
+
void __gmpz_addmul (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 57 |
+
void __gmpz_addmul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2);
|
| 58 |
+
void __gmpz_submul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2);
|
| 59 |
+
void __gmpz_import (mpz_t rop, size_t count, int order, size_t size,
|
| 60 |
+
int endian, size_t nails, const void *op);
|
| 61 |
+
void * __gmpz_export (void *rop, size_t *countp, int order,
|
| 62 |
+
size_t size,
|
| 63 |
+
int endian, size_t nails, const mpz_t op);
|
| 64 |
+
size_t __gmpz_sizeinbase (const mpz_t op, int base);
|
| 65 |
+
void __gmpz_sub (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 66 |
+
void __gmpz_mul (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 67 |
+
void __gmpz_mul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2);
|
| 68 |
+
int __gmpz_cmp (const mpz_t op1, const mpz_t op2);
|
| 69 |
+
void __gmpz_powm (mpz_t rop, const mpz_t base, const mpz_t exp, const
|
| 70 |
+
mpz_t mod);
|
| 71 |
+
void __gmpz_powm_ui (mpz_t rop, const mpz_t base, UNIX_ULONG exp,
|
| 72 |
+
const mpz_t mod);
|
| 73 |
+
void __gmpz_pow_ui (mpz_t rop, const mpz_t base, UNIX_ULONG exp);
|
| 74 |
+
void __gmpz_sqrt(mpz_t rop, const mpz_t op);
|
| 75 |
+
void __gmpz_mod (mpz_t r, const mpz_t n, const mpz_t d);
|
| 76 |
+
void __gmpz_neg (mpz_t rop, const mpz_t op);
|
| 77 |
+
void __gmpz_abs (mpz_t rop, const mpz_t op);
|
| 78 |
+
void __gmpz_and (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 79 |
+
void __gmpz_ior (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 80 |
+
void __gmpz_clear (mpz_t x);
|
| 81 |
+
void __gmpz_tdiv_q_2exp (mpz_t q, const mpz_t n, mp_bitcnt_t b);
|
| 82 |
+
void __gmpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d);
|
| 83 |
+
void __gmpz_mul_2exp (mpz_t rop, const mpz_t op1, mp_bitcnt_t op2);
|
| 84 |
+
int __gmpz_tstbit (const mpz_t op, mp_bitcnt_t bit_index);
|
| 85 |
+
int __gmpz_perfect_square_p (const mpz_t op);
|
| 86 |
+
int __gmpz_jacobi (const mpz_t a, const mpz_t b);
|
| 87 |
+
void __gmpz_gcd (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 88 |
+
UNIX_ULONG __gmpz_gcd_ui (mpz_t rop, const mpz_t op1,
|
| 89 |
+
UNIX_ULONG op2);
|
| 90 |
+
void __gmpz_lcm (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 91 |
+
int __gmpz_invert (mpz_t rop, const mpz_t op1, const mpz_t op2);
|
| 92 |
+
int __gmpz_divisible_p (const mpz_t n, const mpz_t d);
|
| 93 |
+
int __gmpz_divisible_ui_p (const mpz_t n, UNIX_ULONG d);
|
| 94 |
+
|
| 95 |
+
size_t __gmpz_size (const mpz_t op);
|
| 96 |
+
UNIX_ULONG __gmpz_getlimbn (const mpz_t op, size_t n);
|
| 97 |
+
"""
|
| 98 |
+
|
| 99 |
+
if sys.platform == "win32":
|
| 100 |
+
raise ImportError("Not using GMP on Windows")
|
| 101 |
+
|
| 102 |
+
lib = load_lib("gmp", gmp_defs)
|
| 103 |
+
implementation = {"library": "gmp", "api": backend}
|
| 104 |
+
|
| 105 |
+
if hasattr(lib, "__mpir_version"):
|
| 106 |
+
raise ImportError("MPIR library detected")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# Lazy creation of GMP methods
|
| 110 |
+
class _GMP(object):
|
| 111 |
+
|
| 112 |
+
def __getattr__(self, name):
|
| 113 |
+
if name.startswith("mpz_"):
|
| 114 |
+
func_name = "__gmpz_" + name[4:]
|
| 115 |
+
elif name.startswith("gmp_"):
|
| 116 |
+
func_name = "__gmp_" + name[4:]
|
| 117 |
+
else:
|
| 118 |
+
raise AttributeError("Attribute %s is invalid" % name)
|
| 119 |
+
func = getattr(lib, func_name)
|
| 120 |
+
setattr(self, name, func)
|
| 121 |
+
return func
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
_gmp = _GMP()
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
# In order to create a function that returns a pointer to
|
| 128 |
+
# a new MPZ structure, we need to break the abstraction
|
| 129 |
+
# and know exactly what ffi backend we have
|
| 130 |
+
if implementation["api"] == "ctypes":
|
| 131 |
+
from ctypes import Structure, c_int, c_void_p, byref
|
| 132 |
+
|
| 133 |
+
class _MPZ(Structure):
|
| 134 |
+
_fields_ = [('_mp_alloc', c_int),
|
| 135 |
+
('_mp_size', c_int),
|
| 136 |
+
('_mp_d', c_void_p)]
|
| 137 |
+
|
| 138 |
+
def new_mpz():
|
| 139 |
+
return byref(_MPZ())
|
| 140 |
+
|
| 141 |
+
_gmp.mpz_getlimbn.restype = c_ulong
|
| 142 |
+
|
| 143 |
+
else:
|
| 144 |
+
# We are using CFFI
|
| 145 |
+
from Crypto.Util._raw_api import ffi
|
| 146 |
+
|
| 147 |
+
def new_mpz():
|
| 148 |
+
return ffi.new("MPZ*")
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# Size of a native word
|
| 152 |
+
_sys_bits = 8 * struct.calcsize("P")
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class IntegerGMP(IntegerBase):
|
| 156 |
+
"""A fast, arbitrary precision integer"""
|
| 157 |
+
|
| 158 |
+
_zero_mpz_p = new_mpz()
|
| 159 |
+
_gmp.mpz_init_set_ui(_zero_mpz_p, c_ulong(0))
|
| 160 |
+
|
| 161 |
+
def __init__(self, value):
|
| 162 |
+
"""Initialize the integer to the given value."""
|
| 163 |
+
|
| 164 |
+
self._mpz_p = new_mpz()
|
| 165 |
+
self._initialized = False
|
| 166 |
+
|
| 167 |
+
if isinstance(value, float):
|
| 168 |
+
raise ValueError("A floating point type is not a natural number")
|
| 169 |
+
|
| 170 |
+
if is_native_int(value):
|
| 171 |
+
_gmp.mpz_init(self._mpz_p)
|
| 172 |
+
self._initialized = True
|
| 173 |
+
if value == 0:
|
| 174 |
+
return
|
| 175 |
+
|
| 176 |
+
tmp = new_mpz()
|
| 177 |
+
_gmp.mpz_init(tmp)
|
| 178 |
+
|
| 179 |
+
try:
|
| 180 |
+
positive = value >= 0
|
| 181 |
+
reduce = abs(value)
|
| 182 |
+
slots = (reduce.bit_length() - 1) // 32 + 1
|
| 183 |
+
|
| 184 |
+
while slots > 0:
|
| 185 |
+
slots = slots - 1
|
| 186 |
+
_gmp.mpz_set_ui(tmp,
|
| 187 |
+
c_ulong(0xFFFFFFFF & (reduce >> (slots * 32))))
|
| 188 |
+
_gmp.mpz_mul_2exp(tmp, tmp, c_ulong(slots * 32))
|
| 189 |
+
_gmp.mpz_add(self._mpz_p, self._mpz_p, tmp)
|
| 190 |
+
finally:
|
| 191 |
+
_gmp.mpz_clear(tmp)
|
| 192 |
+
|
| 193 |
+
if not positive:
|
| 194 |
+
_gmp.mpz_neg(self._mpz_p, self._mpz_p)
|
| 195 |
+
|
| 196 |
+
elif isinstance(value, IntegerGMP):
|
| 197 |
+
_gmp.mpz_init_set(self._mpz_p, value._mpz_p)
|
| 198 |
+
self._initialized = True
|
| 199 |
+
else:
|
| 200 |
+
raise NotImplementedError
|
| 201 |
+
|
| 202 |
+
# Conversions
|
| 203 |
+
def __int__(self):
|
| 204 |
+
tmp = new_mpz()
|
| 205 |
+
_gmp.mpz_init_set(tmp, self._mpz_p)
|
| 206 |
+
|
| 207 |
+
try:
|
| 208 |
+
value = 0
|
| 209 |
+
slot = 0
|
| 210 |
+
while _gmp.mpz_cmp(tmp, self._zero_mpz_p) != 0:
|
| 211 |
+
lsb = _gmp.mpz_get_ui(tmp) & 0xFFFFFFFF
|
| 212 |
+
value |= lsb << (slot * 32)
|
| 213 |
+
_gmp.mpz_tdiv_q_2exp(tmp, tmp, c_ulong(32))
|
| 214 |
+
slot = slot + 1
|
| 215 |
+
finally:
|
| 216 |
+
_gmp.mpz_clear(tmp)
|
| 217 |
+
|
| 218 |
+
if self < 0:
|
| 219 |
+
value = -value
|
| 220 |
+
return int(value)
|
| 221 |
+
|
| 222 |
+
def __str__(self):
|
| 223 |
+
return str(int(self))
|
| 224 |
+
|
| 225 |
+
def __repr__(self):
|
| 226 |
+
return "Integer(%s)" % str(self)
|
| 227 |
+
|
| 228 |
+
# Only Python 2.x
|
| 229 |
+
def __hex__(self):
|
| 230 |
+
return hex(int(self))
|
| 231 |
+
|
| 232 |
+
# Only Python 3.x
|
| 233 |
+
def __index__(self):
|
| 234 |
+
return int(self)
|
| 235 |
+
|
| 236 |
+
def to_bytes(self, block_size=0, byteorder='big'):
|
| 237 |
+
"""Convert the number into a byte string.
|
| 238 |
+
|
| 239 |
+
This method encodes the number in network order and prepends
|
| 240 |
+
as many zero bytes as required. It only works for non-negative
|
| 241 |
+
values.
|
| 242 |
+
|
| 243 |
+
:Parameters:
|
| 244 |
+
block_size : integer
|
| 245 |
+
The exact size the output byte string must have.
|
| 246 |
+
If zero, the string has the minimal length.
|
| 247 |
+
byteorder : string
|
| 248 |
+
'big' for big-endian integers (default), 'little' for litte-endian.
|
| 249 |
+
:Returns:
|
| 250 |
+
A byte string.
|
| 251 |
+
:Raise ValueError:
|
| 252 |
+
If the value is negative or if ``block_size`` is
|
| 253 |
+
provided and the length of the byte string would exceed it.
|
| 254 |
+
"""
|
| 255 |
+
|
| 256 |
+
if self < 0:
|
| 257 |
+
raise ValueError("Conversion only valid for non-negative numbers")
|
| 258 |
+
|
| 259 |
+
num_limbs = _gmp.mpz_size(self._mpz_p)
|
| 260 |
+
if _sys_bits == 32:
|
| 261 |
+
spchar = "L"
|
| 262 |
+
num_limbs = max(1, num_limbs, (block_size + 3) // 4)
|
| 263 |
+
elif _sys_bits == 64:
|
| 264 |
+
spchar = "Q"
|
| 265 |
+
num_limbs = max(1, num_limbs, (block_size + 7) // 8)
|
| 266 |
+
else:
|
| 267 |
+
raise ValueError("Unknown limb size")
|
| 268 |
+
|
| 269 |
+
# mpz_getlimbn returns 0 if i is larger than the number of actual limbs
|
| 270 |
+
limbs = [_gmp.mpz_getlimbn(self._mpz_p, num_limbs - i - 1) for i in range(num_limbs)]
|
| 271 |
+
|
| 272 |
+
result = struct.pack(">" + spchar * num_limbs, *limbs)
|
| 273 |
+
cutoff_len = len(result) - block_size
|
| 274 |
+
if block_size == 0:
|
| 275 |
+
result = result.lstrip(b'\x00')
|
| 276 |
+
elif cutoff_len > 0:
|
| 277 |
+
if result[:cutoff_len] != b'\x00' * (cutoff_len):
|
| 278 |
+
raise ValueError("Number is too big to convert to "
|
| 279 |
+
"byte string of prescribed length")
|
| 280 |
+
result = result[cutoff_len:]
|
| 281 |
+
elif cutoff_len < 0:
|
| 282 |
+
result = b'\x00' * (-cutoff_len) + result
|
| 283 |
+
|
| 284 |
+
if byteorder == 'little':
|
| 285 |
+
result = result[::-1]
|
| 286 |
+
elif byteorder == 'big':
|
| 287 |
+
pass
|
| 288 |
+
else:
|
| 289 |
+
raise ValueError("Incorrect byteorder")
|
| 290 |
+
|
| 291 |
+
if len(result) == 0:
|
| 292 |
+
result = b'\x00'
|
| 293 |
+
|
| 294 |
+
return result
|
| 295 |
+
|
| 296 |
+
@staticmethod
|
| 297 |
+
def from_bytes(byte_string, byteorder='big'):
|
| 298 |
+
"""Convert a byte string into a number.
|
| 299 |
+
|
| 300 |
+
:Parameters:
|
| 301 |
+
byte_string : byte string
|
| 302 |
+
The input number, encoded in network order.
|
| 303 |
+
It can only be non-negative.
|
| 304 |
+
byteorder : string
|
| 305 |
+
'big' for big-endian integers (default), 'little' for litte-endian.
|
| 306 |
+
|
| 307 |
+
:Return:
|
| 308 |
+
The ``Integer`` object carrying the same value as the input.
|
| 309 |
+
"""
|
| 310 |
+
result = IntegerGMP(0)
|
| 311 |
+
if byteorder == 'big':
|
| 312 |
+
pass
|
| 313 |
+
elif byteorder == 'little':
|
| 314 |
+
byte_string = bytearray(byte_string)
|
| 315 |
+
byte_string.reverse()
|
| 316 |
+
else:
|
| 317 |
+
raise ValueError("Incorrect byteorder")
|
| 318 |
+
_gmp.mpz_import(
|
| 319 |
+
result._mpz_p,
|
| 320 |
+
c_size_t(len(byte_string)), # Amount of words to read
|
| 321 |
+
1, # Big endian
|
| 322 |
+
c_size_t(1), # Each word is 1 byte long
|
| 323 |
+
0, # Endianess within a word - not relevant
|
| 324 |
+
c_size_t(0), # No nails
|
| 325 |
+
c_uint8_ptr(byte_string))
|
| 326 |
+
return result
|
| 327 |
+
|
| 328 |
+
# Relations
|
| 329 |
+
def _apply_and_return(self, func, term):
|
| 330 |
+
if not isinstance(term, IntegerGMP):
|
| 331 |
+
term = IntegerGMP(term)
|
| 332 |
+
return func(self._mpz_p, term._mpz_p)
|
| 333 |
+
|
| 334 |
+
def __eq__(self, term):
|
| 335 |
+
if not (isinstance(term, IntegerGMP) or is_native_int(term)):
|
| 336 |
+
return False
|
| 337 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) == 0
|
| 338 |
+
|
| 339 |
+
def __ne__(self, term):
|
| 340 |
+
if not (isinstance(term, IntegerGMP) or is_native_int(term)):
|
| 341 |
+
return True
|
| 342 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) != 0
|
| 343 |
+
|
| 344 |
+
def __lt__(self, term):
|
| 345 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) < 0
|
| 346 |
+
|
| 347 |
+
def __le__(self, term):
|
| 348 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) <= 0
|
| 349 |
+
|
| 350 |
+
def __gt__(self, term):
|
| 351 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) > 0
|
| 352 |
+
|
| 353 |
+
def __ge__(self, term):
|
| 354 |
+
return self._apply_and_return(_gmp.mpz_cmp, term) >= 0
|
| 355 |
+
|
| 356 |
+
def __nonzero__(self):
|
| 357 |
+
return _gmp.mpz_cmp(self._mpz_p, self._zero_mpz_p) != 0
|
| 358 |
+
__bool__ = __nonzero__
|
| 359 |
+
|
| 360 |
+
def is_negative(self):
|
| 361 |
+
return _gmp.mpz_cmp(self._mpz_p, self._zero_mpz_p) < 0
|
| 362 |
+
|
| 363 |
+
# Arithmetic operations
|
| 364 |
+
def __add__(self, term):
|
| 365 |
+
result = IntegerGMP(0)
|
| 366 |
+
if not isinstance(term, IntegerGMP):
|
| 367 |
+
try:
|
| 368 |
+
term = IntegerGMP(term)
|
| 369 |
+
except NotImplementedError:
|
| 370 |
+
return NotImplemented
|
| 371 |
+
_gmp.mpz_add(result._mpz_p,
|
| 372 |
+
self._mpz_p,
|
| 373 |
+
term._mpz_p)
|
| 374 |
+
return result
|
| 375 |
+
|
| 376 |
+
def __sub__(self, term):
|
| 377 |
+
result = IntegerGMP(0)
|
| 378 |
+
if not isinstance(term, IntegerGMP):
|
| 379 |
+
try:
|
| 380 |
+
term = IntegerGMP(term)
|
| 381 |
+
except NotImplementedError:
|
| 382 |
+
return NotImplemented
|
| 383 |
+
_gmp.mpz_sub(result._mpz_p,
|
| 384 |
+
self._mpz_p,
|
| 385 |
+
term._mpz_p)
|
| 386 |
+
return result
|
| 387 |
+
|
| 388 |
+
def __mul__(self, term):
|
| 389 |
+
result = IntegerGMP(0)
|
| 390 |
+
if not isinstance(term, IntegerGMP):
|
| 391 |
+
try:
|
| 392 |
+
term = IntegerGMP(term)
|
| 393 |
+
except NotImplementedError:
|
| 394 |
+
return NotImplemented
|
| 395 |
+
_gmp.mpz_mul(result._mpz_p,
|
| 396 |
+
self._mpz_p,
|
| 397 |
+
term._mpz_p)
|
| 398 |
+
return result
|
| 399 |
+
|
| 400 |
+
def __floordiv__(self, divisor):
|
| 401 |
+
if not isinstance(divisor, IntegerGMP):
|
| 402 |
+
divisor = IntegerGMP(divisor)
|
| 403 |
+
if _gmp.mpz_cmp(divisor._mpz_p,
|
| 404 |
+
self._zero_mpz_p) == 0:
|
| 405 |
+
raise ZeroDivisionError("Division by zero")
|
| 406 |
+
result = IntegerGMP(0)
|
| 407 |
+
_gmp.mpz_fdiv_q(result._mpz_p,
|
| 408 |
+
self._mpz_p,
|
| 409 |
+
divisor._mpz_p)
|
| 410 |
+
return result
|
| 411 |
+
|
| 412 |
+
def __mod__(self, divisor):
|
| 413 |
+
if not isinstance(divisor, IntegerGMP):
|
| 414 |
+
divisor = IntegerGMP(divisor)
|
| 415 |
+
comp = _gmp.mpz_cmp(divisor._mpz_p,
|
| 416 |
+
self._zero_mpz_p)
|
| 417 |
+
if comp == 0:
|
| 418 |
+
raise ZeroDivisionError("Division by zero")
|
| 419 |
+
if comp < 0:
|
| 420 |
+
raise ValueError("Modulus must be positive")
|
| 421 |
+
result = IntegerGMP(0)
|
| 422 |
+
_gmp.mpz_mod(result._mpz_p,
|
| 423 |
+
self._mpz_p,
|
| 424 |
+
divisor._mpz_p)
|
| 425 |
+
return result
|
| 426 |
+
|
| 427 |
+
def inplace_pow(self, exponent, modulus=None):
|
| 428 |
+
|
| 429 |
+
if modulus is None:
|
| 430 |
+
if exponent < 0:
|
| 431 |
+
raise ValueError("Exponent must not be negative")
|
| 432 |
+
|
| 433 |
+
# Normal exponentiation
|
| 434 |
+
if exponent > 256:
|
| 435 |
+
raise ValueError("Exponent is too big")
|
| 436 |
+
_gmp.mpz_pow_ui(self._mpz_p,
|
| 437 |
+
self._mpz_p, # Base
|
| 438 |
+
c_ulong(int(exponent))
|
| 439 |
+
)
|
| 440 |
+
else:
|
| 441 |
+
# Modular exponentiation
|
| 442 |
+
if not isinstance(modulus, IntegerGMP):
|
| 443 |
+
modulus = IntegerGMP(modulus)
|
| 444 |
+
if not modulus:
|
| 445 |
+
raise ZeroDivisionError("Division by zero")
|
| 446 |
+
if modulus.is_negative():
|
| 447 |
+
raise ValueError("Modulus must be positive")
|
| 448 |
+
if is_native_int(exponent):
|
| 449 |
+
if exponent < 0:
|
| 450 |
+
raise ValueError("Exponent must not be negative")
|
| 451 |
+
if exponent < 65536:
|
| 452 |
+
_gmp.mpz_powm_ui(self._mpz_p,
|
| 453 |
+
self._mpz_p,
|
| 454 |
+
c_ulong(exponent),
|
| 455 |
+
modulus._mpz_p)
|
| 456 |
+
return self
|
| 457 |
+
exponent = IntegerGMP(exponent)
|
| 458 |
+
elif exponent.is_negative():
|
| 459 |
+
raise ValueError("Exponent must not be negative")
|
| 460 |
+
_gmp.mpz_powm(self._mpz_p,
|
| 461 |
+
self._mpz_p,
|
| 462 |
+
exponent._mpz_p,
|
| 463 |
+
modulus._mpz_p)
|
| 464 |
+
return self
|
| 465 |
+
|
| 466 |
+
def __pow__(self, exponent, modulus=None):
|
| 467 |
+
result = IntegerGMP(self)
|
| 468 |
+
return result.inplace_pow(exponent, modulus)
|
| 469 |
+
|
| 470 |
+
def __abs__(self):
|
| 471 |
+
result = IntegerGMP(0)
|
| 472 |
+
_gmp.mpz_abs(result._mpz_p, self._mpz_p)
|
| 473 |
+
return result
|
| 474 |
+
|
| 475 |
+
def sqrt(self, modulus=None):
|
| 476 |
+
"""Return the largest Integer that does not
|
| 477 |
+
exceed the square root"""
|
| 478 |
+
|
| 479 |
+
if modulus is None:
|
| 480 |
+
if self < 0:
|
| 481 |
+
raise ValueError("Square root of negative value")
|
| 482 |
+
result = IntegerGMP(0)
|
| 483 |
+
_gmp.mpz_sqrt(result._mpz_p,
|
| 484 |
+
self._mpz_p)
|
| 485 |
+
else:
|
| 486 |
+
if modulus <= 0:
|
| 487 |
+
raise ValueError("Modulus must be positive")
|
| 488 |
+
modulus = int(modulus)
|
| 489 |
+
result = IntegerGMP(self._tonelli_shanks(int(self) % modulus, modulus))
|
| 490 |
+
|
| 491 |
+
return result
|
| 492 |
+
|
| 493 |
+
def __iadd__(self, term):
|
| 494 |
+
if is_native_int(term):
|
| 495 |
+
if 0 <= term < 65536:
|
| 496 |
+
_gmp.mpz_add_ui(self._mpz_p,
|
| 497 |
+
self._mpz_p,
|
| 498 |
+
c_ulong(term))
|
| 499 |
+
return self
|
| 500 |
+
if -65535 < term < 0:
|
| 501 |
+
_gmp.mpz_sub_ui(self._mpz_p,
|
| 502 |
+
self._mpz_p,
|
| 503 |
+
c_ulong(-term))
|
| 504 |
+
return self
|
| 505 |
+
term = IntegerGMP(term)
|
| 506 |
+
_gmp.mpz_add(self._mpz_p,
|
| 507 |
+
self._mpz_p,
|
| 508 |
+
term._mpz_p)
|
| 509 |
+
return self
|
| 510 |
+
|
| 511 |
+
def __isub__(self, term):
|
| 512 |
+
if is_native_int(term):
|
| 513 |
+
if 0 <= term < 65536:
|
| 514 |
+
_gmp.mpz_sub_ui(self._mpz_p,
|
| 515 |
+
self._mpz_p,
|
| 516 |
+
c_ulong(term))
|
| 517 |
+
return self
|
| 518 |
+
if -65535 < term < 0:
|
| 519 |
+
_gmp.mpz_add_ui(self._mpz_p,
|
| 520 |
+
self._mpz_p,
|
| 521 |
+
c_ulong(-term))
|
| 522 |
+
return self
|
| 523 |
+
term = IntegerGMP(term)
|
| 524 |
+
_gmp.mpz_sub(self._mpz_p,
|
| 525 |
+
self._mpz_p,
|
| 526 |
+
term._mpz_p)
|
| 527 |
+
return self
|
| 528 |
+
|
| 529 |
+
def __imul__(self, term):
|
| 530 |
+
if is_native_int(term):
|
| 531 |
+
if 0 <= term < 65536:
|
| 532 |
+
_gmp.mpz_mul_ui(self._mpz_p,
|
| 533 |
+
self._mpz_p,
|
| 534 |
+
c_ulong(term))
|
| 535 |
+
return self
|
| 536 |
+
if -65535 < term < 0:
|
| 537 |
+
_gmp.mpz_mul_ui(self._mpz_p,
|
| 538 |
+
self._mpz_p,
|
| 539 |
+
c_ulong(-term))
|
| 540 |
+
_gmp.mpz_neg(self._mpz_p, self._mpz_p)
|
| 541 |
+
return self
|
| 542 |
+
term = IntegerGMP(term)
|
| 543 |
+
_gmp.mpz_mul(self._mpz_p,
|
| 544 |
+
self._mpz_p,
|
| 545 |
+
term._mpz_p)
|
| 546 |
+
return self
|
| 547 |
+
|
| 548 |
+
def __imod__(self, divisor):
|
| 549 |
+
if not isinstance(divisor, IntegerGMP):
|
| 550 |
+
divisor = IntegerGMP(divisor)
|
| 551 |
+
comp = _gmp.mpz_cmp(divisor._mpz_p,
|
| 552 |
+
divisor._zero_mpz_p)
|
| 553 |
+
if comp == 0:
|
| 554 |
+
raise ZeroDivisionError("Division by zero")
|
| 555 |
+
if comp < 0:
|
| 556 |
+
raise ValueError("Modulus must be positive")
|
| 557 |
+
_gmp.mpz_mod(self._mpz_p,
|
| 558 |
+
self._mpz_p,
|
| 559 |
+
divisor._mpz_p)
|
| 560 |
+
return self
|
| 561 |
+
|
| 562 |
+
# Boolean/bit operations
|
| 563 |
+
def __and__(self, term):
|
| 564 |
+
result = IntegerGMP(0)
|
| 565 |
+
if not isinstance(term, IntegerGMP):
|
| 566 |
+
term = IntegerGMP(term)
|
| 567 |
+
_gmp.mpz_and(result._mpz_p,
|
| 568 |
+
self._mpz_p,
|
| 569 |
+
term._mpz_p)
|
| 570 |
+
return result
|
| 571 |
+
|
| 572 |
+
def __or__(self, term):
|
| 573 |
+
result = IntegerGMP(0)
|
| 574 |
+
if not isinstance(term, IntegerGMP):
|
| 575 |
+
term = IntegerGMP(term)
|
| 576 |
+
_gmp.mpz_ior(result._mpz_p,
|
| 577 |
+
self._mpz_p,
|
| 578 |
+
term._mpz_p)
|
| 579 |
+
return result
|
| 580 |
+
|
| 581 |
+
def __rshift__(self, pos):
|
| 582 |
+
result = IntegerGMP(0)
|
| 583 |
+
if pos < 0:
|
| 584 |
+
raise ValueError("negative shift count")
|
| 585 |
+
if pos > 65536:
|
| 586 |
+
if self < 0:
|
| 587 |
+
return -1
|
| 588 |
+
else:
|
| 589 |
+
return 0
|
| 590 |
+
_gmp.mpz_tdiv_q_2exp(result._mpz_p,
|
| 591 |
+
self._mpz_p,
|
| 592 |
+
c_ulong(int(pos)))
|
| 593 |
+
return result
|
| 594 |
+
|
| 595 |
+
def __irshift__(self, pos):
|
| 596 |
+
if pos < 0:
|
| 597 |
+
raise ValueError("negative shift count")
|
| 598 |
+
if pos > 65536:
|
| 599 |
+
if self < 0:
|
| 600 |
+
return -1
|
| 601 |
+
else:
|
| 602 |
+
return 0
|
| 603 |
+
_gmp.mpz_tdiv_q_2exp(self._mpz_p,
|
| 604 |
+
self._mpz_p,
|
| 605 |
+
c_ulong(int(pos)))
|
| 606 |
+
return self
|
| 607 |
+
|
| 608 |
+
def __lshift__(self, pos):
|
| 609 |
+
result = IntegerGMP(0)
|
| 610 |
+
if not 0 <= pos < 65536:
|
| 611 |
+
raise ValueError("Incorrect shift count")
|
| 612 |
+
_gmp.mpz_mul_2exp(result._mpz_p,
|
| 613 |
+
self._mpz_p,
|
| 614 |
+
c_ulong(int(pos)))
|
| 615 |
+
return result
|
| 616 |
+
|
| 617 |
+
def __ilshift__(self, pos):
|
| 618 |
+
if not 0 <= pos < 65536:
|
| 619 |
+
raise ValueError("Incorrect shift count")
|
| 620 |
+
_gmp.mpz_mul_2exp(self._mpz_p,
|
| 621 |
+
self._mpz_p,
|
| 622 |
+
c_ulong(int(pos)))
|
| 623 |
+
return self
|
| 624 |
+
|
| 625 |
+
def get_bit(self, n):
|
| 626 |
+
"""Return True if the n-th bit is set to 1.
|
| 627 |
+
Bit 0 is the least significant."""
|
| 628 |
+
|
| 629 |
+
if self < 0:
|
| 630 |
+
raise ValueError("no bit representation for negative values")
|
| 631 |
+
if n < 0:
|
| 632 |
+
raise ValueError("negative bit count")
|
| 633 |
+
if n > 65536:
|
| 634 |
+
return 0
|
| 635 |
+
return bool(_gmp.mpz_tstbit(self._mpz_p,
|
| 636 |
+
c_ulong(int(n))))
|
| 637 |
+
|
| 638 |
+
# Extra
|
| 639 |
+
def is_odd(self):
|
| 640 |
+
return _gmp.mpz_tstbit(self._mpz_p, 0) == 1
|
| 641 |
+
|
| 642 |
+
def is_even(self):
|
| 643 |
+
return _gmp.mpz_tstbit(self._mpz_p, 0) == 0
|
| 644 |
+
|
| 645 |
+
def size_in_bits(self):
|
| 646 |
+
"""Return the minimum number of bits that can encode the number."""
|
| 647 |
+
|
| 648 |
+
if self < 0:
|
| 649 |
+
raise ValueError("Conversion only valid for non-negative numbers")
|
| 650 |
+
return _gmp.mpz_sizeinbase(self._mpz_p, 2)
|
| 651 |
+
|
| 652 |
+
def size_in_bytes(self):
|
| 653 |
+
"""Return the minimum number of bytes that can encode the number."""
|
| 654 |
+
return (self.size_in_bits() - 1) // 8 + 1
|
| 655 |
+
|
| 656 |
+
def is_perfect_square(self):
|
| 657 |
+
return _gmp.mpz_perfect_square_p(self._mpz_p) != 0
|
| 658 |
+
|
| 659 |
+
def fail_if_divisible_by(self, small_prime):
|
| 660 |
+
"""Raise an exception if the small prime is a divisor."""
|
| 661 |
+
|
| 662 |
+
if is_native_int(small_prime):
|
| 663 |
+
if 0 < small_prime < 65536:
|
| 664 |
+
if _gmp.mpz_divisible_ui_p(self._mpz_p,
|
| 665 |
+
c_ulong(small_prime)):
|
| 666 |
+
raise ValueError("The value is composite")
|
| 667 |
+
return
|
| 668 |
+
small_prime = IntegerGMP(small_prime)
|
| 669 |
+
if _gmp.mpz_divisible_p(self._mpz_p,
|
| 670 |
+
small_prime._mpz_p):
|
| 671 |
+
raise ValueError("The value is composite")
|
| 672 |
+
|
| 673 |
+
def multiply_accumulate(self, a, b):
|
| 674 |
+
"""Increment the number by the product of a and b."""
|
| 675 |
+
|
| 676 |
+
if not isinstance(a, IntegerGMP):
|
| 677 |
+
a = IntegerGMP(a)
|
| 678 |
+
if is_native_int(b):
|
| 679 |
+
if 0 < b < 65536:
|
| 680 |
+
_gmp.mpz_addmul_ui(self._mpz_p,
|
| 681 |
+
a._mpz_p,
|
| 682 |
+
c_ulong(b))
|
| 683 |
+
return self
|
| 684 |
+
if -65535 < b < 0:
|
| 685 |
+
_gmp.mpz_submul_ui(self._mpz_p,
|
| 686 |
+
a._mpz_p,
|
| 687 |
+
c_ulong(-b))
|
| 688 |
+
return self
|
| 689 |
+
b = IntegerGMP(b)
|
| 690 |
+
_gmp.mpz_addmul(self._mpz_p,
|
| 691 |
+
a._mpz_p,
|
| 692 |
+
b._mpz_p)
|
| 693 |
+
return self
|
| 694 |
+
|
| 695 |
+
def set(self, source):
|
| 696 |
+
"""Set the Integer to have the given value"""
|
| 697 |
+
|
| 698 |
+
if not isinstance(source, IntegerGMP):
|
| 699 |
+
source = IntegerGMP(source)
|
| 700 |
+
_gmp.mpz_set(self._mpz_p,
|
| 701 |
+
source._mpz_p)
|
| 702 |
+
return self
|
| 703 |
+
|
| 704 |
+
def inplace_inverse(self, modulus):
|
| 705 |
+
"""Compute the inverse of this number in the ring of
|
| 706 |
+
modulo integers.
|
| 707 |
+
|
| 708 |
+
Raise an exception if no inverse exists.
|
| 709 |
+
"""
|
| 710 |
+
|
| 711 |
+
if not isinstance(modulus, IntegerGMP):
|
| 712 |
+
modulus = IntegerGMP(modulus)
|
| 713 |
+
|
| 714 |
+
comp = _gmp.mpz_cmp(modulus._mpz_p,
|
| 715 |
+
self._zero_mpz_p)
|
| 716 |
+
if comp == 0:
|
| 717 |
+
raise ZeroDivisionError("Modulus cannot be zero")
|
| 718 |
+
if comp < 0:
|
| 719 |
+
raise ValueError("Modulus must be positive")
|
| 720 |
+
|
| 721 |
+
result = _gmp.mpz_invert(self._mpz_p,
|
| 722 |
+
self._mpz_p,
|
| 723 |
+
modulus._mpz_p)
|
| 724 |
+
if not result:
|
| 725 |
+
raise ValueError("No inverse value can be computed")
|
| 726 |
+
return self
|
| 727 |
+
|
| 728 |
+
def inverse(self, modulus):
|
| 729 |
+
result = IntegerGMP(self)
|
| 730 |
+
result.inplace_inverse(modulus)
|
| 731 |
+
return result
|
| 732 |
+
|
| 733 |
+
def gcd(self, term):
|
| 734 |
+
"""Compute the greatest common denominator between this
|
| 735 |
+
number and another term."""
|
| 736 |
+
|
| 737 |
+
result = IntegerGMP(0)
|
| 738 |
+
if is_native_int(term):
|
| 739 |
+
if 0 < term < 65535:
|
| 740 |
+
_gmp.mpz_gcd_ui(result._mpz_p,
|
| 741 |
+
self._mpz_p,
|
| 742 |
+
c_ulong(term))
|
| 743 |
+
return result
|
| 744 |
+
term = IntegerGMP(term)
|
| 745 |
+
_gmp.mpz_gcd(result._mpz_p, self._mpz_p, term._mpz_p)
|
| 746 |
+
return result
|
| 747 |
+
|
| 748 |
+
def lcm(self, term):
|
| 749 |
+
"""Compute the least common multiplier between this
|
| 750 |
+
number and another term."""
|
| 751 |
+
|
| 752 |
+
result = IntegerGMP(0)
|
| 753 |
+
if not isinstance(term, IntegerGMP):
|
| 754 |
+
term = IntegerGMP(term)
|
| 755 |
+
_gmp.mpz_lcm(result._mpz_p, self._mpz_p, term._mpz_p)
|
| 756 |
+
return result
|
| 757 |
+
|
| 758 |
+
@staticmethod
|
| 759 |
+
def jacobi_symbol(a, n):
|
| 760 |
+
"""Compute the Jacobi symbol"""
|
| 761 |
+
|
| 762 |
+
if not isinstance(a, IntegerGMP):
|
| 763 |
+
a = IntegerGMP(a)
|
| 764 |
+
if not isinstance(n, IntegerGMP):
|
| 765 |
+
n = IntegerGMP(n)
|
| 766 |
+
if n <= 0 or n.is_even():
|
| 767 |
+
raise ValueError("n must be positive odd for the Jacobi symbol")
|
| 768 |
+
return _gmp.mpz_jacobi(a._mpz_p, n._mpz_p)
|
| 769 |
+
|
| 770 |
+
@staticmethod
|
| 771 |
+
def _mult_modulo_bytes(term1, term2, modulus):
|
| 772 |
+
if not isinstance(term1, IntegerGMP):
|
| 773 |
+
term1 = IntegerGMP(term1)
|
| 774 |
+
if not isinstance(term2, IntegerGMP):
|
| 775 |
+
term2 = IntegerGMP(term2)
|
| 776 |
+
if not isinstance(modulus, IntegerGMP):
|
| 777 |
+
modulus = IntegerGMP(modulus)
|
| 778 |
+
|
| 779 |
+
if modulus < 0:
|
| 780 |
+
raise ValueError("Modulus must be positive")
|
| 781 |
+
if modulus == 0:
|
| 782 |
+
raise ZeroDivisionError("Modulus cannot be zero")
|
| 783 |
+
if (modulus & 1) == 0:
|
| 784 |
+
raise ValueError("Odd modulus is required")
|
| 785 |
+
|
| 786 |
+
product = (term1 * term2) % modulus
|
| 787 |
+
return product.to_bytes(modulus.size_in_bytes())
|
| 788 |
+
|
| 789 |
+
# Clean-up
|
| 790 |
+
def __del__(self):
|
| 791 |
+
|
| 792 |
+
try:
|
| 793 |
+
if self._mpz_p is not None:
|
| 794 |
+
if self._initialized:
|
| 795 |
+
_gmp.mpz_clear(self._mpz_p)
|
| 796 |
+
|
| 797 |
+
self._mpz_p = None
|
| 798 |
+
except AttributeError:
|
| 799 |
+
pass
|
wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.pyi
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._IntegerBase import IntegerBase
|
| 2 |
+
class IntegerGMP(IntegerBase):
|
| 3 |
+
pass
|
wemm/lib/python3.10/site-packages/Crypto/Math/__init__.py
ADDED
|
File without changes
|
wemm/lib/python3.10/site-packages/Crypto/Signature/PKCS1_PSS.pyi
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union, Callable, Optional
|
| 2 |
+
from typing_extensions import Protocol
|
| 3 |
+
|
| 4 |
+
from Crypto.PublicKey.RSA import RsaKey
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Hash(Protocol):
|
| 8 |
+
def digest(self) -> bytes: ...
|
| 9 |
+
def update(self, bytes) -> None: ...
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class HashModule(Protocol):
|
| 13 |
+
@staticmethod
|
| 14 |
+
def new(data: Optional[bytes]) -> Hash: ...
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
MaskFunction = Callable[[bytes, int, Union[Hash, HashModule]], bytes]
|
| 18 |
+
RndFunction = Callable[[int], bytes]
|
| 19 |
+
|
| 20 |
+
class PSS_SigScheme:
|
| 21 |
+
def __init__(self, key: RsaKey, mgfunc: MaskFunction, saltLen: int, randfunc: RndFunction) -> None: ...
|
| 22 |
+
def can_sign(self) -> bool: ...
|
| 23 |
+
def sign(self, msg_hash: Hash) -> bytes: ...
|
| 24 |
+
def verify(self, msg_hash: Hash, signature: bytes) -> bool: ...
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def new(rsa_key: RsaKey, mgfunc: Optional[MaskFunction]=None, saltLen: Optional[int]=None, randfunc: Optional[RndFunction]=None) -> PSS_SigScheme: ...
|
wemm/lib/python3.10/site-packages/Crypto/Signature/pss.pyi
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union, Callable, Optional
|
| 2 |
+
from typing_extensions import Protocol
|
| 3 |
+
|
| 4 |
+
from Crypto.PublicKey.RSA import RsaKey
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Hash(Protocol):
|
| 8 |
+
def digest(self) -> bytes: ...
|
| 9 |
+
def update(self, bytes) -> None: ...
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class HashModule(Protocol):
|
| 13 |
+
@staticmethod
|
| 14 |
+
def new(data: Optional[bytes]) -> Hash: ...
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
MaskFunction = Callable[[bytes, int, Union[Hash, HashModule]], bytes]
|
| 18 |
+
RndFunction = Callable[[int], bytes]
|
| 19 |
+
|
| 20 |
+
class PSS_SigScheme:
|
| 21 |
+
def __init__(self, key: RsaKey, mgfunc: MaskFunction, saltLen: int, randfunc: RndFunction) -> None: ...
|
| 22 |
+
def can_sign(self) -> bool: ...
|
| 23 |
+
def sign(self, msg_hash: Hash) -> bytes: ...
|
| 24 |
+
def verify(self, msg_hash: Hash, signature: bytes) -> None: ...
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
MGF1 : MaskFunction
|
| 28 |
+
def _EMSA_PSS_ENCODE(mhash: Hash, emBits: int, randFunc: RndFunction, mgf:MaskFunction, sLen: int) -> str: ...
|
| 29 |
+
def _EMSA_PSS_VERIFY(mhash: Hash, em: str, emBits: int, mgf: MaskFunction, sLen: int) -> None: ...
|
| 30 |
+
def new(rsa_key: RsaKey, **kwargs: Union[MaskFunction, RndFunction, int]) -> PSS_SigScheme: ...
|
wemm/lib/python3.10/site-packages/Crypto/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (427 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (717 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2015 Hynek Schlawack and the attrs contributors
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a06b3ceba5eaeb6c6f1759e44ec4eb1d041492dbeb669d033a0f92a05fe513a2
|
| 3 |
+
size 1839
|
wemm/lib/python3.10/site-packages/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:190a177a61ada13dd88dabd5c81eaf6885cc2c6823df30485ce2262e4d941118
|
| 3 |
+
size 1153
|
wemm/lib/python3.10/site-packages/tokenizers/__init__.pyi
ADDED
|
@@ -0,0 +1,1200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class AddedToken:
|
| 3 |
+
"""
|
| 4 |
+
Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`.
|
| 5 |
+
It can have special options that defines the way it should behave.
|
| 6 |
+
|
| 7 |
+
Args:
|
| 8 |
+
content (:obj:`str`): The content of the token
|
| 9 |
+
|
| 10 |
+
single_word (:obj:`bool`, defaults to :obj:`False`):
|
| 11 |
+
Defines whether this token should only match single words. If :obj:`True`, this
|
| 12 |
+
token will never match inside of a word. For example the token ``ing`` would match
|
| 13 |
+
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
|
| 14 |
+
The notion of "`inside of a word`" is defined by the word boundaries pattern in
|
| 15 |
+
regular expressions (ie. the token should start and end with word boundaries).
|
| 16 |
+
|
| 17 |
+
lstrip (:obj:`bool`, defaults to :obj:`False`):
|
| 18 |
+
Defines whether this token should strip all potential whitespaces on its left side.
|
| 19 |
+
If :obj:`True`, this token will greedily match any whitespace on its left. For
|
| 20 |
+
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
|
| 21 |
+
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
|
| 22 |
+
|
| 23 |
+
rstrip (:obj:`bool`, defaults to :obj:`False`):
|
| 24 |
+
Defines whether this token should strip all potential whitespaces on its right
|
| 25 |
+
side. If :obj:`True`, this token will greedily match any whitespace on its right.
|
| 26 |
+
It works just like :obj:`lstrip` but on the right.
|
| 27 |
+
|
| 28 |
+
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
|
| 29 |
+
Defines whether this token should match against the normalized version of the input
|
| 30 |
+
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
|
| 31 |
+
lowercasing the text, the token could be extract from the input ``"I saw a lion
|
| 32 |
+
Yesterday"``.
|
| 33 |
+
special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
|
| 34 |
+
Defines whether this token should be skipped when decoding.
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
|
| 38 |
+
pass
|
| 39 |
+
|
| 40 |
+
@property
|
| 41 |
+
def content(self):
|
| 42 |
+
"""
|
| 43 |
+
Get the content of this :obj:`AddedToken`
|
| 44 |
+
"""
|
| 45 |
+
pass
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def lstrip(self):
|
| 49 |
+
"""
|
| 50 |
+
Get the value of the :obj:`lstrip` option
|
| 51 |
+
"""
|
| 52 |
+
pass
|
| 53 |
+
|
| 54 |
+
@property
|
| 55 |
+
def normalized(self):
|
| 56 |
+
"""
|
| 57 |
+
Get the value of the :obj:`normalized` option
|
| 58 |
+
"""
|
| 59 |
+
pass
|
| 60 |
+
|
| 61 |
+
@property
|
| 62 |
+
def rstrip(self):
|
| 63 |
+
"""
|
| 64 |
+
Get the value of the :obj:`rstrip` option
|
| 65 |
+
"""
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
@property
|
| 69 |
+
def single_word(self):
|
| 70 |
+
"""
|
| 71 |
+
Get the value of the :obj:`single_word` option
|
| 72 |
+
"""
|
| 73 |
+
pass
|
| 74 |
+
|
| 75 |
+
@property
|
| 76 |
+
def special(self):
|
| 77 |
+
"""
|
| 78 |
+
Get the value of the :obj:`special` option
|
| 79 |
+
"""
|
| 80 |
+
pass
|
| 81 |
+
|
| 82 |
+
class Encoding:
|
| 83 |
+
"""
|
| 84 |
+
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
|
| 85 |
+
"""
|
| 86 |
+
@property
|
| 87 |
+
def attention_mask(self):
|
| 88 |
+
"""
|
| 89 |
+
The attention mask
|
| 90 |
+
|
| 91 |
+
This indicates to the LM which tokens should be attended to, and which should not.
|
| 92 |
+
This is especially important when batching sequences, where we need to applying
|
| 93 |
+
padding.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
:obj:`List[int]`: The attention mask
|
| 97 |
+
"""
|
| 98 |
+
pass
|
| 99 |
+
|
| 100 |
+
def char_to_token(self, char_pos, sequence_index=0):
|
| 101 |
+
"""
|
| 102 |
+
Get the token that contains the char at the given position in the input sequence.
|
| 103 |
+
|
| 104 |
+
Args:
|
| 105 |
+
char_pos (:obj:`int`):
|
| 106 |
+
The position of a char in the input string
|
| 107 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 108 |
+
The index of the sequence that contains the target char
|
| 109 |
+
|
| 110 |
+
Returns:
|
| 111 |
+
:obj:`int`: The index of the token that contains this char in the encoded sequence
|
| 112 |
+
"""
|
| 113 |
+
pass
|
| 114 |
+
|
| 115 |
+
def char_to_word(self, char_pos, sequence_index=0):
|
| 116 |
+
"""
|
| 117 |
+
Get the word that contains the char at the given position in the input sequence.
|
| 118 |
+
|
| 119 |
+
Args:
|
| 120 |
+
char_pos (:obj:`int`):
|
| 121 |
+
The position of a char in the input string
|
| 122 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 123 |
+
The index of the sequence that contains the target char
|
| 124 |
+
|
| 125 |
+
Returns:
|
| 126 |
+
:obj:`int`: The index of the word that contains this char in the input sequence
|
| 127 |
+
"""
|
| 128 |
+
pass
|
| 129 |
+
|
| 130 |
+
@property
|
| 131 |
+
def ids(self):
|
| 132 |
+
"""
|
| 133 |
+
The generated IDs
|
| 134 |
+
|
| 135 |
+
The IDs are the main input to a Language Model. They are the token indices,
|
| 136 |
+
the numerical representations that a LM understands.
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
:obj:`List[int]`: The list of IDs
|
| 140 |
+
"""
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
@staticmethod
|
| 144 |
+
def merge(encodings, growing_offsets=True):
|
| 145 |
+
"""
|
| 146 |
+
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
|
| 147 |
+
|
| 148 |
+
Args:
|
| 149 |
+
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
|
| 150 |
+
The list of encodings that should be merged in one
|
| 151 |
+
|
| 152 |
+
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
|
| 153 |
+
Whether the offsets should accumulate while merging
|
| 154 |
+
|
| 155 |
+
Returns:
|
| 156 |
+
:class:`~tokenizers.Encoding`: The resulting Encoding
|
| 157 |
+
"""
|
| 158 |
+
pass
|
| 159 |
+
|
| 160 |
+
@property
|
| 161 |
+
def n_sequences(self):
|
| 162 |
+
"""
|
| 163 |
+
The number of sequences represented
|
| 164 |
+
|
| 165 |
+
Returns:
|
| 166 |
+
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
|
| 167 |
+
"""
|
| 168 |
+
pass
|
| 169 |
+
|
| 170 |
+
@property
|
| 171 |
+
def offsets(self):
|
| 172 |
+
"""
|
| 173 |
+
The offsets associated to each token
|
| 174 |
+
|
| 175 |
+
These offsets let's you slice the input string, and thus retrieve the original
|
| 176 |
+
part that led to producing the corresponding token.
|
| 177 |
+
|
| 178 |
+
Returns:
|
| 179 |
+
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
|
| 180 |
+
"""
|
| 181 |
+
pass
|
| 182 |
+
|
| 183 |
+
@property
|
| 184 |
+
def overflowing(self):
|
| 185 |
+
"""
|
| 186 |
+
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
|
| 187 |
+
|
| 188 |
+
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
|
| 189 |
+
the output into as many pieces as required to match the specified maximum length.
|
| 190 |
+
This field lets you retrieve all the subsequent pieces.
|
| 191 |
+
|
| 192 |
+
When you use pairs of sequences, the overflowing pieces will contain enough
|
| 193 |
+
variations to cover all the possible combinations, while respecting the provided
|
| 194 |
+
maximum length.
|
| 195 |
+
"""
|
| 196 |
+
pass
|
| 197 |
+
|
| 198 |
+
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
|
| 199 |
+
"""
|
| 200 |
+
Pad the :class:`~tokenizers.Encoding` at the given length
|
| 201 |
+
|
| 202 |
+
Args:
|
| 203 |
+
length (:obj:`int`):
|
| 204 |
+
The desired length
|
| 205 |
+
|
| 206 |
+
direction: (:obj:`str`, defaults to :obj:`right`):
|
| 207 |
+
The expected padding direction. Can be either :obj:`right` or :obj:`left`
|
| 208 |
+
|
| 209 |
+
pad_id (:obj:`int`, defaults to :obj:`0`):
|
| 210 |
+
The ID corresponding to the padding token
|
| 211 |
+
|
| 212 |
+
pad_type_id (:obj:`int`, defaults to :obj:`0`):
|
| 213 |
+
The type ID corresponding to the padding token
|
| 214 |
+
|
| 215 |
+
pad_token (:obj:`str`, defaults to `[PAD]`):
|
| 216 |
+
The pad token to use
|
| 217 |
+
"""
|
| 218 |
+
pass
|
| 219 |
+
|
| 220 |
+
@property
|
| 221 |
+
def sequence_ids(self):
|
| 222 |
+
"""
|
| 223 |
+
The generated sequence indices.
|
| 224 |
+
|
| 225 |
+
They represent the index of the input sequence associated to each token.
|
| 226 |
+
The sequence id can be None if the token is not related to any input sequence,
|
| 227 |
+
like for example with special tokens.
|
| 228 |
+
|
| 229 |
+
Returns:
|
| 230 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
|
| 231 |
+
"""
|
| 232 |
+
pass
|
| 233 |
+
|
| 234 |
+
def set_sequence_id(self, sequence_id):
|
| 235 |
+
"""
|
| 236 |
+
Set the given sequence index
|
| 237 |
+
|
| 238 |
+
Set the given sequence index for the whole range of tokens contained in this
|
| 239 |
+
:class:`~tokenizers.Encoding`.
|
| 240 |
+
"""
|
| 241 |
+
pass
|
| 242 |
+
|
| 243 |
+
@property
|
| 244 |
+
def special_tokens_mask(self):
|
| 245 |
+
"""
|
| 246 |
+
The special token mask
|
| 247 |
+
|
| 248 |
+
This indicates which tokens are special tokens, and which are not.
|
| 249 |
+
|
| 250 |
+
Returns:
|
| 251 |
+
:obj:`List[int]`: The special tokens mask
|
| 252 |
+
"""
|
| 253 |
+
pass
|
| 254 |
+
|
| 255 |
+
def token_to_chars(self, token_index):
|
| 256 |
+
"""
|
| 257 |
+
Get the offsets of the token at the given index.
|
| 258 |
+
|
| 259 |
+
The returned offsets are related to the input sequence that contains the
|
| 260 |
+
token. In order to determine in which input sequence it belongs, you
|
| 261 |
+
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
token_index (:obj:`int`):
|
| 265 |
+
The index of a token in the encoded sequence.
|
| 266 |
+
|
| 267 |
+
Returns:
|
| 268 |
+
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
|
| 269 |
+
"""
|
| 270 |
+
pass
|
| 271 |
+
|
| 272 |
+
def token_to_sequence(self, token_index):
|
| 273 |
+
"""
|
| 274 |
+
Get the index of the sequence represented by the given token.
|
| 275 |
+
|
| 276 |
+
In the general use case, this method returns :obj:`0` for a single sequence or
|
| 277 |
+
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
|
| 278 |
+
|
| 279 |
+
Args:
|
| 280 |
+
token_index (:obj:`int`):
|
| 281 |
+
The index of a token in the encoded sequence.
|
| 282 |
+
|
| 283 |
+
Returns:
|
| 284 |
+
:obj:`int`: The sequence id of the given token
|
| 285 |
+
"""
|
| 286 |
+
pass
|
| 287 |
+
|
| 288 |
+
def token_to_word(self, token_index):
|
| 289 |
+
"""
|
| 290 |
+
Get the index of the word that contains the token in one of the input sequences.
|
| 291 |
+
|
| 292 |
+
The returned word index is related to the input sequence that contains
|
| 293 |
+
the token. In order to determine in which input sequence it belongs, you
|
| 294 |
+
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
|
| 295 |
+
|
| 296 |
+
Args:
|
| 297 |
+
token_index (:obj:`int`):
|
| 298 |
+
The index of a token in the encoded sequence.
|
| 299 |
+
|
| 300 |
+
Returns:
|
| 301 |
+
:obj:`int`: The index of the word in the relevant input sequence.
|
| 302 |
+
"""
|
| 303 |
+
pass
|
| 304 |
+
|
| 305 |
+
@property
|
| 306 |
+
def tokens(self):
|
| 307 |
+
"""
|
| 308 |
+
The generated tokens
|
| 309 |
+
|
| 310 |
+
They are the string representation of the IDs.
|
| 311 |
+
|
| 312 |
+
Returns:
|
| 313 |
+
:obj:`List[str]`: The list of tokens
|
| 314 |
+
"""
|
| 315 |
+
pass
|
| 316 |
+
|
| 317 |
+
def truncate(self, max_length, stride=0, direction="right"):
|
| 318 |
+
"""
|
| 319 |
+
Truncate the :class:`~tokenizers.Encoding` at the given length
|
| 320 |
+
|
| 321 |
+
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
|
| 322 |
+
this information is lost. It will be considered as representing a single sequence.
|
| 323 |
+
|
| 324 |
+
Args:
|
| 325 |
+
max_length (:obj:`int`):
|
| 326 |
+
The desired length
|
| 327 |
+
|
| 328 |
+
stride (:obj:`int`, defaults to :obj:`0`):
|
| 329 |
+
The length of previous content to be included in each overflowing piece
|
| 330 |
+
|
| 331 |
+
direction (:obj:`str`, defaults to :obj:`right`):
|
| 332 |
+
Truncate direction
|
| 333 |
+
"""
|
| 334 |
+
pass
|
| 335 |
+
|
| 336 |
+
@property
|
| 337 |
+
def type_ids(self):
|
| 338 |
+
"""
|
| 339 |
+
The generated type IDs
|
| 340 |
+
|
| 341 |
+
Generally used for tasks like sequence classification or question answering,
|
| 342 |
+
these tokens let the LM know which input sequence corresponds to each tokens.
|
| 343 |
+
|
| 344 |
+
Returns:
|
| 345 |
+
:obj:`List[int]`: The list of type ids
|
| 346 |
+
"""
|
| 347 |
+
pass
|
| 348 |
+
|
| 349 |
+
@property
|
| 350 |
+
def word_ids(self):
|
| 351 |
+
"""
|
| 352 |
+
The generated word indices.
|
| 353 |
+
|
| 354 |
+
They represent the index of the word associated to each token.
|
| 355 |
+
When the input is pre-tokenized, they correspond to the ID of the given input label,
|
| 356 |
+
otherwise they correspond to the words indices as defined by the
|
| 357 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
|
| 358 |
+
|
| 359 |
+
For special tokens and such (any token that was generated from something that was
|
| 360 |
+
not part of the input), the output is :obj:`None`
|
| 361 |
+
|
| 362 |
+
Returns:
|
| 363 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
|
| 364 |
+
"""
|
| 365 |
+
pass
|
| 366 |
+
|
| 367 |
+
def word_to_chars(self, word_index, sequence_index=0):
|
| 368 |
+
"""
|
| 369 |
+
Get the offsets of the word at the given index in one of the input sequences.
|
| 370 |
+
|
| 371 |
+
Args:
|
| 372 |
+
word_index (:obj:`int`):
|
| 373 |
+
The index of a word in one of the input sequences.
|
| 374 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 375 |
+
The index of the sequence that contains the target word
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
|
| 379 |
+
"""
|
| 380 |
+
pass
|
| 381 |
+
|
| 382 |
+
def word_to_tokens(self, word_index, sequence_index=0):
|
| 383 |
+
"""
|
| 384 |
+
Get the encoded tokens corresponding to the word at the given index
|
| 385 |
+
in one of the input sequences.
|
| 386 |
+
|
| 387 |
+
Args:
|
| 388 |
+
word_index (:obj:`int`):
|
| 389 |
+
The index of a word in one of the input sequences.
|
| 390 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 391 |
+
The index of the sequence that contains the target word
|
| 392 |
+
|
| 393 |
+
Returns:
|
| 394 |
+
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
|
| 395 |
+
"""
|
| 396 |
+
pass
|
| 397 |
+
|
| 398 |
+
@property
|
| 399 |
+
def words(self):
|
| 400 |
+
"""
|
| 401 |
+
The generated word indices.
|
| 402 |
+
|
| 403 |
+
.. warning::
|
| 404 |
+
This is deprecated and will be removed in a future version.
|
| 405 |
+
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
|
| 406 |
+
|
| 407 |
+
They represent the index of the word associated to each token.
|
| 408 |
+
When the input is pre-tokenized, they correspond to the ID of the given input label,
|
| 409 |
+
otherwise they correspond to the words indices as defined by the
|
| 410 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
|
| 411 |
+
|
| 412 |
+
For special tokens and such (any token that was generated from something that was
|
| 413 |
+
not part of the input), the output is :obj:`None`
|
| 414 |
+
|
| 415 |
+
Returns:
|
| 416 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
|
| 417 |
+
"""
|
| 418 |
+
pass
|
| 419 |
+
|
| 420 |
+
class NormalizedString:
|
| 421 |
+
"""
|
| 422 |
+
NormalizedString
|
| 423 |
+
|
| 424 |
+
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
|
| 425 |
+
While making all the requested modifications, it keeps track of the alignment information
|
| 426 |
+
between the two versions of the string.
|
| 427 |
+
|
| 428 |
+
Args:
|
| 429 |
+
sequence: str:
|
| 430 |
+
The string sequence used to initialize this NormalizedString
|
| 431 |
+
"""
|
| 432 |
+
def append(self, s):
|
| 433 |
+
"""
|
| 434 |
+
Append the given sequence to the string
|
| 435 |
+
"""
|
| 436 |
+
pass
|
| 437 |
+
|
| 438 |
+
def clear(self):
|
| 439 |
+
"""
|
| 440 |
+
Clears the string
|
| 441 |
+
"""
|
| 442 |
+
pass
|
| 443 |
+
|
| 444 |
+
def filter(self, func):
|
| 445 |
+
"""
|
| 446 |
+
Filter each character of the string using the given func
|
| 447 |
+
"""
|
| 448 |
+
pass
|
| 449 |
+
|
| 450 |
+
def for_each(self, func):
|
| 451 |
+
"""
|
| 452 |
+
Calls the given function for each character of the string
|
| 453 |
+
"""
|
| 454 |
+
pass
|
| 455 |
+
|
| 456 |
+
def lowercase(self):
|
| 457 |
+
"""
|
| 458 |
+
Lowercase the string
|
| 459 |
+
"""
|
| 460 |
+
pass
|
| 461 |
+
|
| 462 |
+
def lstrip(self):
|
| 463 |
+
"""
|
| 464 |
+
Strip the left of the string
|
| 465 |
+
"""
|
| 466 |
+
pass
|
| 467 |
+
|
| 468 |
+
def map(self, func):
|
| 469 |
+
"""
|
| 470 |
+
Calls the given function for each character of the string
|
| 471 |
+
|
| 472 |
+
Replaces each character of the string using the returned value. Each
|
| 473 |
+
returned value **must** be a str of length 1 (ie a character).
|
| 474 |
+
"""
|
| 475 |
+
pass
|
| 476 |
+
|
| 477 |
+
def nfc(self):
|
| 478 |
+
"""
|
| 479 |
+
Runs the NFC normalization
|
| 480 |
+
"""
|
| 481 |
+
pass
|
| 482 |
+
|
| 483 |
+
def nfd(self):
|
| 484 |
+
"""
|
| 485 |
+
Runs the NFD normalization
|
| 486 |
+
"""
|
| 487 |
+
pass
|
| 488 |
+
|
| 489 |
+
def nfkc(self):
|
| 490 |
+
"""
|
| 491 |
+
Runs the NFKC normalization
|
| 492 |
+
"""
|
| 493 |
+
pass
|
| 494 |
+
|
| 495 |
+
def nfkd(self):
|
| 496 |
+
"""
|
| 497 |
+
Runs the NFKD normalization
|
| 498 |
+
"""
|
| 499 |
+
pass
|
| 500 |
+
|
| 501 |
+
@property
|
| 502 |
+
def normalized(self):
|
| 503 |
+
"""
|
| 504 |
+
The normalized part of the string
|
| 505 |
+
"""
|
| 506 |
+
pass
|
| 507 |
+
|
| 508 |
+
def prepend(self, s):
|
| 509 |
+
"""
|
| 510 |
+
Prepend the given sequence to the string
|
| 511 |
+
"""
|
| 512 |
+
pass
|
| 513 |
+
|
| 514 |
+
def replace(self, pattern, content):
|
| 515 |
+
"""
|
| 516 |
+
Replace the content of the given pattern with the provided content
|
| 517 |
+
|
| 518 |
+
Args:
|
| 519 |
+
pattern: Pattern:
|
| 520 |
+
A pattern used to match the string. Usually a string or a Regex
|
| 521 |
+
|
| 522 |
+
content: str:
|
| 523 |
+
The content to be used as replacement
|
| 524 |
+
"""
|
| 525 |
+
pass
|
| 526 |
+
|
| 527 |
+
def rstrip(self):
|
| 528 |
+
"""
|
| 529 |
+
Strip the right of the string
|
| 530 |
+
"""
|
| 531 |
+
pass
|
| 532 |
+
|
| 533 |
+
def slice(self, range):
|
| 534 |
+
"""
|
| 535 |
+
Slice the string using the given range
|
| 536 |
+
"""
|
| 537 |
+
pass
|
| 538 |
+
|
| 539 |
+
def split(self, pattern, behavior):
|
| 540 |
+
"""
|
| 541 |
+
Split the NormalizedString using the given pattern and the specified behavior
|
| 542 |
+
|
| 543 |
+
Args:
|
| 544 |
+
pattern: Pattern:
|
| 545 |
+
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
|
| 546 |
+
|
| 547 |
+
behavior: SplitDelimiterBehavior:
|
| 548 |
+
The behavior to use when splitting.
|
| 549 |
+
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
|
| 550 |
+
"contiguous"
|
| 551 |
+
|
| 552 |
+
Returns:
|
| 553 |
+
A list of NormalizedString, representing each split
|
| 554 |
+
"""
|
| 555 |
+
pass
|
| 556 |
+
|
| 557 |
+
def strip(self):
|
| 558 |
+
"""
|
| 559 |
+
Strip both ends of the string
|
| 560 |
+
"""
|
| 561 |
+
pass
|
| 562 |
+
|
| 563 |
+
def uppercase(self):
|
| 564 |
+
"""
|
| 565 |
+
Uppercase the string
|
| 566 |
+
"""
|
| 567 |
+
pass
|
| 568 |
+
|
| 569 |
+
class PreTokenizedString:
|
| 570 |
+
"""
|
| 571 |
+
PreTokenizedString
|
| 572 |
+
|
| 573 |
+
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
|
| 574 |
+
underlying string, while keeping track of the alignment information (offsets).
|
| 575 |
+
|
| 576 |
+
The PreTokenizedString manages what we call `splits`. Each split represents a substring
|
| 577 |
+
which is a subpart of the original string, with the relevant offsets and tokens.
|
| 578 |
+
|
| 579 |
+
When calling one of the methods used to modify the PreTokenizedString (namely one of
|
| 580 |
+
`split`, `normalize` or `tokenize), only the `splits` that don't have any associated
|
| 581 |
+
tokens will get modified.
|
| 582 |
+
|
| 583 |
+
Args:
|
| 584 |
+
sequence: str:
|
| 585 |
+
The string sequence used to initialize this PreTokenizedString
|
| 586 |
+
"""
|
| 587 |
+
def __init__(self, sequence):
|
| 588 |
+
pass
|
| 589 |
+
|
| 590 |
+
def get_splits(self, offset_referential="original", offset_type="char"):
|
| 591 |
+
"""
|
| 592 |
+
Get the splits currently managed by the PreTokenizedString
|
| 593 |
+
|
| 594 |
+
Args:
|
| 595 |
+
offset_referential: :obj:`str`
|
| 596 |
+
Whether the returned splits should have offsets expressed relative
|
| 597 |
+
to the original string, or the normalized one. choices: "original", "normalized".
|
| 598 |
+
|
| 599 |
+
offset_type: :obj:`str`
|
| 600 |
+
Whether the returned splits should have offsets expressed in bytes or chars.
|
| 601 |
+
When slicing an str, we usually want to use chars, which is the default value.
|
| 602 |
+
Now in some cases it might be interesting to get these offsets expressed in bytes,
|
| 603 |
+
so it is possible to change this here.
|
| 604 |
+
choices: "char", "bytes"
|
| 605 |
+
|
| 606 |
+
Returns
|
| 607 |
+
A list of splits
|
| 608 |
+
"""
|
| 609 |
+
pass
|
| 610 |
+
|
| 611 |
+
def normalize(self, func):
|
| 612 |
+
"""
|
| 613 |
+
Normalize each split of the `PreTokenizedString` using the given `func`
|
| 614 |
+
|
| 615 |
+
Args:
|
| 616 |
+
func: Callable[[NormalizedString], None]:
|
| 617 |
+
The function used to normalize each underlying split. This function
|
| 618 |
+
does not need to return anything, just calling the methods on the provided
|
| 619 |
+
NormalizedString allow its modification.
|
| 620 |
+
"""
|
| 621 |
+
pass
|
| 622 |
+
|
| 623 |
+
def split(self, func):
|
| 624 |
+
"""
|
| 625 |
+
Split the PreTokenizedString using the given `func`
|
| 626 |
+
|
| 627 |
+
Args:
|
| 628 |
+
func: Callable[[index, NormalizedString], List[NormalizedString]]:
|
| 629 |
+
The function used to split each underlying split.
|
| 630 |
+
It is expected to return a list of `NormalizedString`, that represent the new
|
| 631 |
+
splits. If the given `NormalizedString` does not need any splitting, we can
|
| 632 |
+
just return it directly.
|
| 633 |
+
In order for the offsets to be tracked accurately, any returned `NormalizedString`
|
| 634 |
+
should come from calling either `.split` or `.slice` on the received one.
|
| 635 |
+
"""
|
| 636 |
+
pass
|
| 637 |
+
|
| 638 |
+
def to_encoding(self, type_id=0, word_idx=None):
|
| 639 |
+
"""
|
| 640 |
+
Return an Encoding generated from this PreTokenizedString
|
| 641 |
+
|
| 642 |
+
Args:
|
| 643 |
+
type_id: int = 0:
|
| 644 |
+
The type_id to be used on the generated Encoding.
|
| 645 |
+
|
| 646 |
+
word_idx: Optional[int] = None:
|
| 647 |
+
An optional word index to be used for each token of this Encoding. If provided,
|
| 648 |
+
all the word indices in the generated Encoding will use this value, instead
|
| 649 |
+
of the one automatically tracked during pre-tokenization.
|
| 650 |
+
|
| 651 |
+
Returns:
|
| 652 |
+
An Encoding
|
| 653 |
+
"""
|
| 654 |
+
pass
|
| 655 |
+
|
| 656 |
+
def tokenize(self, func):
|
| 657 |
+
"""
|
| 658 |
+
Tokenize each split of the `PreTokenizedString` using the given `func`
|
| 659 |
+
|
| 660 |
+
Args:
|
| 661 |
+
func: Callable[[str], List[Token]]:
|
| 662 |
+
The function used to tokenize each underlying split. This function must return
|
| 663 |
+
a list of Token generated from the input str.
|
| 664 |
+
"""
|
| 665 |
+
pass
|
| 666 |
+
|
| 667 |
+
class Regex:
|
| 668 |
+
"""
|
| 669 |
+
Instantiate a new Regex with the given pattern
|
| 670 |
+
"""
|
| 671 |
+
def __init__(self, pattern):
|
| 672 |
+
pass
|
| 673 |
+
|
| 674 |
+
class Token:
|
| 675 |
+
pass
|
| 676 |
+
|
| 677 |
+
class Tokenizer:
|
| 678 |
+
"""
|
| 679 |
+
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
|
| 680 |
+
and outputs an :class:`~tokenizers.Encoding`.
|
| 681 |
+
|
| 682 |
+
Args:
|
| 683 |
+
model (:class:`~tokenizers.models.Model`):
|
| 684 |
+
The core algorithm that this :obj:`Tokenizer` should be using.
|
| 685 |
+
|
| 686 |
+
"""
|
| 687 |
+
def __init__(self, model):
|
| 688 |
+
pass
|
| 689 |
+
|
| 690 |
+
def add_special_tokens(self, tokens):
|
| 691 |
+
"""
|
| 692 |
+
Add the given special tokens to the Tokenizer.
|
| 693 |
+
|
| 694 |
+
If these tokens are already part of the vocabulary, it just let the Tokenizer know about
|
| 695 |
+
them. If they don't exist, the Tokenizer creates them, giving them a new id.
|
| 696 |
+
|
| 697 |
+
These special tokens will never be processed by the model (ie won't be split into
|
| 698 |
+
multiple tokens), and they can be removed from the output when decoding.
|
| 699 |
+
|
| 700 |
+
Args:
|
| 701 |
+
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
|
| 702 |
+
The list of special tokens we want to add to the vocabulary. Each token can either
|
| 703 |
+
be a string or an instance of :class:`~tokenizers.AddedToken` for more
|
| 704 |
+
customization.
|
| 705 |
+
|
| 706 |
+
Returns:
|
| 707 |
+
:obj:`int`: The number of tokens that were created in the vocabulary
|
| 708 |
+
"""
|
| 709 |
+
pass
|
| 710 |
+
|
| 711 |
+
def add_tokens(self, tokens):
|
| 712 |
+
"""
|
| 713 |
+
Add the given tokens to the vocabulary
|
| 714 |
+
|
| 715 |
+
The given tokens are added only if they don't already exist in the vocabulary.
|
| 716 |
+
Each token then gets a new attributed id.
|
| 717 |
+
|
| 718 |
+
Args:
|
| 719 |
+
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
|
| 720 |
+
The list of tokens we want to add to the vocabulary. Each token can be either a
|
| 721 |
+
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
|
| 722 |
+
|
| 723 |
+
Returns:
|
| 724 |
+
:obj:`int`: The number of tokens that were created in the vocabulary
|
| 725 |
+
"""
|
| 726 |
+
pass
|
| 727 |
+
|
| 728 |
+
def decode(self, ids, skip_special_tokens=True):
|
| 729 |
+
"""
|
| 730 |
+
Decode the given list of ids back to a string
|
| 731 |
+
|
| 732 |
+
This is used to decode anything coming back from a Language Model
|
| 733 |
+
|
| 734 |
+
Args:
|
| 735 |
+
ids (A :obj:`List/Tuple` of :obj:`int`):
|
| 736 |
+
The list of ids that we want to decode
|
| 737 |
+
|
| 738 |
+
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 739 |
+
Whether the special tokens should be removed from the decoded string
|
| 740 |
+
|
| 741 |
+
Returns:
|
| 742 |
+
:obj:`str`: The decoded string
|
| 743 |
+
"""
|
| 744 |
+
pass
|
| 745 |
+
|
| 746 |
+
def decode_batch(self, sequences, skip_special_tokens=True):
|
| 747 |
+
"""
|
| 748 |
+
Decode a batch of ids back to their corresponding string
|
| 749 |
+
|
| 750 |
+
Args:
|
| 751 |
+
sequences (:obj:`List` of :obj:`List[int]`):
|
| 752 |
+
The batch of sequences we want to decode
|
| 753 |
+
|
| 754 |
+
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 755 |
+
Whether the special tokens should be removed from the decoded strings
|
| 756 |
+
|
| 757 |
+
Returns:
|
| 758 |
+
:obj:`List[str]`: A list of decoded strings
|
| 759 |
+
"""
|
| 760 |
+
pass
|
| 761 |
+
|
| 762 |
+
@property
|
| 763 |
+
def decoder(self):
|
| 764 |
+
"""
|
| 765 |
+
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
|
| 766 |
+
"""
|
| 767 |
+
pass
|
| 768 |
+
|
| 769 |
+
def enable_padding(
|
| 770 |
+
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
|
| 771 |
+
):
|
| 772 |
+
"""
|
| 773 |
+
Enable the padding
|
| 774 |
+
|
| 775 |
+
Args:
|
| 776 |
+
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
|
| 777 |
+
The direction in which to pad. Can be either ``right`` or ``left``
|
| 778 |
+
|
| 779 |
+
pad_to_multiple_of (:obj:`int`, `optional`):
|
| 780 |
+
If specified, the padding length should always snap to the next multiple of the
|
| 781 |
+
given value. For example if we were going to pad witha length of 250 but
|
| 782 |
+
``pad_to_multiple_of=8`` then we will pad to 256.
|
| 783 |
+
|
| 784 |
+
pad_id (:obj:`int`, defaults to 0):
|
| 785 |
+
The id to be used when padding
|
| 786 |
+
|
| 787 |
+
pad_type_id (:obj:`int`, defaults to 0):
|
| 788 |
+
The type id to be used when padding
|
| 789 |
+
|
| 790 |
+
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
|
| 791 |
+
The pad token to be used when padding
|
| 792 |
+
|
| 793 |
+
length (:obj:`int`, `optional`):
|
| 794 |
+
If specified, the length at which to pad. If not specified we pad using the size of
|
| 795 |
+
the longest sequence in a batch.
|
| 796 |
+
"""
|
| 797 |
+
pass
|
| 798 |
+
|
| 799 |
+
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
|
| 800 |
+
"""
|
| 801 |
+
Enable truncation
|
| 802 |
+
|
| 803 |
+
Args:
|
| 804 |
+
max_length (:obj:`int`):
|
| 805 |
+
The max length at which to truncate
|
| 806 |
+
|
| 807 |
+
stride (:obj:`int`, `optional`):
|
| 808 |
+
The length of the previous first sequence to be included in the overflowing
|
| 809 |
+
sequence
|
| 810 |
+
|
| 811 |
+
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
|
| 812 |
+
The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or
|
| 813 |
+
``only_second``.
|
| 814 |
+
|
| 815 |
+
direction (:obj:`str`, defaults to :obj:`right`):
|
| 816 |
+
Truncate direction
|
| 817 |
+
"""
|
| 818 |
+
pass
|
| 819 |
+
|
| 820 |
+
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
|
| 821 |
+
"""
|
| 822 |
+
Encode the given sequence and pair. This method can process raw text sequences
|
| 823 |
+
as well as already pre-tokenized sequences.
|
| 824 |
+
|
| 825 |
+
Example:
|
| 826 |
+
Here are some examples of the inputs that are accepted::
|
| 827 |
+
|
| 828 |
+
encode("A single sequence")`
|
| 829 |
+
encode("A sequence", "And its pair")`
|
| 830 |
+
encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
|
| 831 |
+
encode(
|
| 832 |
+
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
|
| 833 |
+
is_pretokenized=True
|
| 834 |
+
)
|
| 835 |
+
|
| 836 |
+
Args:
|
| 837 |
+
sequence (:obj:`~tokenizers.InputSequence`):
|
| 838 |
+
The main input sequence we want to encode. This sequence can be either raw
|
| 839 |
+
text or pre-tokenized, according to the ``is_pretokenized`` argument:
|
| 840 |
+
|
| 841 |
+
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
|
| 842 |
+
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
|
| 843 |
+
|
| 844 |
+
pair (:obj:`~tokenizers.InputSequence`, `optional`):
|
| 845 |
+
An optional input sequence. The expected format is the same that for ``sequence``.
|
| 846 |
+
|
| 847 |
+
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
|
| 848 |
+
Whether the input is already pre-tokenized
|
| 849 |
+
|
| 850 |
+
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 851 |
+
Whether to add the special tokens
|
| 852 |
+
|
| 853 |
+
Returns:
|
| 854 |
+
:class:`~tokenizers.Encoding`: The encoded result
|
| 855 |
+
|
| 856 |
+
"""
|
| 857 |
+
pass
|
| 858 |
+
|
| 859 |
+
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
|
| 860 |
+
"""
|
| 861 |
+
Encode the given batch of inputs. This method accept both raw text sequences
|
| 862 |
+
as well as already pre-tokenized sequences.
|
| 863 |
+
|
| 864 |
+
Example:
|
| 865 |
+
Here are some examples of the inputs that are accepted::
|
| 866 |
+
|
| 867 |
+
encode_batch([
|
| 868 |
+
"A single sequence",
|
| 869 |
+
("A tuple with a sequence", "And its pair"),
|
| 870 |
+
[ "A", "pre", "tokenized", "sequence" ],
|
| 871 |
+
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
|
| 872 |
+
])
|
| 873 |
+
|
| 874 |
+
Args:
|
| 875 |
+
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
|
| 876 |
+
A list of single sequences or pair sequences to encode. Each sequence
|
| 877 |
+
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
|
| 878 |
+
argument:
|
| 879 |
+
|
| 880 |
+
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
|
| 881 |
+
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
|
| 882 |
+
|
| 883 |
+
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
|
| 884 |
+
Whether the input is already pre-tokenized
|
| 885 |
+
|
| 886 |
+
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 887 |
+
Whether to add the special tokens
|
| 888 |
+
|
| 889 |
+
Returns:
|
| 890 |
+
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
|
| 891 |
+
|
| 892 |
+
"""
|
| 893 |
+
pass
|
| 894 |
+
|
| 895 |
+
@property
|
| 896 |
+
def encode_special_tokens(self):
|
| 897 |
+
"""
|
| 898 |
+
Modifies the tokenizer in order to use or not the special tokens
|
| 899 |
+
during encoding.
|
| 900 |
+
|
| 901 |
+
Args:
|
| 902 |
+
value (:obj:`bool`):
|
| 903 |
+
Whether to use the special tokens or not
|
| 904 |
+
|
| 905 |
+
"""
|
| 906 |
+
pass
|
| 907 |
+
|
| 908 |
+
@staticmethod
|
| 909 |
+
def from_buffer(buffer):
|
| 910 |
+
"""
|
| 911 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
|
| 912 |
+
|
| 913 |
+
Args:
|
| 914 |
+
buffer (:obj:`bytes`):
|
| 915 |
+
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
|
| 916 |
+
|
| 917 |
+
Returns:
|
| 918 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 919 |
+
"""
|
| 920 |
+
pass
|
| 921 |
+
|
| 922 |
+
@staticmethod
|
| 923 |
+
def from_file(path):
|
| 924 |
+
"""
|
| 925 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
|
| 926 |
+
|
| 927 |
+
Args:
|
| 928 |
+
path (:obj:`str`):
|
| 929 |
+
A path to a local JSON file representing a previously serialized
|
| 930 |
+
:class:`~tokenizers.Tokenizer`
|
| 931 |
+
|
| 932 |
+
Returns:
|
| 933 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 934 |
+
"""
|
| 935 |
+
pass
|
| 936 |
+
|
| 937 |
+
@staticmethod
|
| 938 |
+
def from_pretrained(identifier, revision="main", auth_token=None):
|
| 939 |
+
"""
|
| 940 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
|
| 941 |
+
Hugging Face Hub.
|
| 942 |
+
|
| 943 |
+
Args:
|
| 944 |
+
identifier (:obj:`str`):
|
| 945 |
+
The identifier of a Model on the Hugging Face Hub, that contains
|
| 946 |
+
a tokenizer.json file
|
| 947 |
+
revision (:obj:`str`, defaults to `main`):
|
| 948 |
+
A branch or commit id
|
| 949 |
+
auth_token (:obj:`str`, `optional`, defaults to `None`):
|
| 950 |
+
An optional auth token used to access private repositories on the
|
| 951 |
+
Hugging Face Hub
|
| 952 |
+
|
| 953 |
+
Returns:
|
| 954 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 955 |
+
"""
|
| 956 |
+
pass
|
| 957 |
+
|
| 958 |
+
@staticmethod
|
| 959 |
+
def from_str(json):
|
| 960 |
+
"""
|
| 961 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
|
| 962 |
+
|
| 963 |
+
Args:
|
| 964 |
+
json (:obj:`str`):
|
| 965 |
+
A valid JSON string representing a previously serialized
|
| 966 |
+
:class:`~tokenizers.Tokenizer`
|
| 967 |
+
|
| 968 |
+
Returns:
|
| 969 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 970 |
+
"""
|
| 971 |
+
pass
|
| 972 |
+
|
| 973 |
+
def get_added_tokens_decoder(self):
|
| 974 |
+
"""
|
| 975 |
+
Get the underlying vocabulary
|
| 976 |
+
|
| 977 |
+
Returns:
|
| 978 |
+
:obj:`Dict[int, AddedToken]`: The vocabulary
|
| 979 |
+
"""
|
| 980 |
+
pass
|
| 981 |
+
|
| 982 |
+
def get_vocab(self, with_added_tokens=True):
|
| 983 |
+
"""
|
| 984 |
+
Get the underlying vocabulary
|
| 985 |
+
|
| 986 |
+
Args:
|
| 987 |
+
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 988 |
+
Whether to include the added tokens
|
| 989 |
+
|
| 990 |
+
Returns:
|
| 991 |
+
:obj:`Dict[str, int]`: The vocabulary
|
| 992 |
+
"""
|
| 993 |
+
pass
|
| 994 |
+
|
| 995 |
+
def get_vocab_size(self, with_added_tokens=True):
|
| 996 |
+
"""
|
| 997 |
+
Get the size of the underlying vocabulary
|
| 998 |
+
|
| 999 |
+
Args:
|
| 1000 |
+
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 1001 |
+
Whether to include the added tokens
|
| 1002 |
+
|
| 1003 |
+
Returns:
|
| 1004 |
+
:obj:`int`: The size of the vocabulary
|
| 1005 |
+
"""
|
| 1006 |
+
pass
|
| 1007 |
+
|
| 1008 |
+
def id_to_token(self, id):
|
| 1009 |
+
"""
|
| 1010 |
+
Convert the given id to its corresponding token if it exists
|
| 1011 |
+
|
| 1012 |
+
Args:
|
| 1013 |
+
id (:obj:`int`):
|
| 1014 |
+
The id to convert
|
| 1015 |
+
|
| 1016 |
+
Returns:
|
| 1017 |
+
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
|
| 1018 |
+
"""
|
| 1019 |
+
pass
|
| 1020 |
+
|
| 1021 |
+
@property
|
| 1022 |
+
def model(self):
|
| 1023 |
+
"""
|
| 1024 |
+
The :class:`~tokenizers.models.Model` in use by the Tokenizer
|
| 1025 |
+
"""
|
| 1026 |
+
pass
|
| 1027 |
+
|
| 1028 |
+
def no_padding(self):
|
| 1029 |
+
"""
|
| 1030 |
+
Disable padding
|
| 1031 |
+
"""
|
| 1032 |
+
pass
|
| 1033 |
+
|
| 1034 |
+
def no_truncation(self):
|
| 1035 |
+
"""
|
| 1036 |
+
Disable truncation
|
| 1037 |
+
"""
|
| 1038 |
+
pass
|
| 1039 |
+
|
| 1040 |
+
@property
|
| 1041 |
+
def normalizer(self):
|
| 1042 |
+
"""
|
| 1043 |
+
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
|
| 1044 |
+
"""
|
| 1045 |
+
pass
|
| 1046 |
+
|
| 1047 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 1048 |
+
"""
|
| 1049 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 1050 |
+
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
|
| 1051 |
+
:return:
|
| 1052 |
+
"""
|
| 1053 |
+
pass
|
| 1054 |
+
|
| 1055 |
+
@property
|
| 1056 |
+
def padding(self):
|
| 1057 |
+
"""
|
| 1058 |
+
Get the current padding parameters
|
| 1059 |
+
|
| 1060 |
+
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
|
| 1061 |
+
|
| 1062 |
+
Returns:
|
| 1063 |
+
(:obj:`dict`, `optional`):
|
| 1064 |
+
A dict with the current padding parameters if padding is enabled
|
| 1065 |
+
"""
|
| 1066 |
+
pass
|
| 1067 |
+
|
| 1068 |
+
def post_process(self, encoding, pair=None, add_special_tokens=True):
|
| 1069 |
+
"""
|
| 1070 |
+
Apply all the post-processing steps to the given encodings.
|
| 1071 |
+
|
| 1072 |
+
The various steps are:
|
| 1073 |
+
|
| 1074 |
+
1. Truncate according to the set truncation params (provided with
|
| 1075 |
+
:meth:`~tokenizers.Tokenizer.enable_truncation`)
|
| 1076 |
+
2. Apply the :class:`~tokenizers.processors.PostProcessor`
|
| 1077 |
+
3. Pad according to the set padding params (provided with
|
| 1078 |
+
:meth:`~tokenizers.Tokenizer.enable_padding`)
|
| 1079 |
+
|
| 1080 |
+
Args:
|
| 1081 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 1082 |
+
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
|
| 1083 |
+
|
| 1084 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 1085 |
+
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
|
| 1086 |
+
|
| 1087 |
+
add_special_tokens (:obj:`bool`):
|
| 1088 |
+
Whether to add the special tokens
|
| 1089 |
+
|
| 1090 |
+
Returns:
|
| 1091 |
+
:class:`~tokenizers.Encoding`: The final post-processed encoding
|
| 1092 |
+
"""
|
| 1093 |
+
pass
|
| 1094 |
+
|
| 1095 |
+
@property
|
| 1096 |
+
def post_processor(self):
|
| 1097 |
+
"""
|
| 1098 |
+
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
|
| 1099 |
+
"""
|
| 1100 |
+
pass
|
| 1101 |
+
|
| 1102 |
+
@property
|
| 1103 |
+
def pre_tokenizer(self):
|
| 1104 |
+
"""
|
| 1105 |
+
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
|
| 1106 |
+
"""
|
| 1107 |
+
pass
|
| 1108 |
+
|
| 1109 |
+
def save(self, path, pretty=True):
|
| 1110 |
+
"""
|
| 1111 |
+
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
|
| 1112 |
+
|
| 1113 |
+
Args:
|
| 1114 |
+
path (:obj:`str`):
|
| 1115 |
+
A path to a file in which to save the serialized tokenizer.
|
| 1116 |
+
|
| 1117 |
+
pretty (:obj:`bool`, defaults to :obj:`True`):
|
| 1118 |
+
Whether the JSON file should be pretty formatted.
|
| 1119 |
+
"""
|
| 1120 |
+
pass
|
| 1121 |
+
|
| 1122 |
+
def to_str(self, pretty=False):
|
| 1123 |
+
"""
|
| 1124 |
+
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
|
| 1125 |
+
|
| 1126 |
+
Args:
|
| 1127 |
+
pretty (:obj:`bool`, defaults to :obj:`False`):
|
| 1128 |
+
Whether the JSON string should be pretty formatted.
|
| 1129 |
+
|
| 1130 |
+
Returns:
|
| 1131 |
+
:obj:`str`: A string representing the serialized Tokenizer
|
| 1132 |
+
"""
|
| 1133 |
+
pass
|
| 1134 |
+
|
| 1135 |
+
def token_to_id(self, token):
|
| 1136 |
+
"""
|
| 1137 |
+
Convert the given token to its corresponding id if it exists
|
| 1138 |
+
|
| 1139 |
+
Args:
|
| 1140 |
+
token (:obj:`str`):
|
| 1141 |
+
The token to convert
|
| 1142 |
+
|
| 1143 |
+
Returns:
|
| 1144 |
+
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
|
| 1145 |
+
"""
|
| 1146 |
+
pass
|
| 1147 |
+
|
| 1148 |
+
def train(self, files, trainer=None):
|
| 1149 |
+
"""
|
| 1150 |
+
Train the Tokenizer using the given files.
|
| 1151 |
+
|
| 1152 |
+
Reads the files line by line, while keeping all the whitespace, even new lines.
|
| 1153 |
+
If you want to train from data store in-memory, you can check
|
| 1154 |
+
:meth:`~tokenizers.Tokenizer.train_from_iterator`
|
| 1155 |
+
|
| 1156 |
+
Args:
|
| 1157 |
+
files (:obj:`List[str]`):
|
| 1158 |
+
A list of path to the files that we should use for training
|
| 1159 |
+
|
| 1160 |
+
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
|
| 1161 |
+
An optional trainer that should be used to train our Model
|
| 1162 |
+
"""
|
| 1163 |
+
pass
|
| 1164 |
+
|
| 1165 |
+
def train_from_iterator(self, iterator, trainer=None, length=None):
|
| 1166 |
+
"""
|
| 1167 |
+
Train the Tokenizer using the provided iterator.
|
| 1168 |
+
|
| 1169 |
+
You can provide anything that is a Python Iterator
|
| 1170 |
+
|
| 1171 |
+
* A list of sequences :obj:`List[str]`
|
| 1172 |
+
* A generator that yields :obj:`str` or :obj:`List[str]`
|
| 1173 |
+
* A Numpy array of strings
|
| 1174 |
+
* ...
|
| 1175 |
+
|
| 1176 |
+
Args:
|
| 1177 |
+
iterator (:obj:`Iterator`):
|
| 1178 |
+
Any iterator over strings or list of strings
|
| 1179 |
+
|
| 1180 |
+
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
|
| 1181 |
+
An optional trainer that should be used to train our Model
|
| 1182 |
+
|
| 1183 |
+
length (:obj:`int`, `optional`):
|
| 1184 |
+
The total number of sequences in the iterator. This is used to
|
| 1185 |
+
provide meaningful progress tracking
|
| 1186 |
+
"""
|
| 1187 |
+
pass
|
| 1188 |
+
|
| 1189 |
+
@property
|
| 1190 |
+
def truncation(self):
|
| 1191 |
+
"""
|
| 1192 |
+
Get the currently set truncation parameters
|
| 1193 |
+
|
| 1194 |
+
`Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
|
| 1195 |
+
|
| 1196 |
+
Returns:
|
| 1197 |
+
(:obj:`dict`, `optional`):
|
| 1198 |
+
A dict with the current truncation parameters if truncation is enabled
|
| 1199 |
+
"""
|
| 1200 |
+
pass
|
wemm/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class Decoder:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all decoders
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of
|
| 7 |
+
a Decoder will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def decode(self, tokens):
|
| 10 |
+
"""
|
| 11 |
+
Decode the given list of tokens to a final string
|
| 12 |
+
|
| 13 |
+
Args:
|
| 14 |
+
tokens (:obj:`List[str]`):
|
| 15 |
+
The list of tokens to decode
|
| 16 |
+
|
| 17 |
+
Returns:
|
| 18 |
+
:obj:`str`: The decoded string
|
| 19 |
+
"""
|
| 20 |
+
pass
|
| 21 |
+
|
| 22 |
+
class BPEDecoder(Decoder):
|
| 23 |
+
"""
|
| 24 |
+
BPEDecoder Decoder
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
|
| 28 |
+
The suffix that was used to caracterize an end-of-word. This suffix will
|
| 29 |
+
be replaced by whitespaces during the decoding
|
| 30 |
+
"""
|
| 31 |
+
def __init__(self, suffix="</w>"):
|
| 32 |
+
pass
|
| 33 |
+
|
| 34 |
+
def decode(self, tokens):
|
| 35 |
+
"""
|
| 36 |
+
Decode the given list of tokens to a final string
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
tokens (:obj:`List[str]`):
|
| 40 |
+
The list of tokens to decode
|
| 41 |
+
|
| 42 |
+
Returns:
|
| 43 |
+
:obj:`str`: The decoded string
|
| 44 |
+
"""
|
| 45 |
+
pass
|
| 46 |
+
|
| 47 |
+
class ByteFallback(Decoder):
|
| 48 |
+
"""
|
| 49 |
+
ByteFallback Decoder
|
| 50 |
+
ByteFallback is a simple trick which converts tokens looking like `<0x61>`
|
| 51 |
+
to pure bytes, and attempts to make them into a string. If the tokens
|
| 52 |
+
cannot be decoded you will get � instead for each inconvertable byte token
|
| 53 |
+
|
| 54 |
+
"""
|
| 55 |
+
def __init__(self):
|
| 56 |
+
pass
|
| 57 |
+
|
| 58 |
+
def decode(self, tokens):
|
| 59 |
+
"""
|
| 60 |
+
Decode the given list of tokens to a final string
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
tokens (:obj:`List[str]`):
|
| 64 |
+
The list of tokens to decode
|
| 65 |
+
|
| 66 |
+
Returns:
|
| 67 |
+
:obj:`str`: The decoded string
|
| 68 |
+
"""
|
| 69 |
+
pass
|
| 70 |
+
|
| 71 |
+
class ByteLevel(Decoder):
|
| 72 |
+
"""
|
| 73 |
+
ByteLevel Decoder
|
| 74 |
+
|
| 75 |
+
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
|
| 76 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
|
| 77 |
+
"""
|
| 78 |
+
def __init__(self):
|
| 79 |
+
pass
|
| 80 |
+
|
| 81 |
+
def decode(self, tokens):
|
| 82 |
+
"""
|
| 83 |
+
Decode the given list of tokens to a final string
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
tokens (:obj:`List[str]`):
|
| 87 |
+
The list of tokens to decode
|
| 88 |
+
|
| 89 |
+
Returns:
|
| 90 |
+
:obj:`str`: The decoded string
|
| 91 |
+
"""
|
| 92 |
+
pass
|
| 93 |
+
|
| 94 |
+
class CTC(Decoder):
|
| 95 |
+
"""
|
| 96 |
+
CTC Decoder
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
|
| 100 |
+
The pad token used by CTC to delimit a new token.
|
| 101 |
+
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
|
| 102 |
+
The word delimiter token. It will be replaced by a <space>
|
| 103 |
+
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 104 |
+
Whether to cleanup some tokenization artifacts.
|
| 105 |
+
Mainly spaces before punctuation, and some abbreviated english forms.
|
| 106 |
+
"""
|
| 107 |
+
def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
|
| 108 |
+
pass
|
| 109 |
+
|
| 110 |
+
def decode(self, tokens):
|
| 111 |
+
"""
|
| 112 |
+
Decode the given list of tokens to a final string
|
| 113 |
+
|
| 114 |
+
Args:
|
| 115 |
+
tokens (:obj:`List[str]`):
|
| 116 |
+
The list of tokens to decode
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
:obj:`str`: The decoded string
|
| 120 |
+
"""
|
| 121 |
+
pass
|
| 122 |
+
|
| 123 |
+
class Fuse(Decoder):
|
| 124 |
+
"""
|
| 125 |
+
Fuse Decoder
|
| 126 |
+
Fuse simply fuses every token into a single string.
|
| 127 |
+
This is the last step of decoding, this decoder exists only if
|
| 128 |
+
there is need to add other decoders *after* the fusion
|
| 129 |
+
"""
|
| 130 |
+
def __init__(self):
|
| 131 |
+
pass
|
| 132 |
+
|
| 133 |
+
def decode(self, tokens):
|
| 134 |
+
"""
|
| 135 |
+
Decode the given list of tokens to a final string
|
| 136 |
+
|
| 137 |
+
Args:
|
| 138 |
+
tokens (:obj:`List[str]`):
|
| 139 |
+
The list of tokens to decode
|
| 140 |
+
|
| 141 |
+
Returns:
|
| 142 |
+
:obj:`str`: The decoded string
|
| 143 |
+
"""
|
| 144 |
+
pass
|
| 145 |
+
|
| 146 |
+
class Metaspace(Decoder):
|
| 147 |
+
"""
|
| 148 |
+
Metaspace Decoder
|
| 149 |
+
|
| 150 |
+
Args:
|
| 151 |
+
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
|
| 152 |
+
The replacement character. Must be exactly one character. By default we
|
| 153 |
+
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
|
| 154 |
+
|
| 155 |
+
prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
|
| 156 |
+
Whether to add a space to the first word if there isn't already one. This
|
| 157 |
+
lets us treat `hello` exactly like `say hello`.
|
| 158 |
+
Choices: "always", "never", "first". First means the space is only added on the first
|
| 159 |
+
token (relevant when special tokens are used or other pre_tokenizer are used).
|
| 160 |
+
"""
|
| 161 |
+
def __init__(self, replacement="▁", prepend_scheme="always", split=True):
|
| 162 |
+
pass
|
| 163 |
+
|
| 164 |
+
def decode(self, tokens):
|
| 165 |
+
"""
|
| 166 |
+
Decode the given list of tokens to a final string
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
tokens (:obj:`List[str]`):
|
| 170 |
+
The list of tokens to decode
|
| 171 |
+
|
| 172 |
+
Returns:
|
| 173 |
+
:obj:`str`: The decoded string
|
| 174 |
+
"""
|
| 175 |
+
pass
|
| 176 |
+
|
| 177 |
+
class Replace(Decoder):
|
| 178 |
+
"""
|
| 179 |
+
Replace Decoder
|
| 180 |
+
|
| 181 |
+
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
|
| 182 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
|
| 183 |
+
"""
|
| 184 |
+
def __init__(self, pattern, content):
|
| 185 |
+
pass
|
| 186 |
+
|
| 187 |
+
def decode(self, tokens):
|
| 188 |
+
"""
|
| 189 |
+
Decode the given list of tokens to a final string
|
| 190 |
+
|
| 191 |
+
Args:
|
| 192 |
+
tokens (:obj:`List[str]`):
|
| 193 |
+
The list of tokens to decode
|
| 194 |
+
|
| 195 |
+
Returns:
|
| 196 |
+
:obj:`str`: The decoded string
|
| 197 |
+
"""
|
| 198 |
+
pass
|
| 199 |
+
|
| 200 |
+
class Sequence(Decoder):
|
| 201 |
+
"""
|
| 202 |
+
Sequence Decoder
|
| 203 |
+
|
| 204 |
+
Args:
|
| 205 |
+
decoders (:obj:`List[Decoder]`)
|
| 206 |
+
The decoders that need to be chained
|
| 207 |
+
"""
|
| 208 |
+
def __init__(self, decoders):
|
| 209 |
+
pass
|
| 210 |
+
|
| 211 |
+
def decode(self, tokens):
|
| 212 |
+
"""
|
| 213 |
+
Decode the given list of tokens to a final string
|
| 214 |
+
|
| 215 |
+
Args:
|
| 216 |
+
tokens (:obj:`List[str]`):
|
| 217 |
+
The list of tokens to decode
|
| 218 |
+
|
| 219 |
+
Returns:
|
| 220 |
+
:obj:`str`: The decoded string
|
| 221 |
+
"""
|
| 222 |
+
pass
|
| 223 |
+
|
| 224 |
+
class Strip(Decoder):
|
| 225 |
+
"""
|
| 226 |
+
Strip normalizer
|
| 227 |
+
Strips n left characters of each token, or n right characters of each token
|
| 228 |
+
"""
|
| 229 |
+
def __init__(self, content, left=0, right=0):
|
| 230 |
+
pass
|
| 231 |
+
|
| 232 |
+
def decode(self, tokens):
|
| 233 |
+
"""
|
| 234 |
+
Decode the given list of tokens to a final string
|
| 235 |
+
|
| 236 |
+
Args:
|
| 237 |
+
tokens (:obj:`List[str]`):
|
| 238 |
+
The list of tokens to decode
|
| 239 |
+
|
| 240 |
+
Returns:
|
| 241 |
+
:obj:`str`: The decoded string
|
| 242 |
+
"""
|
| 243 |
+
pass
|
| 244 |
+
|
| 245 |
+
class WordPiece(Decoder):
|
| 246 |
+
"""
|
| 247 |
+
WordPiece Decoder
|
| 248 |
+
|
| 249 |
+
Args:
|
| 250 |
+
prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
|
| 251 |
+
The prefix to use for subwords that are not a beginning-of-word
|
| 252 |
+
|
| 253 |
+
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 254 |
+
Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
|
| 255 |
+
and some abbreviated english forms.
|
| 256 |
+
"""
|
| 257 |
+
def __init__(self, prefix="##", cleanup=True):
|
| 258 |
+
pass
|
| 259 |
+
|
| 260 |
+
def decode(self, tokens):
|
| 261 |
+
"""
|
| 262 |
+
Decode the given list of tokens to a final string
|
| 263 |
+
|
| 264 |
+
Args:
|
| 265 |
+
tokens (:obj:`List[str]`):
|
| 266 |
+
The list of tokens to decode
|
| 267 |
+
|
| 268 |
+
Returns:
|
| 269 |
+
:obj:`str`: The decoded string
|
| 270 |
+
"""
|
| 271 |
+
pass
|
wemm/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (395 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc
ADDED
|
Binary file (3.38 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc
ADDED
|
Binary file (4.24 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Iterator, List, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
|
| 4 |
+
from tokenizers.models import BPE
|
| 5 |
+
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
|
| 6 |
+
|
| 7 |
+
from .base_tokenizer import BaseTokenizer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ByteLevelBPETokenizer(BaseTokenizer):
|
| 11 |
+
"""ByteLevelBPETokenizer
|
| 12 |
+
|
| 13 |
+
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
vocab: Optional[Union[str, Dict[str, int]]] = None,
|
| 19 |
+
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
|
| 20 |
+
add_prefix_space: bool = False,
|
| 21 |
+
lowercase: bool = False,
|
| 22 |
+
dropout: Optional[float] = None,
|
| 23 |
+
unicode_normalizer: Optional[str] = None,
|
| 24 |
+
continuing_subword_prefix: Optional[str] = None,
|
| 25 |
+
end_of_word_suffix: Optional[str] = None,
|
| 26 |
+
trim_offsets: bool = False,
|
| 27 |
+
):
|
| 28 |
+
if vocab is not None and merges is not None:
|
| 29 |
+
tokenizer = Tokenizer(
|
| 30 |
+
BPE(
|
| 31 |
+
vocab,
|
| 32 |
+
merges,
|
| 33 |
+
dropout=dropout,
|
| 34 |
+
continuing_subword_prefix=continuing_subword_prefix or "",
|
| 35 |
+
end_of_word_suffix=end_of_word_suffix or "",
|
| 36 |
+
)
|
| 37 |
+
)
|
| 38 |
+
else:
|
| 39 |
+
tokenizer = Tokenizer(BPE())
|
| 40 |
+
|
| 41 |
+
# Check for Unicode normalization first (before everything else)
|
| 42 |
+
normalizers = []
|
| 43 |
+
|
| 44 |
+
if unicode_normalizer:
|
| 45 |
+
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
|
| 46 |
+
|
| 47 |
+
if lowercase:
|
| 48 |
+
normalizers += [Lowercase()]
|
| 49 |
+
|
| 50 |
+
# Create the normalizer structure
|
| 51 |
+
if len(normalizers) > 0:
|
| 52 |
+
if len(normalizers) > 1:
|
| 53 |
+
tokenizer.normalizer = Sequence(normalizers)
|
| 54 |
+
else:
|
| 55 |
+
tokenizer.normalizer = normalizers[0]
|
| 56 |
+
|
| 57 |
+
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
|
| 58 |
+
tokenizer.decoder = decoders.ByteLevel()
|
| 59 |
+
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
|
| 60 |
+
|
| 61 |
+
parameters = {
|
| 62 |
+
"model": "ByteLevelBPE",
|
| 63 |
+
"add_prefix_space": add_prefix_space,
|
| 64 |
+
"lowercase": lowercase,
|
| 65 |
+
"dropout": dropout,
|
| 66 |
+
"unicode_normalizer": unicode_normalizer,
|
| 67 |
+
"continuing_subword_prefix": continuing_subword_prefix,
|
| 68 |
+
"end_of_word_suffix": end_of_word_suffix,
|
| 69 |
+
"trim_offsets": trim_offsets,
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
super().__init__(tokenizer, parameters)
|
| 73 |
+
|
| 74 |
+
@staticmethod
|
| 75 |
+
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
|
| 76 |
+
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
|
| 77 |
+
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
|
| 78 |
+
|
| 79 |
+
def train(
|
| 80 |
+
self,
|
| 81 |
+
files: Union[str, List[str]],
|
| 82 |
+
vocab_size: int = 30000,
|
| 83 |
+
min_frequency: int = 2,
|
| 84 |
+
show_progress: bool = True,
|
| 85 |
+
special_tokens: List[Union[str, AddedToken]] = [],
|
| 86 |
+
):
|
| 87 |
+
"""Train the model using the given files"""
|
| 88 |
+
|
| 89 |
+
trainer = trainers.BpeTrainer(
|
| 90 |
+
vocab_size=vocab_size,
|
| 91 |
+
min_frequency=min_frequency,
|
| 92 |
+
show_progress=show_progress,
|
| 93 |
+
special_tokens=special_tokens,
|
| 94 |
+
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
|
| 95 |
+
)
|
| 96 |
+
if isinstance(files, str):
|
| 97 |
+
files = [files]
|
| 98 |
+
self._tokenizer.train(files, trainer=trainer)
|
| 99 |
+
|
| 100 |
+
def train_from_iterator(
|
| 101 |
+
self,
|
| 102 |
+
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
|
| 103 |
+
vocab_size: int = 30000,
|
| 104 |
+
min_frequency: int = 2,
|
| 105 |
+
show_progress: bool = True,
|
| 106 |
+
special_tokens: List[Union[str, AddedToken]] = [],
|
| 107 |
+
length: Optional[int] = None,
|
| 108 |
+
):
|
| 109 |
+
"""Train the model using the given iterator"""
|
| 110 |
+
|
| 111 |
+
trainer = trainers.BpeTrainer(
|
| 112 |
+
vocab_size=vocab_size,
|
| 113 |
+
min_frequency=min_frequency,
|
| 114 |
+
show_progress=show_progress,
|
| 115 |
+
special_tokens=special_tokens,
|
| 116 |
+
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
|
| 117 |
+
)
|
| 118 |
+
self._tokenizer.train_from_iterator(
|
| 119 |
+
iterator,
|
| 120 |
+
trainer=trainer,
|
| 121 |
+
length=length,
|
| 122 |
+
)
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Iterator, List, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
|
| 4 |
+
from ..models import BPE
|
| 5 |
+
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
|
| 6 |
+
from .base_tokenizer import BaseTokenizer
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CharBPETokenizer(BaseTokenizer):
|
| 10 |
+
"""Original BPE Tokenizer
|
| 11 |
+
|
| 12 |
+
Represents the BPE algorithm, as introduced by Rico Sennrich
|
| 13 |
+
(https://arxiv.org/abs/1508.07909)
|
| 14 |
+
|
| 15 |
+
The defaults settings corresponds to OpenAI GPT BPE tokenizers and differs from the original
|
| 16 |
+
Sennrich subword-nmt implementation by the following options that you can deactivate:
|
| 17 |
+
- adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
|
| 18 |
+
* removing any control characters and replacing all whitespaces by the classic one.
|
| 19 |
+
* handle chinese chars by putting spaces around them.
|
| 20 |
+
* strip all accents.
|
| 21 |
+
- spitting on punctuation in addition to whitespaces (deactivate it with
|
| 22 |
+
`split_on_whitespace_only=True`)
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(
|
| 26 |
+
self,
|
| 27 |
+
vocab: Optional[Union[str, Dict[str, int]]] = None,
|
| 28 |
+
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
|
| 29 |
+
unk_token: Union[str, AddedToken] = "<unk>",
|
| 30 |
+
suffix: str = "</w>",
|
| 31 |
+
dropout: Optional[float] = None,
|
| 32 |
+
lowercase: bool = False,
|
| 33 |
+
unicode_normalizer: Optional[str] = None,
|
| 34 |
+
bert_normalizer: bool = True,
|
| 35 |
+
split_on_whitespace_only: bool = False,
|
| 36 |
+
):
|
| 37 |
+
if vocab is not None and merges is not None:
|
| 38 |
+
tokenizer = Tokenizer(
|
| 39 |
+
BPE(
|
| 40 |
+
vocab,
|
| 41 |
+
merges,
|
| 42 |
+
dropout=dropout,
|
| 43 |
+
unk_token=str(unk_token),
|
| 44 |
+
end_of_word_suffix=suffix,
|
| 45 |
+
)
|
| 46 |
+
)
|
| 47 |
+
else:
|
| 48 |
+
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
|
| 49 |
+
|
| 50 |
+
if tokenizer.token_to_id(str(unk_token)) is not None:
|
| 51 |
+
tokenizer.add_special_tokens([str(unk_token)])
|
| 52 |
+
|
| 53 |
+
# Check for Unicode normalization first (before everything else)
|
| 54 |
+
normalizers = []
|
| 55 |
+
|
| 56 |
+
if unicode_normalizer:
|
| 57 |
+
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
|
| 58 |
+
|
| 59 |
+
if bert_normalizer:
|
| 60 |
+
normalizers += [BertNormalizer(lowercase=False)]
|
| 61 |
+
|
| 62 |
+
if lowercase:
|
| 63 |
+
normalizers += [Lowercase()]
|
| 64 |
+
|
| 65 |
+
# Create the normalizer structure
|
| 66 |
+
if len(normalizers) > 0:
|
| 67 |
+
if len(normalizers) > 1:
|
| 68 |
+
tokenizer.normalizer = Sequence(normalizers)
|
| 69 |
+
else:
|
| 70 |
+
tokenizer.normalizer = normalizers[0]
|
| 71 |
+
|
| 72 |
+
if split_on_whitespace_only:
|
| 73 |
+
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
|
| 74 |
+
else:
|
| 75 |
+
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
|
| 76 |
+
|
| 77 |
+
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
|
| 78 |
+
|
| 79 |
+
parameters = {
|
| 80 |
+
"model": "BPE",
|
| 81 |
+
"unk_token": unk_token,
|
| 82 |
+
"suffix": suffix,
|
| 83 |
+
"dropout": dropout,
|
| 84 |
+
"lowercase": lowercase,
|
| 85 |
+
"unicode_normalizer": unicode_normalizer,
|
| 86 |
+
"bert_normalizer": bert_normalizer,
|
| 87 |
+
"split_on_whitespace_only": split_on_whitespace_only,
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
super().__init__(tokenizer, parameters)
|
| 91 |
+
|
| 92 |
+
@staticmethod
|
| 93 |
+
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
|
| 94 |
+
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
|
| 95 |
+
return CharBPETokenizer(vocab, merges, **kwargs)
|
| 96 |
+
|
| 97 |
+
def train(
|
| 98 |
+
self,
|
| 99 |
+
files: Union[str, List[str]],
|
| 100 |
+
vocab_size: int = 30000,
|
| 101 |
+
min_frequency: int = 2,
|
| 102 |
+
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
|
| 103 |
+
limit_alphabet: int = 1000,
|
| 104 |
+
initial_alphabet: List[str] = [],
|
| 105 |
+
suffix: Optional[str] = "</w>",
|
| 106 |
+
show_progress: bool = True,
|
| 107 |
+
):
|
| 108 |
+
"""Train the model using the given files"""
|
| 109 |
+
|
| 110 |
+
trainer = trainers.BpeTrainer(
|
| 111 |
+
vocab_size=vocab_size,
|
| 112 |
+
min_frequency=min_frequency,
|
| 113 |
+
special_tokens=special_tokens,
|
| 114 |
+
limit_alphabet=limit_alphabet,
|
| 115 |
+
initial_alphabet=initial_alphabet,
|
| 116 |
+
end_of_word_suffix=suffix,
|
| 117 |
+
show_progress=show_progress,
|
| 118 |
+
)
|
| 119 |
+
if isinstance(files, str):
|
| 120 |
+
files = [files]
|
| 121 |
+
self._tokenizer.train(files, trainer=trainer)
|
| 122 |
+
|
| 123 |
+
def train_from_iterator(
|
| 124 |
+
self,
|
| 125 |
+
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
|
| 126 |
+
vocab_size: int = 30000,
|
| 127 |
+
min_frequency: int = 2,
|
| 128 |
+
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
|
| 129 |
+
limit_alphabet: int = 1000,
|
| 130 |
+
initial_alphabet: List[str] = [],
|
| 131 |
+
suffix: Optional[str] = "</w>",
|
| 132 |
+
show_progress: bool = True,
|
| 133 |
+
length: Optional[int] = None,
|
| 134 |
+
):
|
| 135 |
+
"""Train the model using the given iterator"""
|
| 136 |
+
|
| 137 |
+
trainer = trainers.BpeTrainer(
|
| 138 |
+
vocab_size=vocab_size,
|
| 139 |
+
min_frequency=min_frequency,
|
| 140 |
+
special_tokens=special_tokens,
|
| 141 |
+
limit_alphabet=limit_alphabet,
|
| 142 |
+
initial_alphabet=initial_alphabet,
|
| 143 |
+
end_of_word_suffix=suffix,
|
| 144 |
+
show_progress=show_progress,
|
| 145 |
+
)
|
| 146 |
+
self._tokenizer.train_from_iterator(
|
| 147 |
+
iterator,
|
| 148 |
+
trainer=trainer,
|
| 149 |
+
length=length,
|
| 150 |
+
)
|
wemm/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from typing import Iterator, List, Optional, Union, Tuple
|
| 4 |
+
|
| 5 |
+
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
|
| 6 |
+
from tokenizers.models import Unigram
|
| 7 |
+
|
| 8 |
+
from .base_tokenizer import BaseTokenizer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SentencePieceUnigramTokenizer(BaseTokenizer):
|
| 12 |
+
"""SentencePiece Unigram Tokenizer
|
| 13 |
+
|
| 14 |
+
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def __init__(
|
| 18 |
+
self,
|
| 19 |
+
vocab: Optional[List[Tuple[str, float]]] = None,
|
| 20 |
+
replacement: str = "▁",
|
| 21 |
+
add_prefix_space: bool = True,
|
| 22 |
+
):
|
| 23 |
+
if vocab is not None:
|
| 24 |
+
# Let Unigram(..) fail if only one of them is None
|
| 25 |
+
tokenizer = Tokenizer(Unigram(vocab))
|
| 26 |
+
else:
|
| 27 |
+
tokenizer = Tokenizer(Unigram())
|
| 28 |
+
|
| 29 |
+
tokenizer.normalizer = normalizers.Sequence(
|
| 30 |
+
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
|
| 31 |
+
)
|
| 32 |
+
prepend_scheme = "always" if add_prefix_space else "never"
|
| 33 |
+
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
|
| 34 |
+
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
|
| 35 |
+
|
| 36 |
+
parameters = {
|
| 37 |
+
"model": "SentencePieceUnigram",
|
| 38 |
+
"replacement": replacement,
|
| 39 |
+
"add_prefix_space": add_prefix_space,
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
super().__init__(tokenizer, parameters)
|
| 43 |
+
|
| 44 |
+
def train(
|
| 45 |
+
self,
|
| 46 |
+
files: Union[str, List[str]],
|
| 47 |
+
vocab_size: int = 8000,
|
| 48 |
+
show_progress: bool = True,
|
| 49 |
+
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
|
| 50 |
+
initial_alphabet: Optional[List[str]] = None,
|
| 51 |
+
unk_token: Optional[str] = None,
|
| 52 |
+
):
|
| 53 |
+
"""
|
| 54 |
+
Train the model using the given files
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
files (:obj:`List[str]`):
|
| 58 |
+
A list of path to the files that we should use for training
|
| 59 |
+
vocab_size (:obj:`int`):
|
| 60 |
+
The size of the final vocabulary, including all tokens and alphabet.
|
| 61 |
+
show_progress (:obj:`bool`):
|
| 62 |
+
Whether to show progress bars while training.
|
| 63 |
+
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
|
| 64 |
+
A list of special tokens the model should know of.
|
| 65 |
+
initial_alphabet (:obj:`List[str]`, `optional`):
|
| 66 |
+
A list of characters to include in the initial alphabet, even
|
| 67 |
+
if not seen in the training dataset.
|
| 68 |
+
If the strings contain more than one character, only the first one
|
| 69 |
+
is kept.
|
| 70 |
+
unk_token (:obj:`str`, `optional`):
|
| 71 |
+
The unknown token to be used by the model.
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
if special_tokens is None:
|
| 75 |
+
special_tokens = []
|
| 76 |
+
|
| 77 |
+
if initial_alphabet is None:
|
| 78 |
+
initial_alphabet = []
|
| 79 |
+
|
| 80 |
+
trainer = trainers.UnigramTrainer(
|
| 81 |
+
vocab_size=vocab_size,
|
| 82 |
+
special_tokens=special_tokens,
|
| 83 |
+
show_progress=show_progress,
|
| 84 |
+
initial_alphabet=initial_alphabet,
|
| 85 |
+
unk_token=unk_token,
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
if isinstance(files, str):
|
| 89 |
+
files = [files]
|
| 90 |
+
self._tokenizer.train(files, trainer=trainer)
|
| 91 |
+
|
| 92 |
+
def train_from_iterator(
|
| 93 |
+
self,
|
| 94 |
+
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
|
| 95 |
+
vocab_size: int = 8000,
|
| 96 |
+
show_progress: bool = True,
|
| 97 |
+
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
|
| 98 |
+
initial_alphabet: Optional[List[str]] = None,
|
| 99 |
+
unk_token: Optional[str] = None,
|
| 100 |
+
length: Optional[int] = None,
|
| 101 |
+
):
|
| 102 |
+
"""
|
| 103 |
+
Train the model using the given iterator
|
| 104 |
+
|
| 105 |
+
Args:
|
| 106 |
+
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
|
| 107 |
+
Any iterator over strings or list of strings
|
| 108 |
+
vocab_size (:obj:`int`):
|
| 109 |
+
The size of the final vocabulary, including all tokens and alphabet.
|
| 110 |
+
show_progress (:obj:`bool`):
|
| 111 |
+
Whether to show progress bars while training.
|
| 112 |
+
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
|
| 113 |
+
A list of special tokens the model should know of.
|
| 114 |
+
initial_alphabet (:obj:`List[str]`, `optional`):
|
| 115 |
+
A list of characters to include in the initial alphabet, even
|
| 116 |
+
if not seen in the training dataset.
|
| 117 |
+
If the strings contain more than one character, only the first one
|
| 118 |
+
is kept.
|
| 119 |
+
unk_token (:obj:`str`, `optional`):
|
| 120 |
+
The unknown token to be used by the model.
|
| 121 |
+
length (:obj:`int`, `optional`):
|
| 122 |
+
The total number of sequences in the iterator. This is used to
|
| 123 |
+
provide meaningful progress tracking
|
| 124 |
+
"""
|
| 125 |
+
|
| 126 |
+
if special_tokens is None:
|
| 127 |
+
special_tokens = []
|
| 128 |
+
|
| 129 |
+
if initial_alphabet is None:
|
| 130 |
+
initial_alphabet = []
|
| 131 |
+
|
| 132 |
+
trainer = trainers.UnigramTrainer(
|
| 133 |
+
vocab_size=vocab_size,
|
| 134 |
+
special_tokens=special_tokens,
|
| 135 |
+
show_progress=show_progress,
|
| 136 |
+
initial_alphabet=initial_alphabet,
|
| 137 |
+
unk_token=unk_token,
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
self._tokenizer.train_from_iterator(
|
| 141 |
+
iterator,
|
| 142 |
+
trainer=trainer,
|
| 143 |
+
length=length,
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
@staticmethod
|
| 147 |
+
def from_spm(filename: str):
|
| 148 |
+
try:
|
| 149 |
+
import sys
|
| 150 |
+
|
| 151 |
+
sys.path.append(".")
|
| 152 |
+
|
| 153 |
+
import sentencepiece_model_pb2 as model
|
| 154 |
+
except Exception:
|
| 155 |
+
raise Exception(
|
| 156 |
+
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
m = model.ModelProto()
|
| 160 |
+
m.ParseFromString(open(filename, "rb").read())
|
| 161 |
+
|
| 162 |
+
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
|
| 163 |
+
vocab = [(piece.piece, piece.score) for piece in m.pieces]
|
| 164 |
+
unk_id = m.trainer_spec.unk_id
|
| 165 |
+
model_type = m.trainer_spec.model_type
|
| 166 |
+
byte_fallback = m.trainer_spec.byte_fallback
|
| 167 |
+
if model_type != 1:
|
| 168 |
+
raise Exception(
|
| 169 |
+
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
replacement = "▁"
|
| 173 |
+
add_prefix_space = True
|
| 174 |
+
|
| 175 |
+
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
|
| 176 |
+
|
| 177 |
+
if precompiled_charsmap:
|
| 178 |
+
tokenizer.normalizer = normalizers.Sequence(
|
| 179 |
+
[
|
| 180 |
+
normalizers.Precompiled(precompiled_charsmap),
|
| 181 |
+
normalizers.Replace(Regex(" {2,}"), " "),
|
| 182 |
+
]
|
| 183 |
+
)
|
| 184 |
+
else:
|
| 185 |
+
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
|
| 186 |
+
prepend_scheme = "always" if add_prefix_space else "never"
|
| 187 |
+
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
|
| 188 |
+
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
|
| 189 |
+
|
| 190 |
+
parameters = {
|
| 191 |
+
"model": "SentencePieceUnigram",
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
|
| 195 |
+
BaseTokenizer.__init__(obj, tokenizer, parameters)
|
| 196 |
+
return obj
|
wemm/lib/python3.10/site-packages/tokenizers/models/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
from .. import models
|
| 3 |
+
|
| 4 |
+
Model = models.Model
|
| 5 |
+
BPE = models.BPE
|
| 6 |
+
Unigram = models.Unigram
|
| 7 |
+
WordLevel = models.WordLevel
|
| 8 |
+
WordPiece = models.WordPiece
|
wemm/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class Normalizer:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all normalizers
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of a
|
| 7 |
+
Normalizer will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def normalize(self, normalized):
|
| 10 |
+
"""
|
| 11 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 12 |
+
|
| 13 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 14 |
+
keep track of the alignment information. If you just want to see the result
|
| 15 |
+
of the normalization on a raw string, you can use
|
| 16 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 20 |
+
The normalized string on which to apply this
|
| 21 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 22 |
+
"""
|
| 23 |
+
pass
|
| 24 |
+
|
| 25 |
+
def normalize_str(self, sequence):
|
| 26 |
+
"""
|
| 27 |
+
Normalize the given string
|
| 28 |
+
|
| 29 |
+
This method provides a way to visualize the effect of a
|
| 30 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 31 |
+
information. If you need to get/convert offsets, you can use
|
| 32 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
sequence (:obj:`str`):
|
| 36 |
+
A string to normalize
|
| 37 |
+
|
| 38 |
+
Returns:
|
| 39 |
+
:obj:`str`: A string after normalization
|
| 40 |
+
"""
|
| 41 |
+
pass
|
| 42 |
+
|
| 43 |
+
class BertNormalizer(Normalizer):
|
| 44 |
+
"""
|
| 45 |
+
BertNormalizer
|
| 46 |
+
|
| 47 |
+
Takes care of normalizing raw text before giving it to a Bert model.
|
| 48 |
+
This includes cleaning the text, handling accents, chinese chars and lowercasing
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 52 |
+
Whether to clean the text, by removing any control characters
|
| 53 |
+
and replacing all whitespaces by the classic one.
|
| 54 |
+
|
| 55 |
+
handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 56 |
+
Whether to handle chinese chars by putting spaces around them.
|
| 57 |
+
|
| 58 |
+
strip_accents (:obj:`bool`, `optional`):
|
| 59 |
+
Whether to strip all accents. If this option is not specified (ie == None),
|
| 60 |
+
then it will be determined by the value for `lowercase` (as in the original Bert).
|
| 61 |
+
|
| 62 |
+
lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 63 |
+
Whether to lowercase.
|
| 64 |
+
"""
|
| 65 |
+
def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
def normalize(self, normalized):
|
| 69 |
+
"""
|
| 70 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 71 |
+
|
| 72 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 73 |
+
keep track of the alignment information. If you just want to see the result
|
| 74 |
+
of the normalization on a raw string, you can use
|
| 75 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 79 |
+
The normalized string on which to apply this
|
| 80 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 81 |
+
"""
|
| 82 |
+
pass
|
| 83 |
+
|
| 84 |
+
def normalize_str(self, sequence):
|
| 85 |
+
"""
|
| 86 |
+
Normalize the given string
|
| 87 |
+
|
| 88 |
+
This method provides a way to visualize the effect of a
|
| 89 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 90 |
+
information. If you need to get/convert offsets, you can use
|
| 91 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
sequence (:obj:`str`):
|
| 95 |
+
A string to normalize
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
:obj:`str`: A string after normalization
|
| 99 |
+
"""
|
| 100 |
+
pass
|
| 101 |
+
|
| 102 |
+
class Lowercase(Normalizer):
|
| 103 |
+
"""
|
| 104 |
+
Lowercase Normalizer
|
| 105 |
+
"""
|
| 106 |
+
def __init__(self):
|
| 107 |
+
pass
|
| 108 |
+
|
| 109 |
+
def normalize(self, normalized):
|
| 110 |
+
"""
|
| 111 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 112 |
+
|
| 113 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 114 |
+
keep track of the alignment information. If you just want to see the result
|
| 115 |
+
of the normalization on a raw string, you can use
|
| 116 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 117 |
+
|
| 118 |
+
Args:
|
| 119 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 120 |
+
The normalized string on which to apply this
|
| 121 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 122 |
+
"""
|
| 123 |
+
pass
|
| 124 |
+
|
| 125 |
+
def normalize_str(self, sequence):
|
| 126 |
+
"""
|
| 127 |
+
Normalize the given string
|
| 128 |
+
|
| 129 |
+
This method provides a way to visualize the effect of a
|
| 130 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 131 |
+
information. If you need to get/convert offsets, you can use
|
| 132 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 133 |
+
|
| 134 |
+
Args:
|
| 135 |
+
sequence (:obj:`str`):
|
| 136 |
+
A string to normalize
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
:obj:`str`: A string after normalization
|
| 140 |
+
"""
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
class NFC(Normalizer):
|
| 144 |
+
"""
|
| 145 |
+
NFC Unicode Normalizer
|
| 146 |
+
"""
|
| 147 |
+
def __init__(self):
|
| 148 |
+
pass
|
| 149 |
+
|
| 150 |
+
def normalize(self, normalized):
|
| 151 |
+
"""
|
| 152 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 153 |
+
|
| 154 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 155 |
+
keep track of the alignment information. If you just want to see the result
|
| 156 |
+
of the normalization on a raw string, you can use
|
| 157 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 161 |
+
The normalized string on which to apply this
|
| 162 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 163 |
+
"""
|
| 164 |
+
pass
|
| 165 |
+
|
| 166 |
+
def normalize_str(self, sequence):
|
| 167 |
+
"""
|
| 168 |
+
Normalize the given string
|
| 169 |
+
|
| 170 |
+
This method provides a way to visualize the effect of a
|
| 171 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 172 |
+
information. If you need to get/convert offsets, you can use
|
| 173 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
sequence (:obj:`str`):
|
| 177 |
+
A string to normalize
|
| 178 |
+
|
| 179 |
+
Returns:
|
| 180 |
+
:obj:`str`: A string after normalization
|
| 181 |
+
"""
|
| 182 |
+
pass
|
| 183 |
+
|
| 184 |
+
class NFD(Normalizer):
|
| 185 |
+
"""
|
| 186 |
+
NFD Unicode Normalizer
|
| 187 |
+
"""
|
| 188 |
+
def __init__(self):
|
| 189 |
+
pass
|
| 190 |
+
|
| 191 |
+
def normalize(self, normalized):
|
| 192 |
+
"""
|
| 193 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 194 |
+
|
| 195 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 196 |
+
keep track of the alignment information. If you just want to see the result
|
| 197 |
+
of the normalization on a raw string, you can use
|
| 198 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 199 |
+
|
| 200 |
+
Args:
|
| 201 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 202 |
+
The normalized string on which to apply this
|
| 203 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 204 |
+
"""
|
| 205 |
+
pass
|
| 206 |
+
|
| 207 |
+
def normalize_str(self, sequence):
|
| 208 |
+
"""
|
| 209 |
+
Normalize the given string
|
| 210 |
+
|
| 211 |
+
This method provides a way to visualize the effect of a
|
| 212 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 213 |
+
information. If you need to get/convert offsets, you can use
|
| 214 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 215 |
+
|
| 216 |
+
Args:
|
| 217 |
+
sequence (:obj:`str`):
|
| 218 |
+
A string to normalize
|
| 219 |
+
|
| 220 |
+
Returns:
|
| 221 |
+
:obj:`str`: A string after normalization
|
| 222 |
+
"""
|
| 223 |
+
pass
|
| 224 |
+
|
| 225 |
+
class NFKC(Normalizer):
|
| 226 |
+
"""
|
| 227 |
+
NFKC Unicode Normalizer
|
| 228 |
+
"""
|
| 229 |
+
def __init__(self):
|
| 230 |
+
pass
|
| 231 |
+
|
| 232 |
+
def normalize(self, normalized):
|
| 233 |
+
"""
|
| 234 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 235 |
+
|
| 236 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 237 |
+
keep track of the alignment information. If you just want to see the result
|
| 238 |
+
of the normalization on a raw string, you can use
|
| 239 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 240 |
+
|
| 241 |
+
Args:
|
| 242 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 243 |
+
The normalized string on which to apply this
|
| 244 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 245 |
+
"""
|
| 246 |
+
pass
|
| 247 |
+
|
| 248 |
+
def normalize_str(self, sequence):
|
| 249 |
+
"""
|
| 250 |
+
Normalize the given string
|
| 251 |
+
|
| 252 |
+
This method provides a way to visualize the effect of a
|
| 253 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 254 |
+
information. If you need to get/convert offsets, you can use
|
| 255 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 256 |
+
|
| 257 |
+
Args:
|
| 258 |
+
sequence (:obj:`str`):
|
| 259 |
+
A string to normalize
|
| 260 |
+
|
| 261 |
+
Returns:
|
| 262 |
+
:obj:`str`: A string after normalization
|
| 263 |
+
"""
|
| 264 |
+
pass
|
| 265 |
+
|
| 266 |
+
class NFKD(Normalizer):
|
| 267 |
+
"""
|
| 268 |
+
NFKD Unicode Normalizer
|
| 269 |
+
"""
|
| 270 |
+
def __init__(self):
|
| 271 |
+
pass
|
| 272 |
+
|
| 273 |
+
def normalize(self, normalized):
|
| 274 |
+
"""
|
| 275 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 276 |
+
|
| 277 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 278 |
+
keep track of the alignment information. If you just want to see the result
|
| 279 |
+
of the normalization on a raw string, you can use
|
| 280 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 284 |
+
The normalized string on which to apply this
|
| 285 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 286 |
+
"""
|
| 287 |
+
pass
|
| 288 |
+
|
| 289 |
+
def normalize_str(self, sequence):
|
| 290 |
+
"""
|
| 291 |
+
Normalize the given string
|
| 292 |
+
|
| 293 |
+
This method provides a way to visualize the effect of a
|
| 294 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 295 |
+
information. If you need to get/convert offsets, you can use
|
| 296 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 297 |
+
|
| 298 |
+
Args:
|
| 299 |
+
sequence (:obj:`str`):
|
| 300 |
+
A string to normalize
|
| 301 |
+
|
| 302 |
+
Returns:
|
| 303 |
+
:obj:`str`: A string after normalization
|
| 304 |
+
"""
|
| 305 |
+
pass
|
| 306 |
+
|
| 307 |
+
class Nmt(Normalizer):
|
| 308 |
+
"""
|
| 309 |
+
Nmt normalizer
|
| 310 |
+
"""
|
| 311 |
+
def __init__(self):
|
| 312 |
+
pass
|
| 313 |
+
|
| 314 |
+
def normalize(self, normalized):
|
| 315 |
+
"""
|
| 316 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 317 |
+
|
| 318 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 319 |
+
keep track of the alignment information. If you just want to see the result
|
| 320 |
+
of the normalization on a raw string, you can use
|
| 321 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 322 |
+
|
| 323 |
+
Args:
|
| 324 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 325 |
+
The normalized string on which to apply this
|
| 326 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 327 |
+
"""
|
| 328 |
+
pass
|
| 329 |
+
|
| 330 |
+
def normalize_str(self, sequence):
|
| 331 |
+
"""
|
| 332 |
+
Normalize the given string
|
| 333 |
+
|
| 334 |
+
This method provides a way to visualize the effect of a
|
| 335 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 336 |
+
information. If you need to get/convert offsets, you can use
|
| 337 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 338 |
+
|
| 339 |
+
Args:
|
| 340 |
+
sequence (:obj:`str`):
|
| 341 |
+
A string to normalize
|
| 342 |
+
|
| 343 |
+
Returns:
|
| 344 |
+
:obj:`str`: A string after normalization
|
| 345 |
+
"""
|
| 346 |
+
pass
|
| 347 |
+
|
| 348 |
+
class Precompiled(Normalizer):
|
| 349 |
+
"""
|
| 350 |
+
Precompiled normalizer
|
| 351 |
+
Don't use manually it is used for compatiblity for SentencePiece.
|
| 352 |
+
"""
|
| 353 |
+
def __init__(self, precompiled_charsmap):
|
| 354 |
+
pass
|
| 355 |
+
|
| 356 |
+
def normalize(self, normalized):
|
| 357 |
+
"""
|
| 358 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 359 |
+
|
| 360 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 361 |
+
keep track of the alignment information. If you just want to see the result
|
| 362 |
+
of the normalization on a raw string, you can use
|
| 363 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 364 |
+
|
| 365 |
+
Args:
|
| 366 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 367 |
+
The normalized string on which to apply this
|
| 368 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 369 |
+
"""
|
| 370 |
+
pass
|
| 371 |
+
|
| 372 |
+
def normalize_str(self, sequence):
|
| 373 |
+
"""
|
| 374 |
+
Normalize the given string
|
| 375 |
+
|
| 376 |
+
This method provides a way to visualize the effect of a
|
| 377 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 378 |
+
information. If you need to get/convert offsets, you can use
|
| 379 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 380 |
+
|
| 381 |
+
Args:
|
| 382 |
+
sequence (:obj:`str`):
|
| 383 |
+
A string to normalize
|
| 384 |
+
|
| 385 |
+
Returns:
|
| 386 |
+
:obj:`str`: A string after normalization
|
| 387 |
+
"""
|
| 388 |
+
pass
|
| 389 |
+
|
| 390 |
+
class Prepend(Normalizer):
|
| 391 |
+
"""
|
| 392 |
+
Prepend normalizer
|
| 393 |
+
"""
|
| 394 |
+
def __init__(self, prepend):
|
| 395 |
+
pass
|
| 396 |
+
|
| 397 |
+
def normalize(self, normalized):
|
| 398 |
+
"""
|
| 399 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 400 |
+
|
| 401 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 402 |
+
keep track of the alignment information. If you just want to see the result
|
| 403 |
+
of the normalization on a raw string, you can use
|
| 404 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 405 |
+
|
| 406 |
+
Args:
|
| 407 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 408 |
+
The normalized string on which to apply this
|
| 409 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 410 |
+
"""
|
| 411 |
+
pass
|
| 412 |
+
|
| 413 |
+
def normalize_str(self, sequence):
|
| 414 |
+
"""
|
| 415 |
+
Normalize the given string
|
| 416 |
+
|
| 417 |
+
This method provides a way to visualize the effect of a
|
| 418 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 419 |
+
information. If you need to get/convert offsets, you can use
|
| 420 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 421 |
+
|
| 422 |
+
Args:
|
| 423 |
+
sequence (:obj:`str`):
|
| 424 |
+
A string to normalize
|
| 425 |
+
|
| 426 |
+
Returns:
|
| 427 |
+
:obj:`str`: A string after normalization
|
| 428 |
+
"""
|
| 429 |
+
pass
|
| 430 |
+
|
| 431 |
+
class Replace(Normalizer):
|
| 432 |
+
"""
|
| 433 |
+
Replace normalizer
|
| 434 |
+
"""
|
| 435 |
+
def __init__(self, pattern, content):
|
| 436 |
+
pass
|
| 437 |
+
|
| 438 |
+
def normalize(self, normalized):
|
| 439 |
+
"""
|
| 440 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 441 |
+
|
| 442 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 443 |
+
keep track of the alignment information. If you just want to see the result
|
| 444 |
+
of the normalization on a raw string, you can use
|
| 445 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 446 |
+
|
| 447 |
+
Args:
|
| 448 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 449 |
+
The normalized string on which to apply this
|
| 450 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 451 |
+
"""
|
| 452 |
+
pass
|
| 453 |
+
|
| 454 |
+
def normalize_str(self, sequence):
|
| 455 |
+
"""
|
| 456 |
+
Normalize the given string
|
| 457 |
+
|
| 458 |
+
This method provides a way to visualize the effect of a
|
| 459 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 460 |
+
information. If you need to get/convert offsets, you can use
|
| 461 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 462 |
+
|
| 463 |
+
Args:
|
| 464 |
+
sequence (:obj:`str`):
|
| 465 |
+
A string to normalize
|
| 466 |
+
|
| 467 |
+
Returns:
|
| 468 |
+
:obj:`str`: A string after normalization
|
| 469 |
+
"""
|
| 470 |
+
pass
|
| 471 |
+
|
| 472 |
+
class Sequence(Normalizer):
|
| 473 |
+
"""
|
| 474 |
+
Allows concatenating multiple other Normalizer as a Sequence.
|
| 475 |
+
All the normalizers run in sequence in the given order
|
| 476 |
+
|
| 477 |
+
Args:
|
| 478 |
+
normalizers (:obj:`List[Normalizer]`):
|
| 479 |
+
A list of Normalizer to be run as a sequence
|
| 480 |
+
"""
|
| 481 |
+
def normalize(self, normalized):
|
| 482 |
+
"""
|
| 483 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 484 |
+
|
| 485 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 486 |
+
keep track of the alignment information. If you just want to see the result
|
| 487 |
+
of the normalization on a raw string, you can use
|
| 488 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 489 |
+
|
| 490 |
+
Args:
|
| 491 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 492 |
+
The normalized string on which to apply this
|
| 493 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 494 |
+
"""
|
| 495 |
+
pass
|
| 496 |
+
|
| 497 |
+
def normalize_str(self, sequence):
|
| 498 |
+
"""
|
| 499 |
+
Normalize the given string
|
| 500 |
+
|
| 501 |
+
This method provides a way to visualize the effect of a
|
| 502 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 503 |
+
information. If you need to get/convert offsets, you can use
|
| 504 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 505 |
+
|
| 506 |
+
Args:
|
| 507 |
+
sequence (:obj:`str`):
|
| 508 |
+
A string to normalize
|
| 509 |
+
|
| 510 |
+
Returns:
|
| 511 |
+
:obj:`str`: A string after normalization
|
| 512 |
+
"""
|
| 513 |
+
pass
|
| 514 |
+
|
| 515 |
+
class Strip(Normalizer):
|
| 516 |
+
"""
|
| 517 |
+
Strip normalizer
|
| 518 |
+
"""
|
| 519 |
+
def __init__(self, left=True, right=True):
|
| 520 |
+
pass
|
| 521 |
+
|
| 522 |
+
def normalize(self, normalized):
|
| 523 |
+
"""
|
| 524 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 525 |
+
|
| 526 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 527 |
+
keep track of the alignment information. If you just want to see the result
|
| 528 |
+
of the normalization on a raw string, you can use
|
| 529 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 530 |
+
|
| 531 |
+
Args:
|
| 532 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 533 |
+
The normalized string on which to apply this
|
| 534 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 535 |
+
"""
|
| 536 |
+
pass
|
| 537 |
+
|
| 538 |
+
def normalize_str(self, sequence):
|
| 539 |
+
"""
|
| 540 |
+
Normalize the given string
|
| 541 |
+
|
| 542 |
+
This method provides a way to visualize the effect of a
|
| 543 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 544 |
+
information. If you need to get/convert offsets, you can use
|
| 545 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 546 |
+
|
| 547 |
+
Args:
|
| 548 |
+
sequence (:obj:`str`):
|
| 549 |
+
A string to normalize
|
| 550 |
+
|
| 551 |
+
Returns:
|
| 552 |
+
:obj:`str`: A string after normalization
|
| 553 |
+
"""
|
| 554 |
+
pass
|
| 555 |
+
|
| 556 |
+
class StripAccents(Normalizer):
|
| 557 |
+
"""
|
| 558 |
+
StripAccents normalizer
|
| 559 |
+
"""
|
| 560 |
+
def __init__(self):
|
| 561 |
+
pass
|
| 562 |
+
|
| 563 |
+
def normalize(self, normalized):
|
| 564 |
+
"""
|
| 565 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 566 |
+
|
| 567 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 568 |
+
keep track of the alignment information. If you just want to see the result
|
| 569 |
+
of the normalization on a raw string, you can use
|
| 570 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 571 |
+
|
| 572 |
+
Args:
|
| 573 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 574 |
+
The normalized string on which to apply this
|
| 575 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 576 |
+
"""
|
| 577 |
+
pass
|
| 578 |
+
|
| 579 |
+
def normalize_str(self, sequence):
|
| 580 |
+
"""
|
| 581 |
+
Normalize the given string
|
| 582 |
+
|
| 583 |
+
This method provides a way to visualize the effect of a
|
| 584 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 585 |
+
information. If you need to get/convert offsets, you can use
|
| 586 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 587 |
+
|
| 588 |
+
Args:
|
| 589 |
+
sequence (:obj:`str`):
|
| 590 |
+
A string to normalize
|
| 591 |
+
|
| 592 |
+
Returns:
|
| 593 |
+
:obj:`str`: A string after normalization
|
| 594 |
+
"""
|
| 595 |
+
pass
|
wemm/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (786 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class PreTokenizer:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all pre-tokenizers
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of a
|
| 7 |
+
PreTokenizer will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def pre_tokenize(self, pretok):
|
| 10 |
+
"""
|
| 11 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 12 |
+
|
| 13 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 14 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 15 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 16 |
+
the pre-tokenization of a raw string, you can use
|
| 17 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 21 |
+
The pre-tokenized string on which to apply this
|
| 22 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 23 |
+
"""
|
| 24 |
+
pass
|
| 25 |
+
|
| 26 |
+
def pre_tokenize_str(self, sequence):
|
| 27 |
+
"""
|
| 28 |
+
Pre tokenize the given string
|
| 29 |
+
|
| 30 |
+
This method provides a way to visualize the effect of a
|
| 31 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 32 |
+
alignment, nor does it provide all the capabilities of the
|
| 33 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 34 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
sequence (:obj:`str`):
|
| 38 |
+
A string to pre-tokeize
|
| 39 |
+
|
| 40 |
+
Returns:
|
| 41 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 42 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 43 |
+
"""
|
| 44 |
+
pass
|
| 45 |
+
|
| 46 |
+
class BertPreTokenizer(PreTokenizer):
|
| 47 |
+
"""
|
| 48 |
+
BertPreTokenizer
|
| 49 |
+
|
| 50 |
+
This pre-tokenizer splits tokens on spaces, and also on punctuation.
|
| 51 |
+
Each occurence of a punctuation character will be treated separately.
|
| 52 |
+
"""
|
| 53 |
+
def __init__(self):
|
| 54 |
+
pass
|
| 55 |
+
|
| 56 |
+
def pre_tokenize(self, pretok):
|
| 57 |
+
"""
|
| 58 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 59 |
+
|
| 60 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 61 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 62 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 63 |
+
the pre-tokenization of a raw string, you can use
|
| 64 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 68 |
+
The pre-tokenized string on which to apply this
|
| 69 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 70 |
+
"""
|
| 71 |
+
pass
|
| 72 |
+
|
| 73 |
+
def pre_tokenize_str(self, sequence):
|
| 74 |
+
"""
|
| 75 |
+
Pre tokenize the given string
|
| 76 |
+
|
| 77 |
+
This method provides a way to visualize the effect of a
|
| 78 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 79 |
+
alignment, nor does it provide all the capabilities of the
|
| 80 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 81 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
sequence (:obj:`str`):
|
| 85 |
+
A string to pre-tokeize
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 89 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 90 |
+
"""
|
| 91 |
+
pass
|
| 92 |
+
|
| 93 |
+
class ByteLevel(PreTokenizer):
|
| 94 |
+
"""
|
| 95 |
+
ByteLevel PreTokenizer
|
| 96 |
+
|
| 97 |
+
This pre-tokenizer takes care of replacing all bytes of the given string
|
| 98 |
+
with a corresponding representation, as well as splitting into words.
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 102 |
+
Whether to add a space to the first word if there isn't already one. This
|
| 103 |
+
lets us treat `hello` exactly like `say hello`.
|
| 104 |
+
use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 105 |
+
Set this to :obj:`False` to prevent this `pre_tokenizer` from using
|
| 106 |
+
the GPT2 specific regexp for spliting on whitespace.
|
| 107 |
+
"""
|
| 108 |
+
def __init__(self, add_prefix_space=True, use_regex=True):
|
| 109 |
+
pass
|
| 110 |
+
|
| 111 |
+
@staticmethod
|
| 112 |
+
def alphabet():
|
| 113 |
+
"""
|
| 114 |
+
Returns the alphabet used by this PreTokenizer.
|
| 115 |
+
|
| 116 |
+
Since the ByteLevel works as its name suggests, at the byte level, it
|
| 117 |
+
encodes each byte value to a unique visible character. This means that there is a
|
| 118 |
+
total of 256 different characters composing this alphabet.
|
| 119 |
+
|
| 120 |
+
Returns:
|
| 121 |
+
:obj:`List[str]`: A list of characters that compose the alphabet
|
| 122 |
+
"""
|
| 123 |
+
pass
|
| 124 |
+
|
| 125 |
+
def pre_tokenize(self, pretok):
|
| 126 |
+
"""
|
| 127 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 128 |
+
|
| 129 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 130 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 131 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 132 |
+
the pre-tokenization of a raw string, you can use
|
| 133 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 137 |
+
The pre-tokenized string on which to apply this
|
| 138 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 139 |
+
"""
|
| 140 |
+
pass
|
| 141 |
+
|
| 142 |
+
def pre_tokenize_str(self, sequence):
|
| 143 |
+
"""
|
| 144 |
+
Pre tokenize the given string
|
| 145 |
+
|
| 146 |
+
This method provides a way to visualize the effect of a
|
| 147 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 148 |
+
alignment, nor does it provide all the capabilities of the
|
| 149 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 150 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
sequence (:obj:`str`):
|
| 154 |
+
A string to pre-tokeize
|
| 155 |
+
|
| 156 |
+
Returns:
|
| 157 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 158 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 159 |
+
"""
|
| 160 |
+
pass
|
| 161 |
+
|
| 162 |
+
class CharDelimiterSplit(PreTokenizer):
|
| 163 |
+
"""
|
| 164 |
+
This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
delimiter: str:
|
| 168 |
+
The delimiter char that will be used to split input
|
| 169 |
+
"""
|
| 170 |
+
def pre_tokenize(self, pretok):
|
| 171 |
+
"""
|
| 172 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 173 |
+
|
| 174 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 175 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 176 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 177 |
+
the pre-tokenization of a raw string, you can use
|
| 178 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 179 |
+
|
| 180 |
+
Args:
|
| 181 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 182 |
+
The pre-tokenized string on which to apply this
|
| 183 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 184 |
+
"""
|
| 185 |
+
pass
|
| 186 |
+
|
| 187 |
+
def pre_tokenize_str(self, sequence):
|
| 188 |
+
"""
|
| 189 |
+
Pre tokenize the given string
|
| 190 |
+
|
| 191 |
+
This method provides a way to visualize the effect of a
|
| 192 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 193 |
+
alignment, nor does it provide all the capabilities of the
|
| 194 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 195 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 196 |
+
|
| 197 |
+
Args:
|
| 198 |
+
sequence (:obj:`str`):
|
| 199 |
+
A string to pre-tokeize
|
| 200 |
+
|
| 201 |
+
Returns:
|
| 202 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 203 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 204 |
+
"""
|
| 205 |
+
pass
|
| 206 |
+
|
| 207 |
+
class Digits(PreTokenizer):
|
| 208 |
+
"""
|
| 209 |
+
This pre-tokenizer simply splits using the digits in separate tokens
|
| 210 |
+
|
| 211 |
+
Args:
|
| 212 |
+
individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
| 213 |
+
If set to True, digits will each be separated as follows::
|
| 214 |
+
|
| 215 |
+
"Call 123 please" -> "Call ", "1", "2", "3", " please"
|
| 216 |
+
|
| 217 |
+
If set to False, digits will grouped as follows::
|
| 218 |
+
|
| 219 |
+
"Call 123 please" -> "Call ", "123", " please"
|
| 220 |
+
"""
|
| 221 |
+
def __init__(self, individual_digits=False):
|
| 222 |
+
pass
|
| 223 |
+
|
| 224 |
+
def pre_tokenize(self, pretok):
|
| 225 |
+
"""
|
| 226 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 227 |
+
|
| 228 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 229 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 230 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 231 |
+
the pre-tokenization of a raw string, you can use
|
| 232 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 236 |
+
The pre-tokenized string on which to apply this
|
| 237 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 238 |
+
"""
|
| 239 |
+
pass
|
| 240 |
+
|
| 241 |
+
def pre_tokenize_str(self, sequence):
|
| 242 |
+
"""
|
| 243 |
+
Pre tokenize the given string
|
| 244 |
+
|
| 245 |
+
This method provides a way to visualize the effect of a
|
| 246 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 247 |
+
alignment, nor does it provide all the capabilities of the
|
| 248 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 249 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 250 |
+
|
| 251 |
+
Args:
|
| 252 |
+
sequence (:obj:`str`):
|
| 253 |
+
A string to pre-tokeize
|
| 254 |
+
|
| 255 |
+
Returns:
|
| 256 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 257 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 258 |
+
"""
|
| 259 |
+
pass
|
| 260 |
+
|
| 261 |
+
class Metaspace(PreTokenizer):
|
| 262 |
+
"""
|
| 263 |
+
Metaspace pre-tokenizer
|
| 264 |
+
|
| 265 |
+
This pre-tokenizer replaces any whitespace by the provided replacement character.
|
| 266 |
+
It then tries to split on these spaces.
|
| 267 |
+
|
| 268 |
+
Args:
|
| 269 |
+
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
|
| 270 |
+
The replacement character. Must be exactly one character. By default we
|
| 271 |
+
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
|
| 272 |
+
|
| 273 |
+
prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
|
| 274 |
+
Whether to add a space to the first word if there isn't already one. This
|
| 275 |
+
lets us treat `hello` exactly like `say hello`.
|
| 276 |
+
Choices: "always", "never", "first". First means the space is only added on the first
|
| 277 |
+
token (relevant when special tokens are used or other pre_tokenizer are used).
|
| 278 |
+
|
| 279 |
+
"""
|
| 280 |
+
def __init__(self, replacement="_", prepend_scheme="always", split=True):
|
| 281 |
+
pass
|
| 282 |
+
|
| 283 |
+
def pre_tokenize(self, pretok):
|
| 284 |
+
"""
|
| 285 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 286 |
+
|
| 287 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 288 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 289 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 290 |
+
the pre-tokenization of a raw string, you can use
|
| 291 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 292 |
+
|
| 293 |
+
Args:
|
| 294 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 295 |
+
The pre-tokenized string on which to apply this
|
| 296 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 297 |
+
"""
|
| 298 |
+
pass
|
| 299 |
+
|
| 300 |
+
def pre_tokenize_str(self, sequence):
|
| 301 |
+
"""
|
| 302 |
+
Pre tokenize the given string
|
| 303 |
+
|
| 304 |
+
This method provides a way to visualize the effect of a
|
| 305 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 306 |
+
alignment, nor does it provide all the capabilities of the
|
| 307 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 308 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 309 |
+
|
| 310 |
+
Args:
|
| 311 |
+
sequence (:obj:`str`):
|
| 312 |
+
A string to pre-tokeize
|
| 313 |
+
|
| 314 |
+
Returns:
|
| 315 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 316 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 317 |
+
"""
|
| 318 |
+
pass
|
| 319 |
+
|
| 320 |
+
class Punctuation(PreTokenizer):
|
| 321 |
+
"""
|
| 322 |
+
This pre-tokenizer simply splits on punctuation as individual characters.
|
| 323 |
+
|
| 324 |
+
Args:
|
| 325 |
+
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
|
| 326 |
+
The behavior to use when splitting.
|
| 327 |
+
Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
|
| 328 |
+
"contiguous"
|
| 329 |
+
"""
|
| 330 |
+
def __init__(self, behavior="isolated"):
|
| 331 |
+
pass
|
| 332 |
+
|
| 333 |
+
def pre_tokenize(self, pretok):
|
| 334 |
+
"""
|
| 335 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 336 |
+
|
| 337 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 338 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 339 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 340 |
+
the pre-tokenization of a raw string, you can use
|
| 341 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 342 |
+
|
| 343 |
+
Args:
|
| 344 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 345 |
+
The pre-tokenized string on which to apply this
|
| 346 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 347 |
+
"""
|
| 348 |
+
pass
|
| 349 |
+
|
| 350 |
+
def pre_tokenize_str(self, sequence):
|
| 351 |
+
"""
|
| 352 |
+
Pre tokenize the given string
|
| 353 |
+
|
| 354 |
+
This method provides a way to visualize the effect of a
|
| 355 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 356 |
+
alignment, nor does it provide all the capabilities of the
|
| 357 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 358 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 359 |
+
|
| 360 |
+
Args:
|
| 361 |
+
sequence (:obj:`str`):
|
| 362 |
+
A string to pre-tokeize
|
| 363 |
+
|
| 364 |
+
Returns:
|
| 365 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 366 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 367 |
+
"""
|
| 368 |
+
pass
|
| 369 |
+
|
| 370 |
+
class Sequence(PreTokenizer):
|
| 371 |
+
"""
|
| 372 |
+
This pre-tokenizer composes other pre_tokenizers and applies them in sequence
|
| 373 |
+
"""
|
| 374 |
+
def __init__(self, pretokenizers):
|
| 375 |
+
pass
|
| 376 |
+
|
| 377 |
+
def pre_tokenize(self, pretok):
|
| 378 |
+
"""
|
| 379 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 380 |
+
|
| 381 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 382 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 383 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 384 |
+
the pre-tokenization of a raw string, you can use
|
| 385 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 386 |
+
|
| 387 |
+
Args:
|
| 388 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 389 |
+
The pre-tokenized string on which to apply this
|
| 390 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 391 |
+
"""
|
| 392 |
+
pass
|
| 393 |
+
|
| 394 |
+
def pre_tokenize_str(self, sequence):
|
| 395 |
+
"""
|
| 396 |
+
Pre tokenize the given string
|
| 397 |
+
|
| 398 |
+
This method provides a way to visualize the effect of a
|
| 399 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 400 |
+
alignment, nor does it provide all the capabilities of the
|
| 401 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 402 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 403 |
+
|
| 404 |
+
Args:
|
| 405 |
+
sequence (:obj:`str`):
|
| 406 |
+
A string to pre-tokeize
|
| 407 |
+
|
| 408 |
+
Returns:
|
| 409 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 410 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 411 |
+
"""
|
| 412 |
+
pass
|
| 413 |
+
|
| 414 |
+
class Split(PreTokenizer):
|
| 415 |
+
"""
|
| 416 |
+
Split PreTokenizer
|
| 417 |
+
|
| 418 |
+
This versatile pre-tokenizer splits using the provided pattern and
|
| 419 |
+
according to the provided behavior. The pattern can be inverted by
|
| 420 |
+
making use of the invert flag.
|
| 421 |
+
|
| 422 |
+
Args:
|
| 423 |
+
pattern (:obj:`str` or :class:`~tokenizers.Regex`):
|
| 424 |
+
A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex`
|
| 425 |
+
|
| 426 |
+
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
|
| 427 |
+
The behavior to use when splitting.
|
| 428 |
+
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
|
| 429 |
+
"contiguous"
|
| 430 |
+
|
| 431 |
+
invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
| 432 |
+
Whether to invert the pattern.
|
| 433 |
+
"""
|
| 434 |
+
def __init__(self, pattern, behavior, invert=False):
|
| 435 |
+
pass
|
| 436 |
+
|
| 437 |
+
def pre_tokenize(self, pretok):
|
| 438 |
+
"""
|
| 439 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 440 |
+
|
| 441 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 442 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 443 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 444 |
+
the pre-tokenization of a raw string, you can use
|
| 445 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 446 |
+
|
| 447 |
+
Args:
|
| 448 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 449 |
+
The pre-tokenized string on which to apply this
|
| 450 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 451 |
+
"""
|
| 452 |
+
pass
|
| 453 |
+
|
| 454 |
+
def pre_tokenize_str(self, sequence):
|
| 455 |
+
"""
|
| 456 |
+
Pre tokenize the given string
|
| 457 |
+
|
| 458 |
+
This method provides a way to visualize the effect of a
|
| 459 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 460 |
+
alignment, nor does it provide all the capabilities of the
|
| 461 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 462 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 463 |
+
|
| 464 |
+
Args:
|
| 465 |
+
sequence (:obj:`str`):
|
| 466 |
+
A string to pre-tokeize
|
| 467 |
+
|
| 468 |
+
Returns:
|
| 469 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 470 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 471 |
+
"""
|
| 472 |
+
pass
|
| 473 |
+
|
| 474 |
+
class UnicodeScripts(PreTokenizer):
|
| 475 |
+
"""
|
| 476 |
+
This pre-tokenizer splits on characters that belong to different language family
|
| 477 |
+
It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
|
| 478 |
+
Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
|
| 479 |
+
This mimicks SentencePiece Unigram implementation.
|
| 480 |
+
"""
|
| 481 |
+
def __init__(self):
|
| 482 |
+
pass
|
| 483 |
+
|
| 484 |
+
def pre_tokenize(self, pretok):
|
| 485 |
+
"""
|
| 486 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 487 |
+
|
| 488 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 489 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 490 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 491 |
+
the pre-tokenization of a raw string, you can use
|
| 492 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 493 |
+
|
| 494 |
+
Args:
|
| 495 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 496 |
+
The pre-tokenized string on which to apply this
|
| 497 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 498 |
+
"""
|
| 499 |
+
pass
|
| 500 |
+
|
| 501 |
+
def pre_tokenize_str(self, sequence):
|
| 502 |
+
"""
|
| 503 |
+
Pre tokenize the given string
|
| 504 |
+
|
| 505 |
+
This method provides a way to visualize the effect of a
|
| 506 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 507 |
+
alignment, nor does it provide all the capabilities of the
|
| 508 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 509 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 510 |
+
|
| 511 |
+
Args:
|
| 512 |
+
sequence (:obj:`str`):
|
| 513 |
+
A string to pre-tokeize
|
| 514 |
+
|
| 515 |
+
Returns:
|
| 516 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 517 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 518 |
+
"""
|
| 519 |
+
pass
|
| 520 |
+
|
| 521 |
+
class Whitespace(PreTokenizer):
|
| 522 |
+
"""
|
| 523 |
+
This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
|
| 524 |
+
"""
|
| 525 |
+
def __init__(self):
|
| 526 |
+
pass
|
| 527 |
+
|
| 528 |
+
def pre_tokenize(self, pretok):
|
| 529 |
+
"""
|
| 530 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 531 |
+
|
| 532 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 533 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 534 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 535 |
+
the pre-tokenization of a raw string, you can use
|
| 536 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 537 |
+
|
| 538 |
+
Args:
|
| 539 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 540 |
+
The pre-tokenized string on which to apply this
|
| 541 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 542 |
+
"""
|
| 543 |
+
pass
|
| 544 |
+
|
| 545 |
+
def pre_tokenize_str(self, sequence):
|
| 546 |
+
"""
|
| 547 |
+
Pre tokenize the given string
|
| 548 |
+
|
| 549 |
+
This method provides a way to visualize the effect of a
|
| 550 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 551 |
+
alignment, nor does it provide all the capabilities of the
|
| 552 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 553 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 554 |
+
|
| 555 |
+
Args:
|
| 556 |
+
sequence (:obj:`str`):
|
| 557 |
+
A string to pre-tokeize
|
| 558 |
+
|
| 559 |
+
Returns:
|
| 560 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 561 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 562 |
+
"""
|
| 563 |
+
pass
|
| 564 |
+
|
| 565 |
+
class WhitespaceSplit(PreTokenizer):
|
| 566 |
+
"""
|
| 567 |
+
This pre-tokenizer simply splits on the whitespace. Works like `.split()`
|
| 568 |
+
"""
|
| 569 |
+
def __init__(self):
|
| 570 |
+
pass
|
| 571 |
+
|
| 572 |
+
def pre_tokenize(self, pretok):
|
| 573 |
+
"""
|
| 574 |
+
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
|
| 575 |
+
|
| 576 |
+
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
|
| 577 |
+
keep track of the pre-tokenization, and leverage the capabilities of the
|
| 578 |
+
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
|
| 579 |
+
the pre-tokenization of a raw string, you can use
|
| 580 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
|
| 581 |
+
|
| 582 |
+
Args:
|
| 583 |
+
pretok (:class:`~tokenizers.PreTokenizedString):
|
| 584 |
+
The pre-tokenized string on which to apply this
|
| 585 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
|
| 586 |
+
"""
|
| 587 |
+
pass
|
| 588 |
+
|
| 589 |
+
def pre_tokenize_str(self, sequence):
|
| 590 |
+
"""
|
| 591 |
+
Pre tokenize the given string
|
| 592 |
+
|
| 593 |
+
This method provides a way to visualize the effect of a
|
| 594 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
|
| 595 |
+
alignment, nor does it provide all the capabilities of the
|
| 596 |
+
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
|
| 597 |
+
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
|
| 598 |
+
|
| 599 |
+
Args:
|
| 600 |
+
sequence (:obj:`str`):
|
| 601 |
+
A string to pre-tokeize
|
| 602 |
+
|
| 603 |
+
Returns:
|
| 604 |
+
:obj:`List[Tuple[str, Offsets]]`:
|
| 605 |
+
A list of tuple with the pre-tokenized parts and their offsets
|
| 606 |
+
"""
|
| 607 |
+
pass
|
wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (467 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
from .. import processors
|
| 3 |
+
|
| 4 |
+
PostProcessor = processors.PostProcessor
|
| 5 |
+
BertProcessing = processors.BertProcessing
|
| 6 |
+
ByteLevel = processors.ByteLevel
|
| 7 |
+
RobertaProcessing = processors.RobertaProcessing
|
| 8 |
+
Sequence = processors.Sequence
|
| 9 |
+
TemplateProcessing = processors.TemplateProcessing
|
wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class PostProcessor:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all post-processors
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of
|
| 7 |
+
a PostProcessor will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 10 |
+
"""
|
| 11 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 12 |
+
|
| 13 |
+
Args:
|
| 14 |
+
is_pair (:obj:`bool`):
|
| 15 |
+
Whether the input would be a pair of sequences
|
| 16 |
+
|
| 17 |
+
Returns:
|
| 18 |
+
:obj:`int`: The number of tokens to add
|
| 19 |
+
"""
|
| 20 |
+
pass
|
| 21 |
+
|
| 22 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 23 |
+
"""
|
| 24 |
+
Post-process the given encodings, generating the final one
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 28 |
+
The encoding for the first sequence
|
| 29 |
+
|
| 30 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 31 |
+
The encoding for the pair sequence
|
| 32 |
+
|
| 33 |
+
add_special_tokens (:obj:`bool`):
|
| 34 |
+
Whether to add the special tokens
|
| 35 |
+
|
| 36 |
+
Return:
|
| 37 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 38 |
+
"""
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
class BertProcessing(PostProcessor):
|
| 42 |
+
"""
|
| 43 |
+
This post-processor takes care of adding the special tokens needed by
|
| 44 |
+
a Bert model:
|
| 45 |
+
|
| 46 |
+
- a SEP token
|
| 47 |
+
- a CLS token
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
sep (:obj:`Tuple[str, int]`):
|
| 51 |
+
A tuple with the string representation of the SEP token, and its id
|
| 52 |
+
|
| 53 |
+
cls (:obj:`Tuple[str, int]`):
|
| 54 |
+
A tuple with the string representation of the CLS token, and its id
|
| 55 |
+
"""
|
| 56 |
+
def __init__(self, sep, cls):
|
| 57 |
+
pass
|
| 58 |
+
|
| 59 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 60 |
+
"""
|
| 61 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
is_pair (:obj:`bool`):
|
| 65 |
+
Whether the input would be a pair of sequences
|
| 66 |
+
|
| 67 |
+
Returns:
|
| 68 |
+
:obj:`int`: The number of tokens to add
|
| 69 |
+
"""
|
| 70 |
+
pass
|
| 71 |
+
|
| 72 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 73 |
+
"""
|
| 74 |
+
Post-process the given encodings, generating the final one
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 78 |
+
The encoding for the first sequence
|
| 79 |
+
|
| 80 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 81 |
+
The encoding for the pair sequence
|
| 82 |
+
|
| 83 |
+
add_special_tokens (:obj:`bool`):
|
| 84 |
+
Whether to add the special tokens
|
| 85 |
+
|
| 86 |
+
Return:
|
| 87 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 88 |
+
"""
|
| 89 |
+
pass
|
| 90 |
+
|
| 91 |
+
class ByteLevel(PostProcessor):
    """
    This post-processor takes care of trimming the offsets.

    By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
    want the offsets to include these whitespaces, then this PostProcessor must be used.

    Args:
        trim_offsets (:obj:`bool`):
            Whether to trim the whitespaces from the produced offsets.
    """

    def __init__(self, trim_offsets: bool = True):
        pass

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        Args:
            is_pair (:obj:`bool`):
                Whether the input would be a pair of sequences

        Returns:
            :obj:`int`: The number of tokens to add
        """
        pass

    def process(self, encoding, pair=None, add_special_tokens: bool = True):
        """
        Post-process the given encodings, generating the final one

        Args:
            encoding (:class:`~tokenizers.Encoding`):
                The encoding for the first sequence

            pair (:class:`~tokenizers.Encoding`, `optional`):
                The encoding for the pair sequence

            add_special_tokens (:obj:`bool`):
                Whether to add the special tokens

        Return:
            :class:`~tokenizers.Encoding`: The final encoding
        """
        pass
|
| 136 |
+
|
| 137 |
+
class RobertaProcessing(PostProcessor):
    """
    This post-processor takes care of adding the special tokens needed by
    a Roberta model:

    - a SEP token
    - a CLS token

    It also takes care of trimming the offsets.
    By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
    want the offsets to include these whitespaces, then this PostProcessor should be initialized
    with :obj:`trim_offsets=True`

    Args:
        sep (:obj:`Tuple[str, int]`):
            A tuple with the string representation of the SEP token, and its id

        cls (:obj:`Tuple[str, int]`):
            A tuple with the string representation of the CLS token, and its id

        trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to trim the whitespaces from the produced offsets.

        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether the add_prefix_space option was enabled during pre-tokenization. This
            is relevant because it defines the way the offsets are trimmed out.
    """

    def __init__(
        self,
        sep: tuple[str, int],
        cls: tuple[str, int],
        trim_offsets: bool = True,
        add_prefix_space: bool = True,
    ):
        pass

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        Args:
            is_pair (:obj:`bool`):
                Whether the input would be a pair of sequences

        Returns:
            :obj:`int`: The number of tokens to add
        """
        pass

    def process(self, encoding, pair=None, add_special_tokens: bool = True):
        """
        Post-process the given encodings, generating the final one

        Args:
            encoding (:class:`~tokenizers.Encoding`):
                The encoding for the first sequence

            pair (:class:`~tokenizers.Encoding`, `optional`):
                The encoding for the pair sequence

            add_special_tokens (:obj:`bool`):
                Whether to add the special tokens

        Return:
            :class:`~tokenizers.Encoding`: The final encoding
        """
        pass
|
| 198 |
+
|
| 199 |
+
class Sequence(PostProcessor):
    """
    Sequence Processor

    Args:
        processors (:obj:`List[PostProcessor]`):
            The processors that need to be chained
    """

    def __init__(self, processors: list[PostProcessor]):
        pass

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        Args:
            is_pair (:obj:`bool`):
                Whether the input would be a pair of sequences

        Returns:
            :obj:`int`: The number of tokens to add
        """
        pass

    def process(self, encoding, pair=None, add_special_tokens: bool = True):
        """
        Post-process the given encodings, generating the final one

        Args:
            encoding (:class:`~tokenizers.Encoding`):
                The encoding for the first sequence

            pair (:class:`~tokenizers.Encoding`, `optional`):
                The encoding for the pair sequence

            add_special_tokens (:obj:`bool`):
                Whether to add the special tokens

        Return:
            :class:`~tokenizers.Encoding`: The final encoding
        """
        pass
|
| 241 |
+
|
| 242 |
+
class TemplateProcessing(PostProcessor):
    """
    Provides a way to specify templates in order to add the special tokens to each
    input sequence as relevant.

    Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
    delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first
    sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
    sequences. The final result looks like this:

        - Single sequence: :obj:`[CLS] Hello there [SEP]`
        - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`

    With the type ids as following::

        [CLS]   ...   [SEP]   ...   [SEP]
          0      0      0      1      1

    You can achieve such behavior using a TemplateProcessing::

        TemplateProcessing(
            single="[CLS] $0 [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
        )

    In this example, each input sequence is identified using a ``$`` construct. This identifier
    lets us specify each input sequence, and the type_id to use. When nothing is specified,
    it uses the default values. Here are the different ways to specify it:

        - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
        - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
        - Specifying both: ``$A:0``, ``$B:1``, ...

    The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.

    **Warning**: You must ensure that you are giving the correct tokens/ids as these
    will be added to the Encoding without any further check. If the given ids correspond
    to something totally different in a `Tokenizer` using this `PostProcessor`, it
    might lead to unexpected results.

    Args:
        single (:obj:`Template`):
            The template used for single sequences

        pair (:obj:`Template`):
            The template used when both sequences are specified

        special_tokens (:obj:`Tokens`):
            The list of special tokens used in each sequences

    Types:

        Template (:obj:`str` or :obj:`List`):
            - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
            - If a :obj:`List[str]` is provided, a list of tokens

        Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
            - A :obj:`Tuple` with both a token and its associated ID, in any order
            - A :obj:`dict` with the following keys:
                - "id": :obj:`str` => The special token id, as specified in the Template
                - "ids": :obj:`List[int]` => The associated IDs
                - "tokens": :obj:`List[str]` => The associated tokens

            The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
            the same length.
    """

    def __init__(self, single, pair, special_tokens):
        pass

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        Args:
            is_pair (:obj:`bool`):
                Whether the input would be a pair of sequences

        Returns:
            :obj:`int`: The number of tokens to add
        """
        pass

    def process(self, encoding, pair=None, add_special_tokens: bool = True):
        """
        Post-process the given encodings, generating the final one

        Args:
            encoding (:class:`~tokenizers.Encoding`):
                The encoding for the first sequence

            pair (:class:`~tokenizers.Encoding`, `optional`):
                The encoding for the pair sequence

            add_special_tokens (:obj:`bool`):
                Whether to add the special tokens

        Return:
            :class:`~tokenizers.Encoding`: The final encoding
        """
        pass
|
wemm/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (345 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3e4732f10ce97c9f5e903774dcb953c0f1518f40b27e287992075311835cefce
|
| 3 |
+
size 11815960
|
wemm/lib/python3.10/site-packages/tokenizers/tools/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .visualizer import Annotation, EncodingVisualizer
|
wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (240 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Stylesheet inlined by tokenizers/tools/visualizer.py into each generated
   HTML visualization: the tokenized-text container, alternating token spans,
   annotation labels (rendered via ::before/::after), and special-token
   markers read from data-* attributes. */
.tokenized-text {
    width:100%;
    padding:2rem;
    max-height: 400px;
    overflow-y: auto;
    box-sizing:border-box;
    line-height:4rem; /* Lots of space between lines */
    font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
    box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
    background-color: rgba(0,0,0,0.01);
    letter-spacing:2px; /* Give some extra separation between chars */
}
.non-token{
    /* White space and other things the tokenizer ignores*/
    white-space: pre;
    letter-spacing:4px;
    border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious*/
    border-bottom:1px solid #A0A0A0;
    line-height: 1rem;
    height: calc(100% - 2px);
}

.token {
    white-space: pre;
    position:relative;
    color:black;
    letter-spacing:2px;
}

.annotation{
    white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */
    border-radius:4px;
    position:relative;
    width:fit-content;
}
.annotation:before {
    /*The before holds the text and the after holds the background*/
    z-index:1000; /* Make sure this is above the background */
    content:attr(data-label); /* The annotations label is on a data attribute */
    color:white;
    position:absolute;
    font-size:1rem;
    text-align:center;
    font-weight:bold;

    top:1.75rem;
    line-height:0;
    left:0;
    width:100%;
    padding:0.5rem 0;
    /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
    overflow: hidden;
    white-space: nowrap;
    text-overflow:ellipsis;
}

.annotation:after {
    content:attr(data-label); /* The content defines the width of the annotation*/
    position:absolute;
    font-size:0.75rem;
    text-align:center;
    font-weight:bold;
    text-overflow:ellipsis;
    top:1.75rem;
    line-height:0;
    overflow: hidden;
    white-space: nowrap;

    left:0;
    width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/

    padding:0.5rem 0;
    /* Nasty hack below:
    We set the annotations color in code because we don't know the colors at css time.
    But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
    So to get around that, annotations have the color set on them with a style attribute and then we
    can get the color with currentColor.
    Annotations wrap tokens and tokens set the color back to black
    */
    background-color: currentColor;
}
.annotation:hover::after, .annotation:hover::before{
    /* When the user hovers over an annotation expand the label to display in full
    */
    min-width: fit-content;
}

.annotation:hover{
    /* Emphasize the annotation start end with a border on hover*/
    border-color: currentColor;
    border: 2px solid;
}
.special-token:not(:empty){
    /*
    A non-empty special token is like UNK (as opposed to CLS which has no representation in the text )
    */
    position:relative;
}
.special-token:empty::before{
    /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/
    content:attr(data-stok);
    background:#202020;
    font-size:0.75rem;
    color:white;
    margin: 0 0.25rem;
    padding: 0.25rem;
    border-radius:4px
}

.special-token:not(:empty):before {
    /* Special tokens that have text (UNK) are displayed above the actual text*/
    content:attr(data-stok);
    position:absolute;
    bottom:1.75rem;
    min-width:100%;
    width:100%;
    height:1rem;
    line-height:1rem;
    font-size:1rem;
    text-align:center;
    color:white;
    font-weight:bold;
    background:#202020;
    border-radius:10%;
}
/*
We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
instead we apply even and odd class at generation time and color them that way
*/
.even-token{
    background:#DCDCDC ;
    border: 1px solid #DCDCDC;
}
.odd-token{
    background:#A0A0A0;
    border: 1px solid #A0A0A0;
}
.even-token.multi-token,.odd-token.multi-token{
    background: repeating-linear-gradient(
        45deg,
        transparent,
        transparent 1px,
        #ccc 1px,
        #ccc 1px
    ),
    /* on "bottom" */
    linear-gradient(
        to bottom,
        #FFB6C1,
        #999
    );
}

.multi-token:hover::after {
    content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
    color:white;
    background-color: black;
    position:absolute;
    font-size:0.75rem;
    text-align:center;
    font-weight:bold;
    text-overflow:ellipsis;
    top:1.75rem;
    line-height:0;
    overflow: hidden;
    white-space: nowrap;
    left:0;
    width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
    padding:0.5rem 0;
}
|
wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
from string import Template
|
| 5 |
+
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from tokenizers import Encoding, Tokenizer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Load the stylesheet that ships alongside this module; the `css` string is
# inlined into every generated HTML visualization.
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
    css = f.read()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Annotation:
    """A labeled character span [start, end) over the text being visualized."""

    # Character offsets into the original text; ``end`` is exclusive.
    start: int
    end: int
    # BUG FIX: this was previously annotated as ``int``, but the constructor
    # takes ``label: str`` and consumers (e.g. label color calculation) treat
    # it as a string key.
    label: str

    def __init__(self, start: int, end: int, label: str):
        self.start = start
        self.end = end
        self.label = label
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# A list of user-supplied annotations over a text.
AnnotationList = List[Annotation]
# One entry per character: the index of the covering annotation, or None.
PartialIntList = List[Optional[int]]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class CharStateKey(NamedTuple):
    """Grouping key for consecutive characters: chars that share the same
    (token index, annotation index) pair are rendered in the same HTML span."""

    # Index of the token covering the char, or None (e.g. whitespace).
    token_ix: Optional[int]
    # Index of the annotation covering the char, or None.
    anno_ix: Optional[int]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class CharState:
    """Mutable per-character alignment state: which token(s) and which
    annotation (if any) cover a given character of the original text."""

    # Index of the character in the original text (None for special tokens
    # that have no textual representation).
    char_ix: Optional[int]

    def __init__(self, char_ix: Optional[int]):
        self.char_ix = char_ix

        # Index of the covering annotation; filled in during alignment.
        self.anno_ix: Optional[int] = None
        # Indices of all tokens covering this char; BPE may produce several.
        self.tokens: List[int] = []

    @property
    def token_ix(self):
        # First covering token, or None when the char is not tokenized.
        return self.tokens[0] if len(self.tokens) > 0 else None

    @property
    def is_multitoken(self):
        """
        BPE tokenizers can output more than one token for a char
        """
        return len(self.tokens) > 1

    def partition_key(self) -> CharStateKey:
        # Chars with equal partition keys end up in the same rendered span.
        return CharStateKey(
            token_ix=self.token_ix,
            anno_ix=self.anno_ix,
        )
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class Aligned:
    # NOTE(review): empty placeholder — nothing in the visible part of this
    # module references it; confirm it is unused before removing.
    pass
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class EncodingVisualizer:
|
| 68 |
+
"""
|
| 69 |
+
Build an EncodingVisualizer
|
| 70 |
+
|
| 71 |
+
Args:
|
| 72 |
+
|
| 73 |
+
tokenizer (:class:`~tokenizers.Tokenizer`):
|
| 74 |
+
A tokenizer instance
|
| 75 |
+
|
| 76 |
+
default_to_notebook (:obj:`bool`):
|
| 77 |
+
Whether to render html output in a notebook by default
|
| 78 |
+
|
| 79 |
+
annotation_converter (:obj:`Callable`, `optional`):
|
| 80 |
+
An optional (lambda) function that takes an annotation in any format and returns
|
| 81 |
+
an Annotation object
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
|
| 85 |
+
|
| 86 |
+
    def __init__(
        self,
        tokenizer: Tokenizer,
        default_to_notebook: bool = True,
        annotation_converter: Optional[Callable[[Any], Annotation]] = None,
    ):
        # Fail fast: when notebook rendering is requested, verify IPython is
        # importable at construction time rather than at first render.
        if default_to_notebook:
            try:
                from IPython.core.display import HTML, display
            except ImportError:
                raise Exception(
                    """We couldn't import IPython utils for html display.
                        Are you running in a notebook?
                        You can also pass `default_to_notebook=False` to get back raw HTML
                        """
                )

        self.tokenizer = tokenizer
        self.default_to_notebook = default_to_notebook
        # NOTE(review): "coverter" is a typo, but __call__ reads this exact
        # attribute name — the two must be renamed together if ever fixed.
        self.annotation_coverter = annotation_converter
        # Redundant trailing statement kept as-is (no effect).
        pass
|
| 107 |
+
|
| 108 |
+
    def __call__(
        self,
        text: str,
        annotations: AnnotationList = [],
        default_to_notebook: Optional[bool] = None,
    ) -> Optional[str]:
        """
        Build a visualization of the given text

        Args:
            text (:obj:`str`):
                The text to tokenize

            annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. They can either be an annotation class
                or anything else if you instantiated the visualizer with a converter function

            default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
                If True, will render the html in a notebook. Otherwise returns an html string.

        Returns:
            The HTML string if default_to_notebook is False, otherwise (default) returns None and
            renders the HTML in the notebook

        """
        # Per-call override: fall back to the instance-level setting when the
        # caller did not pass default_to_notebook explicitly.
        final_default_to_notebook = self.default_to_notebook
        if default_to_notebook is not None:
            final_default_to_notebook = default_to_notebook
        if final_default_to_notebook:
            # Imported lazily so plain (non-notebook) usage never needs IPython.
            try:
                from IPython.core.display import HTML, display
            except ImportError:
                raise Exception(
                    """We couldn't import IPython utils for html display.
                        Are you running in a notebook?"""
                )
        if self.annotation_coverter is not None:
            # Normalize caller-supplied annotation objects via the converter
            # given at construction time (note: attribute name is a typo kept
            # for consistency with __init__).
            annotations = list(map(self.annotation_coverter, annotations))
        encoding = self.tokenizer.encode(text)
        html = EncodingVisualizer.__make_html(text, encoding, annotations)
        if final_default_to_notebook:
            display(HTML(html))
        else:
            return html
|
| 152 |
+
|
| 153 |
+
@staticmethod
|
| 154 |
+
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
|
| 155 |
+
"""
|
| 156 |
+
Generates a color palette for all the labels in a given set of annotations
|
| 157 |
+
|
| 158 |
+
Args:
|
| 159 |
+
annotations (:obj:`Annotation`):
|
| 160 |
+
A list of annotations
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
:obj:`dict`: A dictionary mapping labels to colors in HSL format
|
| 164 |
+
"""
|
| 165 |
+
if len(annotations) == 0:
|
| 166 |
+
return {}
|
| 167 |
+
labels = set(map(lambda x: x.label, annotations))
|
| 168 |
+
num_labels = len(labels)
|
| 169 |
+
h_step = int(255 / num_labels)
|
| 170 |
+
if h_step < 20:
|
| 171 |
+
h_step = 20
|
| 172 |
+
s = 32
|
| 173 |
+
l = 64 # noqa: E741
|
| 174 |
+
h = 10
|
| 175 |
+
colors = {}
|
| 176 |
+
|
| 177 |
+
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
|
| 178 |
+
colors[label] = f"hsl({h},{s}%,{l}%"
|
| 179 |
+
h += h_step
|
| 180 |
+
return colors
|
| 181 |
+
|
| 182 |
+
@staticmethod
|
| 183 |
+
def consecutive_chars_to_html(
|
| 184 |
+
consecutive_chars_list: List[CharState],
|
| 185 |
+
text: str,
|
| 186 |
+
encoding: Encoding,
|
| 187 |
+
):
|
| 188 |
+
"""
|
| 189 |
+
Converts a list of "consecutive chars" into a single HTML element.
|
| 190 |
+
Chars are consecutive if they fall under the same word, token and annotation.
|
| 191 |
+
The CharState class is a named tuple with a "partition_key" method that makes it easy to
|
| 192 |
+
compare if two chars are consecutive.
|
| 193 |
+
|
| 194 |
+
Args:
|
| 195 |
+
consecutive_chars_list (:obj:`List[CharState]`):
|
| 196 |
+
A list of CharStates that have been grouped together
|
| 197 |
+
|
| 198 |
+
text (:obj:`str`):
|
| 199 |
+
The original text being processed
|
| 200 |
+
|
| 201 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 202 |
+
The encoding returned from the tokenizer
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
:obj:`str`: The HTML span for a set of consecutive chars
|
| 206 |
+
"""
|
| 207 |
+
first = consecutive_chars_list[0]
|
| 208 |
+
if first.char_ix is None:
|
| 209 |
+
# its a special token
|
| 210 |
+
stoken = encoding.tokens[first.token_ix]
|
| 211 |
+
# special tokens are represented as empty spans. We use the data attribute and css
|
| 212 |
+
# magic to display it
|
| 213 |
+
return f'<span class="special-token" data-stoken={stoken}></span>'
|
| 214 |
+
# We're not in a special token so this group has a start and end.
|
| 215 |
+
last = consecutive_chars_list[-1]
|
| 216 |
+
start = first.char_ix
|
| 217 |
+
end = last.char_ix + 1
|
| 218 |
+
span_text = text[start:end]
|
| 219 |
+
css_classes = [] # What css classes will we apply on the resulting span
|
| 220 |
+
data_items = {} # What data attributes will we apply on the result span
|
| 221 |
+
if first.token_ix is not None:
|
| 222 |
+
# We can either be in a token or not (e.g. in white space)
|
| 223 |
+
css_classes.append("token")
|
| 224 |
+
if first.is_multitoken:
|
| 225 |
+
css_classes.append("multi-token")
|
| 226 |
+
if first.token_ix % 2:
|
| 227 |
+
# We use this to color alternating tokens.
|
| 228 |
+
# A token might be split by an annotation that ends in the middle of it, so this
|
| 229 |
+
# lets us visually indicate a consecutive token despite its possible splitting in
|
| 230 |
+
# the html markup
|
| 231 |
+
css_classes.append("odd-token")
|
| 232 |
+
else:
|
| 233 |
+
# Like above, but a different color so we can see the tokens alternate
|
| 234 |
+
css_classes.append("even-token")
|
| 235 |
+
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
|
| 236 |
+
# This is a special token that is in the text. probably UNK
|
| 237 |
+
css_classes.append("special-token")
|
| 238 |
+
# TODO is this the right name for the data attribute ?
|
| 239 |
+
data_items["stok"] = encoding.tokens[first.token_ix]
|
| 240 |
+
else:
|
| 241 |
+
# In this case we are looking at a group/single char that is not tokenized.
|
| 242 |
+
# e.g. white space
|
| 243 |
+
css_classes.append("non-token")
|
| 244 |
+
css = f'''class="{' '.join(css_classes)}"'''
|
| 245 |
+
data = ""
|
| 246 |
+
for key, val in data_items.items():
|
| 247 |
+
data += f' data-{key}="{val}"'
|
| 248 |
+
return f"<span {css} {data} >{span_text}</span>"
|
| 249 |
+
|
| 250 |
+
    @staticmethod
    def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
        """
        Render the aligned text/encoding/annotations as a single HTML string.

        Walks the per-character states in order, grouping consecutive chars
        that share a (token, annotation) partition key into one span, and
        opening/closing an annotation wrapper span whenever the annotation
        index changes between characters.
        """
        char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
        current_consecutive_chars = [char_states[0]]
        prev_anno_ix = char_states[0].anno_ix
        spans = []
        label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
        cur_anno_ix = char_states[0].anno_ix
        if cur_anno_ix is not None:
            # If we started in an annotation make a span for it
            anno = annotations[cur_anno_ix]
            label = anno.label
            color = label_colors_dict[label]
            spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')

        for cs in char_states[1:]:
            cur_anno_ix = cs.anno_ix
            if cur_anno_ix != prev_anno_ix:
                # If we've transitioned in or out of an annotation
                spans.append(
                    # Create a span from the current consecutive characters
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                current_consecutive_chars = [cs]

                if prev_anno_ix is not None:
                    # if we transitioned out of an annotation close its span
                    spans.append("</span>")
                if cur_anno_ix is not None:
                    # If we entered a new annotation make a span for it
                    anno = annotations[cur_anno_ix]
                    label = anno.label
                    color = label_colors_dict[label]
                    spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
            prev_anno_ix = cur_anno_ix

            if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
                current_consecutive_chars.append(cs)
            else:
                # Otherwise we make a span for the previous group
                spans.append(
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                # And reset the consecutive_char_list to form a new group
                current_consecutive_chars = [cs]
        # All that's left is to fill out the final span
        # TODO I think there is an edge case here where an annotation's span might not close
        spans.append(
            EncodingVisualizer.consecutive_chars_to_html(
                current_consecutive_chars,
                text=text,
                encoding=encoding,
            )
        )
        # NOTE(review): HTMLBody is defined further down in this module
        # (outside this view) — presumably it wraps the spans with the
        # stylesheet and container markup; confirm against the full file.
        res = HTMLBody(spans)  # Send the list of spans to the body of our html
        return res
|
| 315 |
+
|
| 316 |
+
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
    """
    Map every character of ``text`` to the annotation covering it.

    Args:
        text (:obj:`str`):
            The raw text we want to align to

        annotations (:obj:`AnnotationList`):
            A (possibly empty) list of annotations

    Returns:
        A list of length len(text): entry i is None when no annotation covers
        character i, otherwise the index (into ``annotations``) of the
        annotation that covers it. Later annotations win on overlap.
    """
    anno_map: PartialIntList = [None] * len(text)
    for anno_index, annotation in enumerate(annotations):
        for position in range(annotation.start, annotation.end):
            anno_map[position] = anno_index
    return anno_map
|
| 336 |
+
|
| 337 |
+
@staticmethod
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
    """
    Build one CharState per character of the original text.

    Each state records:

    * which token_ix it corresponds to
    * which word_ix it corresponds to
    * which annotation_ix it corresponds to

    Args:
        text (:obj:`str`):
            The raw text we want to align to

        annotations (:obj:`List[Annotation]`):
            A (possibly empty) list of annotations

        encoding: (:class:`~tokenizers.Encoding`):
            The encoding returned from the tokenizer

    Returns:
        :obj:`List[CharState]`: One CharState per character of ``text``.
    """
    anno_map = EncodingVisualizer.__make_anno_map(text, annotations)
    # Todo make this a dataclass or named tuple
    states: List[CharState] = [CharState(ix) for ix in range(len(text))]
    # Attach every token index to the characters its offsets cover.
    for token_ix, _token in enumerate(encoding.tokens):
        offsets = encoding.token_to_chars(token_ix)
        if offsets is None:
            # Token has no character alignment (e.g. special tokens)
            continue
        start, end = offsets
        for position in range(start, end):
            states[position].tokens.append(token_ix)
    # Attach the annotation index (or None) computed above.
    for state, anno_ix in zip(states, anno_map):
        state.anno_ix = anno_ix

    return states
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def HTMLBody(children: List[str], css_styles=css) -> str:
    """
    Generates the full html with css from a list of html spans.

    Args:
        children (:obj:`List[str]`):
            A list of strings, assumed to be html elements; they are
            concatenated verbatim (no escaping is performed).

        css_styles (:obj:`str`, `optional`):
            Optional alternative implementation of the css; defaults to the
            module-level ``css`` string.

    Returns:
        :obj:`str`: An HTML string with style markup
    """
    children_text = "".join(children)
    # dir=auto lets the browser pick LTR/RTL per the rendered text.
    return f"""
    <html>
        <head>
            <style>
                {css_styles}
            </style>
        </head>
        <body>
            <div class="tokenized-text" dir=auto>
                {children_text}
            </div>
        </body>
    </html>
    """
|
wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
from .. import trainers

# Re-export the trainer classes implemented in the native extension so they
# are importable as `tokenizers.trainers.<Name>`.
Trainer = trainers.Trainer
BpeTrainer = trainers.BpeTrainer
UnigramTrainer = trainers.UnigramTrainer
WordLevelTrainer = trainers.WordLevelTrainer
WordPieceTrainer = trainers.WordPieceTrainer
|
wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class Trainer:
    """
    Base class for all trainers.

    This class is not supposed to be instantiated directly. Instead, any
    implementation of a Trainer will return an instance of this class when
    instantiated.
    """
|
| 9 |
+
|
| 10 |
+
class BpeTrainer(Trainer):
    """
    Trainer capable of training a BPE model.

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.

        max_token_length (:obj:`int`, `optional`):
            Prevents creating tokens longer than the specified size.
            This can help with reducing polluting your vocabulary with
            highly repetitive tokens like `======` for wikipedia.
    """
|
| 48 |
+
|
| 49 |
+
class UnigramTrainer(Trainer):
    """
    Trainer capable of training a Unigram model.

    Args:
        vocab_size (:obj:`int`):
            The size of the final vocabulary, including all tokens and alphabet.

        show_progress (:obj:`bool`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.

        initial_alphabet (:obj:`List[str]`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        shrinking_factor (:obj:`float`):
            The shrinking factor used at each step of the training to prune the
            vocabulary.

        unk_token (:obj:`str`):
            The token used for out-of-vocabulary tokens.

        max_piece_length (:obj:`int`):
            The maximum length of a given token.

        n_sub_iterations (:obj:`int`):
            The number of iterations of the EM algorithm to perform before
            pruning the vocabulary.
    """
    # Stub signature only — the implementation lives in the native extension.
    def __init__(
        self,
        vocab_size=8000,
        show_progress=True,
        special_tokens=[],
        shrinking_factor=0.75,
        unk_token=None,
        max_piece_length=16,
        n_sub_iterations=2,
    ):
        pass
|
| 94 |
+
|
| 95 |
+
class WordLevelTrainer(Trainer):
    """
    Trainer capable of training a WordLevel model.

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
    """
|
| 112 |
+
|
| 113 |
+
class WordPieceTrainer(Trainer):
    """
    Trainer capable of training a WordPiece model.

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
    """
    # Stub signature only — the implementation lives in the native extension.
    def __init__(
        self,
        vocab_size=30000,
        min_frequency=0,
        show_progress=True,
        special_tokens=[],
        limit_alphabet=None,
        initial_alphabet=[],
        continuing_subword_prefix="##",
        end_of_word_suffix=None,
    ):
        pass
|
wemm/lib/python3.10/site-packages/torchvision/__init__.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import warnings
|
| 3 |
+
from modulefinder import Module
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torchvision import datasets, io, models, ops, transforms, utils
|
| 7 |
+
|
| 8 |
+
from .extension import _HAS_OPS
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from .version import __version__ # noqa: F401
|
| 12 |
+
except ImportError:
|
| 13 |
+
pass
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Check if torchvision is being imported within the root folder
# (compiled ops missing AND this file resolves to <cwd>/torchvision) —
# importing from the source tree shadows the installed package and the
# native extensions, so warn the user instead of failing obscurely later.
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
    os.path.realpath(os.getcwd()), "torchvision"
):
    message = (
        "You are importing torchvision within its own root folder ({}). "
        "This is not expected to work and may give errors. Please exit the "
        "torchvision project source and relaunch your python interpreter."
    )
    warnings.warn(message.format(os.getcwd()))
|
| 26 |
+
|
| 27 |
+
_image_backend = "PIL"
|
| 28 |
+
|
| 29 |
+
_video_backend = "pyav"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def set_image_backend(backend):
    """
    Select the package used to load images.

    Args:
        backend (string): Name of the image backend, one of {'PIL', 'accimage'}.
            The :mod:`accimage` package uses the Intel IPP library. It is
            generally faster than PIL, but does not support as many operations.

    Raises:
        ValueError: If ``backend`` is not one of the supported choices.
    """
    global _image_backend
    if backend in ("PIL", "accimage"):
        _image_backend = backend
    else:
        raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def get_image_backend():
    """Return the name of the package currently used to load images."""
    return _image_backend
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def set_video_backend(backend):
    """
    Specifies the package used to decode videos.

    Args:
        backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
            The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
            binding for the FFmpeg libraries.
            The :mod:`video_reader` package includes a native C++ implementation on
            top of FFMPEG libraries, and a python API of TorchScript custom operator.
            It generally decodes faster than :mod:`pyav`, but is perhaps less robust.

    .. note::
        Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
        backend, please compile torchvision from source.

    Raises:
        ValueError: If ``backend`` is not 'pyav', 'video_reader' or 'cuda'.
        RuntimeError: If the requested backend was not compiled into this build.
    """
    global _video_backend
    if backend not in ["pyav", "video_reader", "cuda"]:
        # f-string instead of %-formatting for consistency with set_image_backend;
        # the message text is unchanged.
        raise ValueError(f"Invalid video backend '{backend}'. Options are 'pyav', 'video_reader' and 'cuda'")
    if backend == "video_reader" and not io._HAS_VIDEO_OPT:
        # TODO: better messages
        message = "video_reader video backend is not available. Please compile torchvision from source and try again"
        raise RuntimeError(message)
    elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
        # TODO: better messages
        message = "cuda video backend is not available."
        raise RuntimeError(message)
    else:
        _video_backend = backend
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def get_video_backend():
    """
    Return the currently active video backend used to decode videos.

    Returns:
        str: Name of the video backend. one of {'pyav', 'video_reader'}.
    """
    return _video_backend
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _is_tracing():
    # Truthy while TorchScript tracing is active. Note this returns torch's
    # internal tracing-state object (or None), not a plain bool.
    return torch._C._get_tracing_state()
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
_WARN_ABOUT_BETA_TRANSFORMS = True
|
| 101 |
+
_BETA_TRANSFORMS_WARNING = (
|
| 102 |
+
"The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
|
| 103 |
+
"While we do not expect major breaking changes, some APIs may still change "
|
| 104 |
+
"according to user feedback. Please submit any feedback you may have in "
|
| 105 |
+
"this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
|
| 106 |
+
"check out https://github.com/pytorch/vision/issues/7319 to learn more about "
|
| 107 |
+
"the APIs that we suspect might involve future changes. "
|
| 108 |
+
"You can silence this warning by calling torchvision.disable_beta_transforms_warning()."
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def disable_beta_transforms_warning():
    """Silence the beta-status warning for torchvision.datapoints / transforms.v2."""
    global _WARN_ABOUT_BETA_TRANSFORMS
    _WARN_ABOUT_BETA_TRANSFORMS = False
|
wemm/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.machinery
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from torch.hub import _get_torch_home
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
|
| 8 |
+
_USE_SHARDED_DATASETS = False
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _download_file_from_remote_location(fpath: str, url: str) -> None:
    # No-op in this build. NOTE(review): given the module name
    # (_internally_replaced_utils), this is presumably swapped for a real
    # remote-download implementation in internal builds — confirm.
    pass
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _is_remote_location_available() -> bool:
    # Always False here; callers use this to skip the remote-download path.
    return False
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
try:
|
| 20 |
+
from torch.hub import load_state_dict_from_url # noqa: 401
|
| 21 |
+
except ImportError:
|
| 22 |
+
from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _get_extension_path(lib_name):
    """Locate the compiled extension module ``lib_name`` in this package's
    directory and return its filesystem path.

    Raises:
        ImportError: If no extension with that name is found.
    """
    lib_dir = os.path.dirname(__file__)
    if os.name == "nt":
        # Register the main torchvision library location on the default DLL path
        import ctypes
        import sys

        kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
        with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
        # 0x0001 = SEM_FAILCRITICALERRORS: suppress error dialogs while probing
        prev_error_mode = kernel32.SetErrorMode(0x0001)

        if with_load_library_flags:
            # Declare the return type so the HANDLE is not truncated on 64-bit
            kernel32.AddDllDirectory.restype = ctypes.c_void_p

        if sys.version_info >= (3, 8):
            # Python 3.8+ exposes a first-class API for the DLL search path
            os.add_dll_directory(lib_dir)
        elif with_load_library_flags:
            # Older Pythons: fall back to the raw Win32 call
            res = kernel32.AddDllDirectory(lib_dir)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
                raise err

        # Restore the previous error mode once the search path is set up
        kernel32.SetErrorMode(prev_error_mode)

    loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)

    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_specs = extfinder.find_spec(lib_name)
    if ext_specs is None:
        raise ImportError

    return ext_specs.origin
|
wemm/lib/python3.10/site-packages/torchvision/_utils.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
from typing import Sequence, Type, TypeVar
|
| 3 |
+
|
| 4 |
+
T = TypeVar("T", bound=enum.Enum)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class StrEnumMeta(enum.EnumMeta):
    """Enum metaclass adding name-based lookup via ``from_str``."""

    # Re-exported so classes built with this metaclass can reference `auto`.
    auto = enum.auto

    def from_str(self: Type[T], member: str) -> T:  # type: ignore[misc]
        """Return the member named ``member``; raise ValueError if unknown."""
        try:
            return self[member]
        except KeyError:
            # TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as
            # soon as it is migrated.
            raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
    # Base class for enums that support `StrEnum.from_str("NAME")` lookup.
    pass
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
    """Render *seq* as a human-readable, single-quoted enumeration.

    ``separate_last`` is a word (e.g. ``"or "``) placed before the final item:
    two items are joined as ``'a' or 'b'``; three or more keep an Oxford
    comma, ``'a', 'b', or 'c'``. Empty input yields ``""``.
    """
    if not seq:
        return ""
    quoted = [f"'{item}'" for item in seq]
    if len(quoted) == 1:
        return quoted[0]
    # Exactly two items joined by a separator word take no comma.
    if separate_last and len(quoted) == 2:
        return f"{quoted[0]} {separate_last}{quoted[1]}"
    return ", ".join(quoted[:-1]) + f", {separate_last}{quoted[-1]}"
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
|
| 2 |
+
|
| 3 |
+
from ._bounding_box import BoundingBox, BoundingBoxFormat
|
| 4 |
+
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
|
| 5 |
+
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
|
| 6 |
+
from ._mask import Mask
|
| 7 |
+
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
|
| 8 |
+
|
| 9 |
+
if _WARN_ABOUT_BETA_TRANSFORMS:
|
| 10 |
+
import warnings
|
| 11 |
+
|
| 12 |
+
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_datapoint.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_image.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_mask.cpython-310.pyc
ADDED
|
Binary file (6.22 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/_datapoint.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from types import ModuleType
|
| 4 |
+
from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union
|
| 5 |
+
|
| 6 |
+
import PIL.Image
|
| 7 |
+
import torch
|
| 8 |
+
from torch._C import DisableTorchFunctionSubclass
|
| 9 |
+
from torch.types import _device, _dtype, _size
|
| 10 |
+
from torchvision.transforms import InterpolationMode
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
D = TypeVar("D", bound="Datapoint")
|
| 14 |
+
_FillType = Union[int, float, Sequence[int], Sequence[float], None]
|
| 15 |
+
_FillTypeJIT = Optional[List[float]]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Datapoint(torch.Tensor):
|
| 19 |
+
__F: Optional[ModuleType] = None
|
| 20 |
+
|
| 21 |
+
@staticmethod
|
| 22 |
+
def _to_tensor(
|
| 23 |
+
data: Any,
|
| 24 |
+
dtype: Optional[torch.dtype] = None,
|
| 25 |
+
device: Optional[Union[torch.device, str, int]] = None,
|
| 26 |
+
requires_grad: Optional[bool] = None,
|
| 27 |
+
) -> torch.Tensor:
|
| 28 |
+
if requires_grad is None:
|
| 29 |
+
requires_grad = data.requires_grad if isinstance(data, torch.Tensor) else False
|
| 30 |
+
return torch.as_tensor(data, dtype=dtype, device=device).requires_grad_(requires_grad)
|
| 31 |
+
|
| 32 |
+
@classmethod
|
| 33 |
+
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
|
| 34 |
+
raise NotImplementedError
|
| 35 |
+
|
| 36 |
+
_NO_WRAPPING_EXCEPTIONS = {
|
| 37 |
+
torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output),
|
| 38 |
+
torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output),
|
| 39 |
+
# We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus
|
| 40 |
+
# retains the type automatically
|
| 41 |
+
torch.Tensor.requires_grad_: lambda cls, input, output: output,
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
@classmethod
|
| 45 |
+
def __torch_function__(
|
| 46 |
+
cls,
|
| 47 |
+
func: Callable[..., torch.Tensor],
|
| 48 |
+
types: Tuple[Type[torch.Tensor], ...],
|
| 49 |
+
args: Sequence[Any] = (),
|
| 50 |
+
kwargs: Optional[Mapping[str, Any]] = None,
|
| 51 |
+
) -> torch.Tensor:
|
| 52 |
+
"""For general information about how the __torch_function__ protocol works,
|
| 53 |
+
see https://pytorch.org/docs/stable/notes/extending.html#extending-torch
|
| 54 |
+
|
| 55 |
+
TL;DR: Every time a PyTorch operator is called, it goes through the inputs and looks for the
|
| 56 |
+
``__torch_function__`` method. If one is found, it is invoked with the operator as ``func`` as well as the
|
| 57 |
+
``args`` and ``kwargs`` of the original call.
|
| 58 |
+
|
| 59 |
+
The default behavior of :class:`~torch.Tensor`'s is to retain a custom tensor type. For the :class:`Datapoint`
|
| 60 |
+
use case, this has two downsides:
|
| 61 |
+
|
| 62 |
+
1. Since some :class:`Datapoint`'s require metadata to be constructed, the default wrapping, i.e.
|
| 63 |
+
``return cls(func(*args, **kwargs))``, will fail for them.
|
| 64 |
+
2. For most operations, there is no way of knowing if the input type is still valid for the output.
|
| 65 |
+
|
| 66 |
+
For these reasons, the automatic output wrapping is turned off for most operators. The only exceptions are
|
| 67 |
+
listed in :attr:`Datapoint._NO_WRAPPING_EXCEPTIONS`
|
| 68 |
+
"""
|
| 69 |
+
# Since super().__torch_function__ has no hook to prevent the coercing of the output into the input type, we
|
| 70 |
+
# need to reimplement the functionality.
|
| 71 |
+
|
| 72 |
+
if not all(issubclass(cls, t) for t in types):
|
| 73 |
+
return NotImplemented
|
| 74 |
+
|
| 75 |
+
with DisableTorchFunctionSubclass():
|
| 76 |
+
output = func(*args, **kwargs or dict())
|
| 77 |
+
|
| 78 |
+
wrapper = cls._NO_WRAPPING_EXCEPTIONS.get(func)
|
| 79 |
+
# Apart from `func` needing to be an exception, we also require the primary operand, i.e. `args[0]`, to be
|
| 80 |
+
# an instance of the class that `__torch_function__` was invoked on. The __torch_function__ protocol will
|
| 81 |
+
# invoke this method on *all* types involved in the computation by walking the MRO upwards. For example,
|
| 82 |
+
# `torch.Tensor(...).to(datapoints.Image(...))` will invoke `datapoints.Image.__torch_function__` with
|
| 83 |
+
# `args = (torch.Tensor(), datapoints.Image())` first. Without this guard, the original `torch.Tensor` would
|
| 84 |
+
# be wrapped into a `datapoints.Image`.
|
| 85 |
+
if wrapper and isinstance(args[0], cls):
|
| 86 |
+
return wrapper(cls, args[0], output)
|
| 87 |
+
|
| 88 |
+
# Inplace `func`'s, canonically identified with a trailing underscore in their name like `.add_(...)`,
|
| 89 |
+
# will retain the input type. Thus, we need to unwrap here.
|
| 90 |
+
if isinstance(output, cls):
|
| 91 |
+
return output.as_subclass(torch.Tensor)
|
| 92 |
+
|
| 93 |
+
return output
|
| 94 |
+
|
| 95 |
+
def _make_repr(self, **kwargs: Any) -> str:
|
| 96 |
+
# This is a poor man's implementation of the proposal in https://github.com/pytorch/pytorch/issues/76532.
|
| 97 |
+
# If that ever gets implemented, remove this in favor of the solution on the `torch.Tensor` class.
|
| 98 |
+
extra_repr = ", ".join(f"{key}={value}" for key, value in kwargs.items())
|
| 99 |
+
return f"{super().__repr__()[:-1]}, {extra_repr})"
|
| 100 |
+
|
| 101 |
+
@property
|
| 102 |
+
def _F(self) -> ModuleType:
|
| 103 |
+
# This implements a lazy import of the functional to get around the cyclic import. This import is deferred
|
| 104 |
+
# until the first time we need reference to the functional module and it's shared across all instances of
|
| 105 |
+
# the class. This approach avoids the DataLoader issue described at
|
| 106 |
+
# https://github.com/pytorch/vision/pull/6476#discussion_r953588621
|
| 107 |
+
if Datapoint.__F is None:
|
| 108 |
+
from ..transforms.v2 import functional
|
| 109 |
+
|
| 110 |
+
Datapoint.__F = functional
|
| 111 |
+
return Datapoint.__F
|
| 112 |
+
|
| 113 |
+
# Add properties for common attributes like shape, dtype, device, ndim etc
|
| 114 |
+
# this way we return the result without passing into __torch_function__
|
| 115 |
+
@property
|
| 116 |
+
def shape(self) -> _size: # type: ignore[override]
|
| 117 |
+
with DisableTorchFunctionSubclass():
|
| 118 |
+
return super().shape
|
| 119 |
+
|
| 120 |
+
@property
|
| 121 |
+
def ndim(self) -> int: # type: ignore[override]
|
| 122 |
+
with DisableTorchFunctionSubclass():
|
| 123 |
+
return super().ndim
|
| 124 |
+
|
| 125 |
+
@property
|
| 126 |
+
def device(self, *args: Any, **kwargs: Any) -> _device: # type: ignore[override]
|
| 127 |
+
with DisableTorchFunctionSubclass():
|
| 128 |
+
return super().device
|
| 129 |
+
|
| 130 |
+
@property
|
| 131 |
+
def dtype(self) -> _dtype: # type: ignore[override]
|
| 132 |
+
with DisableTorchFunctionSubclass():
|
| 133 |
+
return super().dtype
|
| 134 |
+
|
| 135 |
+
def horizontal_flip(self) -> Datapoint:
|
| 136 |
+
return self
|
| 137 |
+
|
| 138 |
+
def vertical_flip(self) -> Datapoint:
|
| 139 |
+
return self
|
| 140 |
+
|
| 141 |
+
# TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize
|
| 142 |
+
# https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593
|
| 143 |
+
def resize( # type: ignore[override]
|
| 144 |
+
self,
|
| 145 |
+
size: List[int],
|
| 146 |
+
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
|
| 147 |
+
max_size: Optional[int] = None,
|
| 148 |
+
antialias: Optional[Union[str, bool]] = "warn",
|
| 149 |
+
) -> Datapoint:
|
| 150 |
+
return self
|
| 151 |
+
|
| 152 |
+
def crop(self, top: int, left: int, height: int, width: int) -> Datapoint:
|
| 153 |
+
return self
|
| 154 |
+
|
| 155 |
+
def center_crop(self, output_size: List[int]) -> Datapoint:
|
| 156 |
+
return self
|
| 157 |
+
|
| 158 |
+
def resized_crop(
|
| 159 |
+
self,
|
| 160 |
+
top: int,
|
| 161 |
+
left: int,
|
| 162 |
+
height: int,
|
| 163 |
+
width: int,
|
| 164 |
+
size: List[int],
|
| 165 |
+
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
|
| 166 |
+
antialias: Optional[Union[str, bool]] = "warn",
|
| 167 |
+
) -> Datapoint:
|
| 168 |
+
return self
|
| 169 |
+
|
| 170 |
+
def pad(
|
| 171 |
+
self,
|
| 172 |
+
padding: List[int],
|
| 173 |
+
fill: Optional[Union[int, float, List[float]]] = None,
|
| 174 |
+
padding_mode: str = "constant",
|
| 175 |
+
) -> Datapoint:
|
| 176 |
+
return self
|
| 177 |
+
|
| 178 |
+
def rotate(
    self,
    angle: float,
    interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[float]] = None,
    fill: _FillTypeJIT = None,
) -> Datapoint:
    """No-op fallback: rotation only affects datapoints with spatial data."""
    return self
|
| 187 |
+
|
| 188 |
+
def affine(
    self,
    angle: Union[int, float],
    translate: List[float],
    scale: float,
    shear: List[float],
    interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
    fill: _FillTypeJIT = None,
    center: Optional[List[float]] = None,
) -> Datapoint:
    """No-op fallback: affine warps only affect spatial datapoints."""
    return self
|
| 199 |
+
|
| 200 |
+
def perspective(
    self,
    startpoints: Optional[List[List[int]]],
    endpoints: Optional[List[List[int]]],
    interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
    fill: _FillTypeJIT = None,
    coefficients: Optional[List[float]] = None,
) -> Datapoint:
    """No-op fallback: perspective warps only affect spatial datapoints."""
    return self
|
| 209 |
+
|
| 210 |
+
def elastic(
    self,
    displacement: torch.Tensor,
    interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
    fill: _FillTypeJIT = None,
) -> Datapoint:
    """No-op fallback: elastic deformation only affects spatial datapoints."""
    return self
|
| 217 |
+
|
| 218 |
+
def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint:
    """No-op fallback: color conversion only affects pixel-carrying datapoints."""
    return self
|
| 220 |
+
|
| 221 |
+
def adjust_brightness(self, brightness_factor: float) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 223 |
+
|
| 224 |
+
def adjust_saturation(self, saturation_factor: float) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 226 |
+
|
| 227 |
+
def adjust_contrast(self, contrast_factor: float) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 229 |
+
|
| 230 |
+
def adjust_sharpness(self, sharpness_factor: float) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 232 |
+
|
| 233 |
+
def adjust_hue(self, hue_factor: float) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 235 |
+
|
| 236 |
+
def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint:
    """No-op fallback: photometric ops only affect pixel-carrying datapoints."""
    return self
|
| 238 |
+
|
| 239 |
+
def posterize(self, bits: int) -> Datapoint:
    """No-op fallback: posterize only affects pixel-carrying datapoints."""
    return self
|
| 241 |
+
|
| 242 |
+
def solarize(self, threshold: float) -> Datapoint:
    """No-op fallback: solarize only affects pixel-carrying datapoints."""
    return self
|
| 244 |
+
|
| 245 |
+
def autocontrast(self) -> Datapoint:
    """No-op fallback: autocontrast only affects pixel-carrying datapoints."""
    return self
|
| 247 |
+
|
| 248 |
+
def equalize(self) -> Datapoint:
    """No-op fallback: histogram equalization only affects pixel datapoints."""
    return self
|
| 250 |
+
|
| 251 |
+
def invert(self) -> Datapoint:
    """No-op fallback: inversion only affects pixel-carrying datapoints."""
    return self
|
| 253 |
+
|
| 254 |
+
def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint:
    """No-op fallback: blurring only affects pixel-carrying datapoints."""
    return self
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# Type aliases for annotating transform inputs in eager mode...
_InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]
# ...and under TorchScript, which cannot express the union above and only
# sees plain tensors.
_InputTypeJIT = torch.Tensor
|
wemm/lib/python3.10/site-packages/torchvision/datapoints/_image.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import PIL.Image
|
| 6 |
+
import torch
|
| 7 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 8 |
+
|
| 9 |
+
from ._datapoint import _FillTypeJIT, Datapoint
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Image(Datapoint):
    """[BETA] :class:`torch.Tensor` subclass for images.

    Args:
        data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
            well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """

    @classmethod
    def _wrap(cls, tensor: torch.Tensor) -> Image:
        # Re-tag a plain tensor as an Image without copying its storage.
        image = tensor.as_subclass(cls)
        return image

    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Image:
        if isinstance(data, PIL.Image.Image):
            # Imported locally to avoid a circular import at module load time.
            from torchvision.transforms.v2 import functional as F

            data = F.pil_to_tensor(data)

        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 2:
            raise ValueError(f"Expected at least a 2D (H, W) tensor for an Image, but got {tensor.ndim} dimensions")
        elif tensor.ndim == 2:
            # Promote (H, W) to the canonical channel-first (1, H, W) layout.
            tensor = tensor.unsqueeze(0)

        return cls._wrap(tensor)

    @classmethod
    def wrap_like(cls, other: Image, tensor: torch.Tensor) -> Image:
        """Wrap ``tensor`` as an :class:`Image`; ``other`` carries no extra metadata here."""
        return cls._wrap(tensor)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()

    @property
    def spatial_size(self) -> Tuple[int, int]:
        """Return the (height, width) of the image, taken from the last two dims."""
        return tuple(self.shape[-2:])  # type: ignore[return-value]

    @property
    def num_channels(self) -> int:
        """Return the number of channels (third-to-last dimension)."""
        return self.shape[-3]

    def horizontal_flip(self) -> Image:
        output = self._F.horizontal_flip_image_tensor(self.as_subclass(torch.Tensor))
        return Image.wrap_like(self, output)

    def vertical_flip(self) -> Image:
        output = self._F.vertical_flip_image_tensor(self.as_subclass(torch.Tensor))
        return Image.wrap_like(self, output)

    def resize(  # type: ignore[override]
        self,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        max_size: Optional[int] = None,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Image:
        output = self._F.resize_image_tensor(
            self.as_subclass(torch.Tensor), size, interpolation=interpolation, max_size=max_size, antialias=antialias
        )
        return Image.wrap_like(self, output)

    def crop(self, top: int, left: int, height: int, width: int) -> Image:
        output = self._F.crop_image_tensor(self.as_subclass(torch.Tensor), top, left, height, width)
        return Image.wrap_like(self, output)

    def center_crop(self, output_size: List[int]) -> Image:
        output = self._F.center_crop_image_tensor(self.as_subclass(torch.Tensor), output_size=output_size)
        return Image.wrap_like(self, output)

    def resized_crop(
        self,
        top: int,
        left: int,
        height: int,
        width: int,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Image:
        output = self._F.resized_crop_image_tensor(
            self.as_subclass(torch.Tensor),
            top,
            left,
            height,
            width,
            size=list(size),
            interpolation=interpolation,
            antialias=antialias,
        )
        return Image.wrap_like(self, output)

    def pad(
        self,
        padding: List[int],
        fill: Optional[Union[int, float, List[float]]] = None,
        padding_mode: str = "constant",
    ) -> Image:
        output = self._F.pad_image_tensor(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
        return Image.wrap_like(self, output)

    def rotate(
        self,
        angle: float,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        expand: bool = False,
        center: Optional[List[float]] = None,
        fill: _FillTypeJIT = None,
    ) -> Image:
        output = self._F.rotate_image_tensor(
            self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
        )
        return Image.wrap_like(self, output)

    def affine(
        self,
        angle: Union[int, float],
        translate: List[float],
        scale: float,
        shear: List[float],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: _FillTypeJIT = None,
        center: Optional[List[float]] = None,
    ) -> Image:
        output = self._F.affine_image_tensor(
            self.as_subclass(torch.Tensor),
            angle,
            translate=translate,
            scale=scale,
            shear=shear,
            interpolation=interpolation,
            fill=fill,
            center=center,
        )
        return Image.wrap_like(self, output)

    def perspective(
        self,
        startpoints: Optional[List[List[int]]],
        endpoints: Optional[List[List[int]]],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
        coefficients: Optional[List[float]] = None,
    ) -> Image:
        output = self._F.perspective_image_tensor(
            self.as_subclass(torch.Tensor),
            startpoints,
            endpoints,
            interpolation=interpolation,
            fill=fill,
            coefficients=coefficients,
        )
        return Image.wrap_like(self, output)

    def elastic(
        self,
        displacement: torch.Tensor,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
    ) -> Image:
        output = self._F.elastic_image_tensor(
            self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
        )
        return Image.wrap_like(self, output)

    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Image:
        output = self._F.rgb_to_grayscale_image_tensor(
            self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
        )
        return Image.wrap_like(self, output)

    def adjust_brightness(self, brightness_factor: float) -> Image:
        output = self._F.adjust_brightness_image_tensor(
            self.as_subclass(torch.Tensor), brightness_factor=brightness_factor
        )
        return Image.wrap_like(self, output)

    def adjust_saturation(self, saturation_factor: float) -> Image:
        output = self._F.adjust_saturation_image_tensor(
            self.as_subclass(torch.Tensor), saturation_factor=saturation_factor
        )
        return Image.wrap_like(self, output)

    def adjust_contrast(self, contrast_factor: float) -> Image:
        output = self._F.adjust_contrast_image_tensor(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
        return Image.wrap_like(self, output)

    def adjust_sharpness(self, sharpness_factor: float) -> Image:
        output = self._F.adjust_sharpness_image_tensor(
            self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor
        )
        return Image.wrap_like(self, output)

    def adjust_hue(self, hue_factor: float) -> Image:
        output = self._F.adjust_hue_image_tensor(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
        return Image.wrap_like(self, output)

    def adjust_gamma(self, gamma: float, gain: float = 1) -> Image:
        output = self._F.adjust_gamma_image_tensor(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
        return Image.wrap_like(self, output)

    def posterize(self, bits: int) -> Image:
        output = self._F.posterize_image_tensor(self.as_subclass(torch.Tensor), bits=bits)
        return Image.wrap_like(self, output)

    def solarize(self, threshold: float) -> Image:
        output = self._F.solarize_image_tensor(self.as_subclass(torch.Tensor), threshold=threshold)
        return Image.wrap_like(self, output)

    def autocontrast(self) -> Image:
        output = self._F.autocontrast_image_tensor(self.as_subclass(torch.Tensor))
        return Image.wrap_like(self, output)

    def equalize(self) -> Image:
        output = self._F.equalize_image_tensor(self.as_subclass(torch.Tensor))
        return Image.wrap_like(self, output)

    def invert(self) -> Image:
        output = self._F.invert_image_tensor(self.as_subclass(torch.Tensor))
        return Image.wrap_like(self, output)

    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:
        output = self._F.gaussian_blur_image_tensor(
            self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma
        )
        return Image.wrap_like(self, output)

    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Image:
        output = self._F.normalize_image_tensor(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
        return Image.wrap_like(self, output)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# Type aliases for annotating image inputs in eager mode; the ``*JIT``
# variants are what TorchScript sees, since it cannot express these unions
# and treats every image as a plain tensor.
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
|