Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- parrot/lib/python3.10/site-packages/anyio/streams/__pycache__/memory.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/__init__.py +100 -0
- parrot/lib/python3.10/site-packages/tokenizers/__init__.pyi +1200 -0
- parrot/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py +122 -0
- parrot/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py +196 -0
- parrot/lib/python3.10/site-packages/tokenizers/models/__init__.py +8 -0
- parrot/lib/python3.10/site-packages/tokenizers/models/__init__.pyi +591 -0
- parrot/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py +29 -0
- parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi +595 -0
- parrot/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.py +9 -0
- parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi +342 -0
- parrot/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/tools/__init__.py +1 -0
- parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css +170 -0
- parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer.py +403 -0
- parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.py +8 -0
- parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi +156 -0
- parrot/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/__init__.py +105 -0
- parrot/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py +50 -0
- parrot/lib/python3.10/site-packages/torchvision/_meta_registrations.py +225 -0
- parrot/lib/python3.10/site-packages/torchvision/_utils.py +32 -0
- parrot/lib/python3.10/site-packages/torchvision/extension.py +92 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_load_gpu_decoder.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_video_opt.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/image.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/io/_video_opt.py +512 -0
- parrot/lib/python3.10/site-packages/torchvision/ops/_utils.py +106 -0
- parrot/lib/python3.10/site-packages/torchvision/ops/giou_loss.py +76 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/__init__.py +2 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_pil.py +393 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py +960 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py +114 -0
- parrot/lib/python3.10/site-packages/torchvision/transforms/_presets.py +216 -0
parrot/lib/python3.10/site-packages/anyio/streams/__pycache__/memory.cpython-310.pyc
ADDED
|
Binary file (8.62 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/__init__.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from enum import Enum
|
| 2 |
+
from typing import List, Tuple, Union
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
Offsets = Tuple[int, int]
|
| 6 |
+
|
| 7 |
+
TextInputSequence = str
|
| 8 |
+
"""A :obj:`str` that represents an input sequence """
|
| 9 |
+
|
| 10 |
+
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
|
| 11 |
+
"""A pre-tokenized input sequence. Can be one of:
|
| 12 |
+
|
| 13 |
+
- A :obj:`List` of :obj:`str`
|
| 14 |
+
- A :obj:`Tuple` of :obj:`str`
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
TextEncodeInput = Union[
|
| 18 |
+
TextInputSequence,
|
| 19 |
+
Tuple[TextInputSequence, TextInputSequence],
|
| 20 |
+
List[TextInputSequence],
|
| 21 |
+
]
|
| 22 |
+
"""Represents a textual input for encoding. Can be either:
|
| 23 |
+
|
| 24 |
+
- A single sequence: :data:`~tokenizers.TextInputSequence`
|
| 25 |
+
- A pair of sequences:
|
| 26 |
+
|
| 27 |
+
- A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
|
| 28 |
+
- Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
PreTokenizedEncodeInput = Union[
|
| 32 |
+
PreTokenizedInputSequence,
|
| 33 |
+
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
|
| 34 |
+
List[PreTokenizedInputSequence],
|
| 35 |
+
]
|
| 36 |
+
"""Represents a pre-tokenized input for encoding. Can be either:
|
| 37 |
+
|
| 38 |
+
- A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
|
| 39 |
+
- A pair of sequences:
|
| 40 |
+
|
| 41 |
+
- A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
|
| 42 |
+
- Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
|
| 46 |
+
"""Represents all the possible types of input sequences for encoding. Can be:
|
| 47 |
+
|
| 48 |
+
- When ``is_pretokenized=False``: :data:`~TextInputSequence`
|
| 49 |
+
- When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
|
| 53 |
+
"""Represents all the possible types of input for encoding. Can be:
|
| 54 |
+
|
| 55 |
+
- When ``is_pretokenized=False``: :data:`~TextEncodeInput`
|
| 56 |
+
- When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
|
| 57 |
+
"""
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class OffsetReferential(Enum):
|
| 61 |
+
ORIGINAL = "original"
|
| 62 |
+
NORMALIZED = "normalized"
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class OffsetType(Enum):
|
| 66 |
+
BYTE = "byte"
|
| 67 |
+
CHAR = "char"
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class SplitDelimiterBehavior(Enum):
|
| 71 |
+
REMOVED = "removed"
|
| 72 |
+
ISOLATED = "isolated"
|
| 73 |
+
MERGED_WITH_PREVIOUS = "merged_with_previous"
|
| 74 |
+
MERGED_WITH_NEXT = "merged_with_next"
|
| 75 |
+
CONTIGUOUS = "contiguous"
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
from .tokenizers import (
|
| 79 |
+
AddedToken,
|
| 80 |
+
Encoding,
|
| 81 |
+
NormalizedString,
|
| 82 |
+
PreTokenizedString,
|
| 83 |
+
Regex,
|
| 84 |
+
Token,
|
| 85 |
+
Tokenizer,
|
| 86 |
+
decoders,
|
| 87 |
+
models,
|
| 88 |
+
normalizers,
|
| 89 |
+
pre_tokenizers,
|
| 90 |
+
processors,
|
| 91 |
+
trainers,
|
| 92 |
+
__version__,
|
| 93 |
+
)
|
| 94 |
+
from .implementations import (
|
| 95 |
+
BertWordPieceTokenizer,
|
| 96 |
+
ByteLevelBPETokenizer,
|
| 97 |
+
CharBPETokenizer,
|
| 98 |
+
SentencePieceBPETokenizer,
|
| 99 |
+
SentencePieceUnigramTokenizer,
|
| 100 |
+
)
|
parrot/lib/python3.10/site-packages/tokenizers/__init__.pyi
ADDED
|
@@ -0,0 +1,1200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class AddedToken:
|
| 3 |
+
"""
|
| 4 |
+
Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`.
|
| 5 |
+
It can have special options that defines the way it should behave.
|
| 6 |
+
|
| 7 |
+
Args:
|
| 8 |
+
content (:obj:`str`): The content of the token
|
| 9 |
+
|
| 10 |
+
single_word (:obj:`bool`, defaults to :obj:`False`):
|
| 11 |
+
Defines whether this token should only match single words. If :obj:`True`, this
|
| 12 |
+
token will never match inside of a word. For example the token ``ing`` would match
|
| 13 |
+
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
|
| 14 |
+
The notion of "`inside of a word`" is defined by the word boundaries pattern in
|
| 15 |
+
regular expressions (ie. the token should start and end with word boundaries).
|
| 16 |
+
|
| 17 |
+
lstrip (:obj:`bool`, defaults to :obj:`False`):
|
| 18 |
+
Defines whether this token should strip all potential whitespaces on its left side.
|
| 19 |
+
If :obj:`True`, this token will greedily match any whitespace on its left. For
|
| 20 |
+
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
|
| 21 |
+
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
|
| 22 |
+
|
| 23 |
+
rstrip (:obj:`bool`, defaults to :obj:`False`):
|
| 24 |
+
Defines whether this token should strip all potential whitespaces on its right
|
| 25 |
+
side. If :obj:`True`, this token will greedily match any whitespace on its right.
|
| 26 |
+
It works just like :obj:`lstrip` but on the right.
|
| 27 |
+
|
| 28 |
+
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
|
| 29 |
+
Defines whether this token should match against the normalized version of the input
|
| 30 |
+
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
|
| 31 |
+
lowercasing the text, the token could be extract from the input ``"I saw a lion
|
| 32 |
+
Yesterday"``.
|
| 33 |
+
special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
|
| 34 |
+
Defines whether this token should be skipped when decoding.
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
|
| 38 |
+
pass
|
| 39 |
+
|
| 40 |
+
@property
|
| 41 |
+
def content(self):
|
| 42 |
+
"""
|
| 43 |
+
Get the content of this :obj:`AddedToken`
|
| 44 |
+
"""
|
| 45 |
+
pass
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def lstrip(self):
|
| 49 |
+
"""
|
| 50 |
+
Get the value of the :obj:`lstrip` option
|
| 51 |
+
"""
|
| 52 |
+
pass
|
| 53 |
+
|
| 54 |
+
@property
|
| 55 |
+
def normalized(self):
|
| 56 |
+
"""
|
| 57 |
+
Get the value of the :obj:`normalized` option
|
| 58 |
+
"""
|
| 59 |
+
pass
|
| 60 |
+
|
| 61 |
+
@property
|
| 62 |
+
def rstrip(self):
|
| 63 |
+
"""
|
| 64 |
+
Get the value of the :obj:`rstrip` option
|
| 65 |
+
"""
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
@property
|
| 69 |
+
def single_word(self):
|
| 70 |
+
"""
|
| 71 |
+
Get the value of the :obj:`single_word` option
|
| 72 |
+
"""
|
| 73 |
+
pass
|
| 74 |
+
|
| 75 |
+
@property
|
| 76 |
+
def special(self):
|
| 77 |
+
"""
|
| 78 |
+
Get the value of the :obj:`special` option
|
| 79 |
+
"""
|
| 80 |
+
pass
|
| 81 |
+
|
| 82 |
+
class Encoding:
|
| 83 |
+
"""
|
| 84 |
+
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
|
| 85 |
+
"""
|
| 86 |
+
@property
|
| 87 |
+
def attention_mask(self):
|
| 88 |
+
"""
|
| 89 |
+
The attention mask
|
| 90 |
+
|
| 91 |
+
This indicates to the LM which tokens should be attended to, and which should not.
|
| 92 |
+
This is especially important when batching sequences, where we need to applying
|
| 93 |
+
padding.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
:obj:`List[int]`: The attention mask
|
| 97 |
+
"""
|
| 98 |
+
pass
|
| 99 |
+
|
| 100 |
+
def char_to_token(self, char_pos, sequence_index=0):
|
| 101 |
+
"""
|
| 102 |
+
Get the token that contains the char at the given position in the input sequence.
|
| 103 |
+
|
| 104 |
+
Args:
|
| 105 |
+
char_pos (:obj:`int`):
|
| 106 |
+
The position of a char in the input string
|
| 107 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 108 |
+
The index of the sequence that contains the target char
|
| 109 |
+
|
| 110 |
+
Returns:
|
| 111 |
+
:obj:`int`: The index of the token that contains this char in the encoded sequence
|
| 112 |
+
"""
|
| 113 |
+
pass
|
| 114 |
+
|
| 115 |
+
def char_to_word(self, char_pos, sequence_index=0):
|
| 116 |
+
"""
|
| 117 |
+
Get the word that contains the char at the given position in the input sequence.
|
| 118 |
+
|
| 119 |
+
Args:
|
| 120 |
+
char_pos (:obj:`int`):
|
| 121 |
+
The position of a char in the input string
|
| 122 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 123 |
+
The index of the sequence that contains the target char
|
| 124 |
+
|
| 125 |
+
Returns:
|
| 126 |
+
:obj:`int`: The index of the word that contains this char in the input sequence
|
| 127 |
+
"""
|
| 128 |
+
pass
|
| 129 |
+
|
| 130 |
+
@property
|
| 131 |
+
def ids(self):
|
| 132 |
+
"""
|
| 133 |
+
The generated IDs
|
| 134 |
+
|
| 135 |
+
The IDs are the main input to a Language Model. They are the token indices,
|
| 136 |
+
the numerical representations that a LM understands.
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
:obj:`List[int]`: The list of IDs
|
| 140 |
+
"""
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
@staticmethod
|
| 144 |
+
def merge(encodings, growing_offsets=True):
|
| 145 |
+
"""
|
| 146 |
+
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
|
| 147 |
+
|
| 148 |
+
Args:
|
| 149 |
+
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
|
| 150 |
+
The list of encodings that should be merged in one
|
| 151 |
+
|
| 152 |
+
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
|
| 153 |
+
Whether the offsets should accumulate while merging
|
| 154 |
+
|
| 155 |
+
Returns:
|
| 156 |
+
:class:`~tokenizers.Encoding`: The resulting Encoding
|
| 157 |
+
"""
|
| 158 |
+
pass
|
| 159 |
+
|
| 160 |
+
@property
|
| 161 |
+
def n_sequences(self):
|
| 162 |
+
"""
|
| 163 |
+
The number of sequences represented
|
| 164 |
+
|
| 165 |
+
Returns:
|
| 166 |
+
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
|
| 167 |
+
"""
|
| 168 |
+
pass
|
| 169 |
+
|
| 170 |
+
@property
|
| 171 |
+
def offsets(self):
|
| 172 |
+
"""
|
| 173 |
+
The offsets associated to each token
|
| 174 |
+
|
| 175 |
+
These offsets let's you slice the input string, and thus retrieve the original
|
| 176 |
+
part that led to producing the corresponding token.
|
| 177 |
+
|
| 178 |
+
Returns:
|
| 179 |
+
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
|
| 180 |
+
"""
|
| 181 |
+
pass
|
| 182 |
+
|
| 183 |
+
@property
|
| 184 |
+
def overflowing(self):
|
| 185 |
+
"""
|
| 186 |
+
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
|
| 187 |
+
|
| 188 |
+
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
|
| 189 |
+
the output into as many pieces as required to match the specified maximum length.
|
| 190 |
+
This field lets you retrieve all the subsequent pieces.
|
| 191 |
+
|
| 192 |
+
When you use pairs of sequences, the overflowing pieces will contain enough
|
| 193 |
+
variations to cover all the possible combinations, while respecting the provided
|
| 194 |
+
maximum length.
|
| 195 |
+
"""
|
| 196 |
+
pass
|
| 197 |
+
|
| 198 |
+
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
|
| 199 |
+
"""
|
| 200 |
+
Pad the :class:`~tokenizers.Encoding` at the given length
|
| 201 |
+
|
| 202 |
+
Args:
|
| 203 |
+
length (:obj:`int`):
|
| 204 |
+
The desired length
|
| 205 |
+
|
| 206 |
+
direction: (:obj:`str`, defaults to :obj:`right`):
|
| 207 |
+
The expected padding direction. Can be either :obj:`right` or :obj:`left`
|
| 208 |
+
|
| 209 |
+
pad_id (:obj:`int`, defaults to :obj:`0`):
|
| 210 |
+
The ID corresponding to the padding token
|
| 211 |
+
|
| 212 |
+
pad_type_id (:obj:`int`, defaults to :obj:`0`):
|
| 213 |
+
The type ID corresponding to the padding token
|
| 214 |
+
|
| 215 |
+
pad_token (:obj:`str`, defaults to `[PAD]`):
|
| 216 |
+
The pad token to use
|
| 217 |
+
"""
|
| 218 |
+
pass
|
| 219 |
+
|
| 220 |
+
@property
|
| 221 |
+
def sequence_ids(self):
|
| 222 |
+
"""
|
| 223 |
+
The generated sequence indices.
|
| 224 |
+
|
| 225 |
+
They represent the index of the input sequence associated to each token.
|
| 226 |
+
The sequence id can be None if the token is not related to any input sequence,
|
| 227 |
+
like for example with special tokens.
|
| 228 |
+
|
| 229 |
+
Returns:
|
| 230 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
|
| 231 |
+
"""
|
| 232 |
+
pass
|
| 233 |
+
|
| 234 |
+
def set_sequence_id(self, sequence_id):
|
| 235 |
+
"""
|
| 236 |
+
Set the given sequence index
|
| 237 |
+
|
| 238 |
+
Set the given sequence index for the whole range of tokens contained in this
|
| 239 |
+
:class:`~tokenizers.Encoding`.
|
| 240 |
+
"""
|
| 241 |
+
pass
|
| 242 |
+
|
| 243 |
+
@property
|
| 244 |
+
def special_tokens_mask(self):
|
| 245 |
+
"""
|
| 246 |
+
The special token mask
|
| 247 |
+
|
| 248 |
+
This indicates which tokens are special tokens, and which are not.
|
| 249 |
+
|
| 250 |
+
Returns:
|
| 251 |
+
:obj:`List[int]`: The special tokens mask
|
| 252 |
+
"""
|
| 253 |
+
pass
|
| 254 |
+
|
| 255 |
+
def token_to_chars(self, token_index):
|
| 256 |
+
"""
|
| 257 |
+
Get the offsets of the token at the given index.
|
| 258 |
+
|
| 259 |
+
The returned offsets are related to the input sequence that contains the
|
| 260 |
+
token. In order to determine in which input sequence it belongs, you
|
| 261 |
+
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
token_index (:obj:`int`):
|
| 265 |
+
The index of a token in the encoded sequence.
|
| 266 |
+
|
| 267 |
+
Returns:
|
| 268 |
+
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
|
| 269 |
+
"""
|
| 270 |
+
pass
|
| 271 |
+
|
| 272 |
+
def token_to_sequence(self, token_index):
|
| 273 |
+
"""
|
| 274 |
+
Get the index of the sequence represented by the given token.
|
| 275 |
+
|
| 276 |
+
In the general use case, this method returns :obj:`0` for a single sequence or
|
| 277 |
+
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
|
| 278 |
+
|
| 279 |
+
Args:
|
| 280 |
+
token_index (:obj:`int`):
|
| 281 |
+
The index of a token in the encoded sequence.
|
| 282 |
+
|
| 283 |
+
Returns:
|
| 284 |
+
:obj:`int`: The sequence id of the given token
|
| 285 |
+
"""
|
| 286 |
+
pass
|
| 287 |
+
|
| 288 |
+
def token_to_word(self, token_index):
|
| 289 |
+
"""
|
| 290 |
+
Get the index of the word that contains the token in one of the input sequences.
|
| 291 |
+
|
| 292 |
+
The returned word index is related to the input sequence that contains
|
| 293 |
+
the token. In order to determine in which input sequence it belongs, you
|
| 294 |
+
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
|
| 295 |
+
|
| 296 |
+
Args:
|
| 297 |
+
token_index (:obj:`int`):
|
| 298 |
+
The index of a token in the encoded sequence.
|
| 299 |
+
|
| 300 |
+
Returns:
|
| 301 |
+
:obj:`int`: The index of the word in the relevant input sequence.
|
| 302 |
+
"""
|
| 303 |
+
pass
|
| 304 |
+
|
| 305 |
+
@property
|
| 306 |
+
def tokens(self):
|
| 307 |
+
"""
|
| 308 |
+
The generated tokens
|
| 309 |
+
|
| 310 |
+
They are the string representation of the IDs.
|
| 311 |
+
|
| 312 |
+
Returns:
|
| 313 |
+
:obj:`List[str]`: The list of tokens
|
| 314 |
+
"""
|
| 315 |
+
pass
|
| 316 |
+
|
| 317 |
+
def truncate(self, max_length, stride=0, direction="right"):
|
| 318 |
+
"""
|
| 319 |
+
Truncate the :class:`~tokenizers.Encoding` at the given length
|
| 320 |
+
|
| 321 |
+
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
|
| 322 |
+
this information is lost. It will be considered as representing a single sequence.
|
| 323 |
+
|
| 324 |
+
Args:
|
| 325 |
+
max_length (:obj:`int`):
|
| 326 |
+
The desired length
|
| 327 |
+
|
| 328 |
+
stride (:obj:`int`, defaults to :obj:`0`):
|
| 329 |
+
The length of previous content to be included in each overflowing piece
|
| 330 |
+
|
| 331 |
+
direction (:obj:`str`, defaults to :obj:`right`):
|
| 332 |
+
Truncate direction
|
| 333 |
+
"""
|
| 334 |
+
pass
|
| 335 |
+
|
| 336 |
+
@property
|
| 337 |
+
def type_ids(self):
|
| 338 |
+
"""
|
| 339 |
+
The generated type IDs
|
| 340 |
+
|
| 341 |
+
Generally used for tasks like sequence classification or question answering,
|
| 342 |
+
these tokens let the LM know which input sequence corresponds to each tokens.
|
| 343 |
+
|
| 344 |
+
Returns:
|
| 345 |
+
:obj:`List[int]`: The list of type ids
|
| 346 |
+
"""
|
| 347 |
+
pass
|
| 348 |
+
|
| 349 |
+
@property
|
| 350 |
+
def word_ids(self):
|
| 351 |
+
"""
|
| 352 |
+
The generated word indices.
|
| 353 |
+
|
| 354 |
+
They represent the index of the word associated to each token.
|
| 355 |
+
When the input is pre-tokenized, they correspond to the ID of the given input label,
|
| 356 |
+
otherwise they correspond to the words indices as defined by the
|
| 357 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
|
| 358 |
+
|
| 359 |
+
For special tokens and such (any token that was generated from something that was
|
| 360 |
+
not part of the input), the output is :obj:`None`
|
| 361 |
+
|
| 362 |
+
Returns:
|
| 363 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
|
| 364 |
+
"""
|
| 365 |
+
pass
|
| 366 |
+
|
| 367 |
+
def word_to_chars(self, word_index, sequence_index=0):
|
| 368 |
+
"""
|
| 369 |
+
Get the offsets of the word at the given index in one of the input sequences.
|
| 370 |
+
|
| 371 |
+
Args:
|
| 372 |
+
word_index (:obj:`int`):
|
| 373 |
+
The index of a word in one of the input sequences.
|
| 374 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 375 |
+
The index of the sequence that contains the target word
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
|
| 379 |
+
"""
|
| 380 |
+
pass
|
| 381 |
+
|
| 382 |
+
def word_to_tokens(self, word_index, sequence_index=0):
|
| 383 |
+
"""
|
| 384 |
+
Get the encoded tokens corresponding to the word at the given index
|
| 385 |
+
in one of the input sequences.
|
| 386 |
+
|
| 387 |
+
Args:
|
| 388 |
+
word_index (:obj:`int`):
|
| 389 |
+
The index of a word in one of the input sequences.
|
| 390 |
+
sequence_index (:obj:`int`, defaults to :obj:`0`):
|
| 391 |
+
The index of the sequence that contains the target word
|
| 392 |
+
|
| 393 |
+
Returns:
|
| 394 |
+
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
|
| 395 |
+
"""
|
| 396 |
+
pass
|
| 397 |
+
|
| 398 |
+
@property
|
| 399 |
+
def words(self):
|
| 400 |
+
"""
|
| 401 |
+
The generated word indices.
|
| 402 |
+
|
| 403 |
+
.. warning::
|
| 404 |
+
This is deprecated and will be removed in a future version.
|
| 405 |
+
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
|
| 406 |
+
|
| 407 |
+
They represent the index of the word associated to each token.
|
| 408 |
+
When the input is pre-tokenized, they correspond to the ID of the given input label,
|
| 409 |
+
otherwise they correspond to the words indices as defined by the
|
| 410 |
+
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
|
| 411 |
+
|
| 412 |
+
For special tokens and such (any token that was generated from something that was
|
| 413 |
+
not part of the input), the output is :obj:`None`
|
| 414 |
+
|
| 415 |
+
Returns:
|
| 416 |
+
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
|
| 417 |
+
"""
|
| 418 |
+
pass
|
| 419 |
+
|
| 420 |
+
class NormalizedString:
|
| 421 |
+
"""
|
| 422 |
+
NormalizedString
|
| 423 |
+
|
| 424 |
+
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
|
| 425 |
+
While making all the requested modifications, it keeps track of the alignment information
|
| 426 |
+
between the two versions of the string.
|
| 427 |
+
|
| 428 |
+
Args:
|
| 429 |
+
sequence: str:
|
| 430 |
+
The string sequence used to initialize this NormalizedString
|
| 431 |
+
"""
|
| 432 |
+
def append(self, s):
|
| 433 |
+
"""
|
| 434 |
+
Append the given sequence to the string
|
| 435 |
+
"""
|
| 436 |
+
pass
|
| 437 |
+
|
| 438 |
+
def clear(self):
|
| 439 |
+
"""
|
| 440 |
+
Clears the string
|
| 441 |
+
"""
|
| 442 |
+
pass
|
| 443 |
+
|
| 444 |
+
def filter(self, func):
|
| 445 |
+
"""
|
| 446 |
+
Filter each character of the string using the given func
|
| 447 |
+
"""
|
| 448 |
+
pass
|
| 449 |
+
|
| 450 |
+
def for_each(self, func):
|
| 451 |
+
"""
|
| 452 |
+
Calls the given function for each character of the string
|
| 453 |
+
"""
|
| 454 |
+
pass
|
| 455 |
+
|
| 456 |
+
def lowercase(self):
|
| 457 |
+
"""
|
| 458 |
+
Lowercase the string
|
| 459 |
+
"""
|
| 460 |
+
pass
|
| 461 |
+
|
| 462 |
+
def lstrip(self):
|
| 463 |
+
"""
|
| 464 |
+
Strip the left of the string
|
| 465 |
+
"""
|
| 466 |
+
pass
|
| 467 |
+
|
| 468 |
+
def map(self, func):
|
| 469 |
+
"""
|
| 470 |
+
Calls the given function for each character of the string
|
| 471 |
+
|
| 472 |
+
Replaces each character of the string using the returned value. Each
|
| 473 |
+
returned value **must** be a str of length 1 (ie a character).
|
| 474 |
+
"""
|
| 475 |
+
pass
|
| 476 |
+
|
| 477 |
+
def nfc(self):
|
| 478 |
+
"""
|
| 479 |
+
Runs the NFC normalization
|
| 480 |
+
"""
|
| 481 |
+
pass
|
| 482 |
+
|
| 483 |
+
def nfd(self):
|
| 484 |
+
"""
|
| 485 |
+
Runs the NFD normalization
|
| 486 |
+
"""
|
| 487 |
+
pass
|
| 488 |
+
|
| 489 |
+
def nfkc(self):
|
| 490 |
+
"""
|
| 491 |
+
Runs the NFKC normalization
|
| 492 |
+
"""
|
| 493 |
+
pass
|
| 494 |
+
|
| 495 |
+
def nfkd(self):
|
| 496 |
+
"""
|
| 497 |
+
Runs the NFKD normalization
|
| 498 |
+
"""
|
| 499 |
+
pass
|
| 500 |
+
|
| 501 |
+
@property
|
| 502 |
+
def normalized(self):
|
| 503 |
+
"""
|
| 504 |
+
The normalized part of the string
|
| 505 |
+
"""
|
| 506 |
+
pass
|
| 507 |
+
|
| 508 |
+
def prepend(self, s):
|
| 509 |
+
"""
|
| 510 |
+
Prepend the given sequence to the string
|
| 511 |
+
"""
|
| 512 |
+
pass
|
| 513 |
+
|
| 514 |
+
def replace(self, pattern, content):
|
| 515 |
+
"""
|
| 516 |
+
Replace the content of the given pattern with the provided content
|
| 517 |
+
|
| 518 |
+
Args:
|
| 519 |
+
pattern: Pattern:
|
| 520 |
+
A pattern used to match the string. Usually a string or a Regex
|
| 521 |
+
|
| 522 |
+
content: str:
|
| 523 |
+
The content to be used as replacement
|
| 524 |
+
"""
|
| 525 |
+
pass
|
| 526 |
+
|
| 527 |
+
def rstrip(self):
|
| 528 |
+
"""
|
| 529 |
+
Strip the right of the string
|
| 530 |
+
"""
|
| 531 |
+
pass
|
| 532 |
+
|
| 533 |
+
def slice(self, range):
|
| 534 |
+
"""
|
| 535 |
+
Slice the string using the given range
|
| 536 |
+
"""
|
| 537 |
+
pass
|
| 538 |
+
|
| 539 |
+
def split(self, pattern, behavior):
|
| 540 |
+
"""
|
| 541 |
+
Split the NormalizedString using the given pattern and the specified behavior
|
| 542 |
+
|
| 543 |
+
Args:
|
| 544 |
+
pattern: Pattern:
|
| 545 |
+
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
|
| 546 |
+
|
| 547 |
+
behavior: SplitDelimiterBehavior:
|
| 548 |
+
The behavior to use when splitting.
|
| 549 |
+
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
|
| 550 |
+
"contiguous"
|
| 551 |
+
|
| 552 |
+
Returns:
|
| 553 |
+
A list of NormalizedString, representing each split
|
| 554 |
+
"""
|
| 555 |
+
pass
|
| 556 |
+
|
| 557 |
+
def strip(self):
|
| 558 |
+
"""
|
| 559 |
+
Strip both ends of the string
|
| 560 |
+
"""
|
| 561 |
+
pass
|
| 562 |
+
|
| 563 |
+
def uppercase(self):
|
| 564 |
+
"""
|
| 565 |
+
Uppercase the string
|
| 566 |
+
"""
|
| 567 |
+
pass
|
| 568 |
+
|
| 569 |
+
class PreTokenizedString:
|
| 570 |
+
"""
|
| 571 |
+
PreTokenizedString
|
| 572 |
+
|
| 573 |
+
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
|
| 574 |
+
underlying string, while keeping track of the alignment information (offsets).
|
| 575 |
+
|
| 576 |
+
The PreTokenizedString manages what we call `splits`. Each split represents a substring
|
| 577 |
+
which is a subpart of the original string, with the relevant offsets and tokens.
|
| 578 |
+
|
| 579 |
+
When calling one of the methods used to modify the PreTokenizedString (namely one of
|
| 580 |
+
`split`, `normalize` or `tokenize), only the `splits` that don't have any associated
|
| 581 |
+
tokens will get modified.
|
| 582 |
+
|
| 583 |
+
Args:
|
| 584 |
+
sequence: str:
|
| 585 |
+
The string sequence used to initialize this PreTokenizedString
|
| 586 |
+
"""
|
| 587 |
+
def __init__(self, sequence):
|
| 588 |
+
pass
|
| 589 |
+
|
| 590 |
+
def get_splits(self, offset_referential="original", offset_type="char"):
|
| 591 |
+
"""
|
| 592 |
+
Get the splits currently managed by the PreTokenizedString
|
| 593 |
+
|
| 594 |
+
Args:
|
| 595 |
+
offset_referential: :obj:`str`
|
| 596 |
+
Whether the returned splits should have offsets expressed relative
|
| 597 |
+
to the original string, or the normalized one. choices: "original", "normalized".
|
| 598 |
+
|
| 599 |
+
offset_type: :obj:`str`
|
| 600 |
+
Whether the returned splits should have offsets expressed in bytes or chars.
|
| 601 |
+
When slicing an str, we usually want to use chars, which is the default value.
|
| 602 |
+
Now in some cases it might be interesting to get these offsets expressed in bytes,
|
| 603 |
+
so it is possible to change this here.
|
| 604 |
+
choices: "char", "bytes"
|
| 605 |
+
|
| 606 |
+
Returns
|
| 607 |
+
A list of splits
|
| 608 |
+
"""
|
| 609 |
+
pass
|
| 610 |
+
|
| 611 |
+
def normalize(self, func):
|
| 612 |
+
"""
|
| 613 |
+
Normalize each split of the `PreTokenizedString` using the given `func`
|
| 614 |
+
|
| 615 |
+
Args:
|
| 616 |
+
func: Callable[[NormalizedString], None]:
|
| 617 |
+
The function used to normalize each underlying split. This function
|
| 618 |
+
does not need to return anything, just calling the methods on the provided
|
| 619 |
+
NormalizedString allow its modification.
|
| 620 |
+
"""
|
| 621 |
+
pass
|
| 622 |
+
|
| 623 |
+
def split(self, func):
|
| 624 |
+
"""
|
| 625 |
+
Split the PreTokenizedString using the given `func`
|
| 626 |
+
|
| 627 |
+
Args:
|
| 628 |
+
func: Callable[[index, NormalizedString], List[NormalizedString]]:
|
| 629 |
+
The function used to split each underlying split.
|
| 630 |
+
It is expected to return a list of `NormalizedString`, that represent the new
|
| 631 |
+
splits. If the given `NormalizedString` does not need any splitting, we can
|
| 632 |
+
just return it directly.
|
| 633 |
+
In order for the offsets to be tracked accurately, any returned `NormalizedString`
|
| 634 |
+
should come from calling either `.split` or `.slice` on the received one.
|
| 635 |
+
"""
|
| 636 |
+
pass
|
| 637 |
+
|
| 638 |
+
def to_encoding(self, type_id=0, word_idx=None):
|
| 639 |
+
"""
|
| 640 |
+
Return an Encoding generated from this PreTokenizedString
|
| 641 |
+
|
| 642 |
+
Args:
|
| 643 |
+
type_id: int = 0:
|
| 644 |
+
The type_id to be used on the generated Encoding.
|
| 645 |
+
|
| 646 |
+
word_idx: Optional[int] = None:
|
| 647 |
+
An optional word index to be used for each token of this Encoding. If provided,
|
| 648 |
+
all the word indices in the generated Encoding will use this value, instead
|
| 649 |
+
of the one automatically tracked during pre-tokenization.
|
| 650 |
+
|
| 651 |
+
Returns:
|
| 652 |
+
An Encoding
|
| 653 |
+
"""
|
| 654 |
+
pass
|
| 655 |
+
|
| 656 |
+
def tokenize(self, func):
|
| 657 |
+
"""
|
| 658 |
+
Tokenize each split of the `PreTokenizedString` using the given `func`
|
| 659 |
+
|
| 660 |
+
Args:
|
| 661 |
+
func: Callable[[str], List[Token]]:
|
| 662 |
+
The function used to tokenize each underlying split. This function must return
|
| 663 |
+
a list of Token generated from the input str.
|
| 664 |
+
"""
|
| 665 |
+
pass
|
| 666 |
+
|
| 667 |
+
class Regex:
|
| 668 |
+
"""
|
| 669 |
+
Instantiate a new Regex with the given pattern
|
| 670 |
+
"""
|
| 671 |
+
def __init__(self, pattern):
|
| 672 |
+
pass
|
| 673 |
+
|
| 674 |
+
class Token:
|
| 675 |
+
pass
|
| 676 |
+
|
| 677 |
+
class Tokenizer:
|
| 678 |
+
"""
|
| 679 |
+
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
|
| 680 |
+
and outputs an :class:`~tokenizers.Encoding`.
|
| 681 |
+
|
| 682 |
+
Args:
|
| 683 |
+
model (:class:`~tokenizers.models.Model`):
|
| 684 |
+
The core algorithm that this :obj:`Tokenizer` should be using.
|
| 685 |
+
|
| 686 |
+
"""
|
| 687 |
+
def __init__(self, model):
|
| 688 |
+
pass
|
| 689 |
+
|
| 690 |
+
def add_special_tokens(self, tokens):
|
| 691 |
+
"""
|
| 692 |
+
Add the given special tokens to the Tokenizer.
|
| 693 |
+
|
| 694 |
+
If these tokens are already part of the vocabulary, it just let the Tokenizer know about
|
| 695 |
+
them. If they don't exist, the Tokenizer creates them, giving them a new id.
|
| 696 |
+
|
| 697 |
+
These special tokens will never be processed by the model (ie won't be split into
|
| 698 |
+
multiple tokens), and they can be removed from the output when decoding.
|
| 699 |
+
|
| 700 |
+
Args:
|
| 701 |
+
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
|
| 702 |
+
The list of special tokens we want to add to the vocabulary. Each token can either
|
| 703 |
+
be a string or an instance of :class:`~tokenizers.AddedToken` for more
|
| 704 |
+
customization.
|
| 705 |
+
|
| 706 |
+
Returns:
|
| 707 |
+
:obj:`int`: The number of tokens that were created in the vocabulary
|
| 708 |
+
"""
|
| 709 |
+
pass
|
| 710 |
+
|
| 711 |
+
def add_tokens(self, tokens):
|
| 712 |
+
"""
|
| 713 |
+
Add the given tokens to the vocabulary
|
| 714 |
+
|
| 715 |
+
The given tokens are added only if they don't already exist in the vocabulary.
|
| 716 |
+
Each token then gets a new attributed id.
|
| 717 |
+
|
| 718 |
+
Args:
|
| 719 |
+
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
|
| 720 |
+
The list of tokens we want to add to the vocabulary. Each token can be either a
|
| 721 |
+
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
|
| 722 |
+
|
| 723 |
+
Returns:
|
| 724 |
+
:obj:`int`: The number of tokens that were created in the vocabulary
|
| 725 |
+
"""
|
| 726 |
+
pass
|
| 727 |
+
|
| 728 |
+
def decode(self, ids, skip_special_tokens=True):
|
| 729 |
+
"""
|
| 730 |
+
Decode the given list of ids back to a string
|
| 731 |
+
|
| 732 |
+
This is used to decode anything coming back from a Language Model
|
| 733 |
+
|
| 734 |
+
Args:
|
| 735 |
+
ids (A :obj:`List/Tuple` of :obj:`int`):
|
| 736 |
+
The list of ids that we want to decode
|
| 737 |
+
|
| 738 |
+
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 739 |
+
Whether the special tokens should be removed from the decoded string
|
| 740 |
+
|
| 741 |
+
Returns:
|
| 742 |
+
:obj:`str`: The decoded string
|
| 743 |
+
"""
|
| 744 |
+
pass
|
| 745 |
+
|
| 746 |
+
def decode_batch(self, sequences, skip_special_tokens=True):
|
| 747 |
+
"""
|
| 748 |
+
Decode a batch of ids back to their corresponding string
|
| 749 |
+
|
| 750 |
+
Args:
|
| 751 |
+
sequences (:obj:`List` of :obj:`List[int]`):
|
| 752 |
+
The batch of sequences we want to decode
|
| 753 |
+
|
| 754 |
+
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 755 |
+
Whether the special tokens should be removed from the decoded strings
|
| 756 |
+
|
| 757 |
+
Returns:
|
| 758 |
+
:obj:`List[str]`: A list of decoded strings
|
| 759 |
+
"""
|
| 760 |
+
pass
|
| 761 |
+
|
| 762 |
+
@property
|
| 763 |
+
def decoder(self):
|
| 764 |
+
"""
|
| 765 |
+
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
|
| 766 |
+
"""
|
| 767 |
+
pass
|
| 768 |
+
|
| 769 |
+
def enable_padding(
|
| 770 |
+
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
|
| 771 |
+
):
|
| 772 |
+
"""
|
| 773 |
+
Enable the padding
|
| 774 |
+
|
| 775 |
+
Args:
|
| 776 |
+
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
|
| 777 |
+
The direction in which to pad. Can be either ``right`` or ``left``
|
| 778 |
+
|
| 779 |
+
pad_to_multiple_of (:obj:`int`, `optional`):
|
| 780 |
+
If specified, the padding length should always snap to the next multiple of the
|
| 781 |
+
given value. For example if we were going to pad witha length of 250 but
|
| 782 |
+
``pad_to_multiple_of=8`` then we will pad to 256.
|
| 783 |
+
|
| 784 |
+
pad_id (:obj:`int`, defaults to 0):
|
| 785 |
+
The id to be used when padding
|
| 786 |
+
|
| 787 |
+
pad_type_id (:obj:`int`, defaults to 0):
|
| 788 |
+
The type id to be used when padding
|
| 789 |
+
|
| 790 |
+
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
|
| 791 |
+
The pad token to be used when padding
|
| 792 |
+
|
| 793 |
+
length (:obj:`int`, `optional`):
|
| 794 |
+
If specified, the length at which to pad. If not specified we pad using the size of
|
| 795 |
+
the longest sequence in a batch.
|
| 796 |
+
"""
|
| 797 |
+
pass
|
| 798 |
+
|
| 799 |
+
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
|
| 800 |
+
"""
|
| 801 |
+
Enable truncation
|
| 802 |
+
|
| 803 |
+
Args:
|
| 804 |
+
max_length (:obj:`int`):
|
| 805 |
+
The max length at which to truncate
|
| 806 |
+
|
| 807 |
+
stride (:obj:`int`, `optional`):
|
| 808 |
+
The length of the previous first sequence to be included in the overflowing
|
| 809 |
+
sequence
|
| 810 |
+
|
| 811 |
+
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
|
| 812 |
+
The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or
|
| 813 |
+
``only_second``.
|
| 814 |
+
|
| 815 |
+
direction (:obj:`str`, defaults to :obj:`right`):
|
| 816 |
+
Truncate direction
|
| 817 |
+
"""
|
| 818 |
+
pass
|
| 819 |
+
|
| 820 |
+
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
|
| 821 |
+
"""
|
| 822 |
+
Encode the given sequence and pair. This method can process raw text sequences
|
| 823 |
+
as well as already pre-tokenized sequences.
|
| 824 |
+
|
| 825 |
+
Example:
|
| 826 |
+
Here are some examples of the inputs that are accepted::
|
| 827 |
+
|
| 828 |
+
encode("A single sequence")`
|
| 829 |
+
encode("A sequence", "And its pair")`
|
| 830 |
+
encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
|
| 831 |
+
encode(
|
| 832 |
+
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
|
| 833 |
+
is_pretokenized=True
|
| 834 |
+
)
|
| 835 |
+
|
| 836 |
+
Args:
|
| 837 |
+
sequence (:obj:`~tokenizers.InputSequence`):
|
| 838 |
+
The main input sequence we want to encode. This sequence can be either raw
|
| 839 |
+
text or pre-tokenized, according to the ``is_pretokenized`` argument:
|
| 840 |
+
|
| 841 |
+
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
|
| 842 |
+
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
|
| 843 |
+
|
| 844 |
+
pair (:obj:`~tokenizers.InputSequence`, `optional`):
|
| 845 |
+
An optional input sequence. The expected format is the same that for ``sequence``.
|
| 846 |
+
|
| 847 |
+
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
|
| 848 |
+
Whether the input is already pre-tokenized
|
| 849 |
+
|
| 850 |
+
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 851 |
+
Whether to add the special tokens
|
| 852 |
+
|
| 853 |
+
Returns:
|
| 854 |
+
:class:`~tokenizers.Encoding`: The encoded result
|
| 855 |
+
|
| 856 |
+
"""
|
| 857 |
+
pass
|
| 858 |
+
|
| 859 |
+
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
|
| 860 |
+
"""
|
| 861 |
+
Encode the given batch of inputs. This method accept both raw text sequences
|
| 862 |
+
as well as already pre-tokenized sequences.
|
| 863 |
+
|
| 864 |
+
Example:
|
| 865 |
+
Here are some examples of the inputs that are accepted::
|
| 866 |
+
|
| 867 |
+
encode_batch([
|
| 868 |
+
"A single sequence",
|
| 869 |
+
("A tuple with a sequence", "And its pair"),
|
| 870 |
+
[ "A", "pre", "tokenized", "sequence" ],
|
| 871 |
+
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
|
| 872 |
+
])
|
| 873 |
+
|
| 874 |
+
Args:
|
| 875 |
+
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
|
| 876 |
+
A list of single sequences or pair sequences to encode. Each sequence
|
| 877 |
+
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
|
| 878 |
+
argument:
|
| 879 |
+
|
| 880 |
+
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
|
| 881 |
+
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
|
| 882 |
+
|
| 883 |
+
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
|
| 884 |
+
Whether the input is already pre-tokenized
|
| 885 |
+
|
| 886 |
+
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 887 |
+
Whether to add the special tokens
|
| 888 |
+
|
| 889 |
+
Returns:
|
| 890 |
+
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
|
| 891 |
+
|
| 892 |
+
"""
|
| 893 |
+
pass
|
| 894 |
+
|
| 895 |
+
@property
|
| 896 |
+
def encode_special_tokens(self):
|
| 897 |
+
"""
|
| 898 |
+
Modifies the tokenizer in order to use or not the special tokens
|
| 899 |
+
during encoding.
|
| 900 |
+
|
| 901 |
+
Args:
|
| 902 |
+
value (:obj:`bool`):
|
| 903 |
+
Whether to use the special tokens or not
|
| 904 |
+
|
| 905 |
+
"""
|
| 906 |
+
pass
|
| 907 |
+
|
| 908 |
+
@staticmethod
|
| 909 |
+
def from_buffer(buffer):
|
| 910 |
+
"""
|
| 911 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
|
| 912 |
+
|
| 913 |
+
Args:
|
| 914 |
+
buffer (:obj:`bytes`):
|
| 915 |
+
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
|
| 916 |
+
|
| 917 |
+
Returns:
|
| 918 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 919 |
+
"""
|
| 920 |
+
pass
|
| 921 |
+
|
| 922 |
+
@staticmethod
|
| 923 |
+
def from_file(path):
|
| 924 |
+
"""
|
| 925 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
|
| 926 |
+
|
| 927 |
+
Args:
|
| 928 |
+
path (:obj:`str`):
|
| 929 |
+
A path to a local JSON file representing a previously serialized
|
| 930 |
+
:class:`~tokenizers.Tokenizer`
|
| 931 |
+
|
| 932 |
+
Returns:
|
| 933 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 934 |
+
"""
|
| 935 |
+
pass
|
| 936 |
+
|
| 937 |
+
@staticmethod
|
| 938 |
+
def from_pretrained(identifier, revision="main", auth_token=None):
|
| 939 |
+
"""
|
| 940 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
|
| 941 |
+
Hugging Face Hub.
|
| 942 |
+
|
| 943 |
+
Args:
|
| 944 |
+
identifier (:obj:`str`):
|
| 945 |
+
The identifier of a Model on the Hugging Face Hub, that contains
|
| 946 |
+
a tokenizer.json file
|
| 947 |
+
revision (:obj:`str`, defaults to `main`):
|
| 948 |
+
A branch or commit id
|
| 949 |
+
auth_token (:obj:`str`, `optional`, defaults to `None`):
|
| 950 |
+
An optional auth token used to access private repositories on the
|
| 951 |
+
Hugging Face Hub
|
| 952 |
+
|
| 953 |
+
Returns:
|
| 954 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 955 |
+
"""
|
| 956 |
+
pass
|
| 957 |
+
|
| 958 |
+
@staticmethod
|
| 959 |
+
def from_str(json):
|
| 960 |
+
"""
|
| 961 |
+
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
|
| 962 |
+
|
| 963 |
+
Args:
|
| 964 |
+
json (:obj:`str`):
|
| 965 |
+
A valid JSON string representing a previously serialized
|
| 966 |
+
:class:`~tokenizers.Tokenizer`
|
| 967 |
+
|
| 968 |
+
Returns:
|
| 969 |
+
:class:`~tokenizers.Tokenizer`: The new tokenizer
|
| 970 |
+
"""
|
| 971 |
+
pass
|
| 972 |
+
|
| 973 |
+
def get_added_tokens_decoder(self):
|
| 974 |
+
"""
|
| 975 |
+
Get the underlying vocabulary
|
| 976 |
+
|
| 977 |
+
Returns:
|
| 978 |
+
:obj:`Dict[int, AddedToken]`: The vocabulary
|
| 979 |
+
"""
|
| 980 |
+
pass
|
| 981 |
+
|
| 982 |
+
def get_vocab(self, with_added_tokens=True):
|
| 983 |
+
"""
|
| 984 |
+
Get the underlying vocabulary
|
| 985 |
+
|
| 986 |
+
Args:
|
| 987 |
+
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 988 |
+
Whether to include the added tokens
|
| 989 |
+
|
| 990 |
+
Returns:
|
| 991 |
+
:obj:`Dict[str, int]`: The vocabulary
|
| 992 |
+
"""
|
| 993 |
+
pass
|
| 994 |
+
|
| 995 |
+
def get_vocab_size(self, with_added_tokens=True):
|
| 996 |
+
"""
|
| 997 |
+
Get the size of the underlying vocabulary
|
| 998 |
+
|
| 999 |
+
Args:
|
| 1000 |
+
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
|
| 1001 |
+
Whether to include the added tokens
|
| 1002 |
+
|
| 1003 |
+
Returns:
|
| 1004 |
+
:obj:`int`: The size of the vocabulary
|
| 1005 |
+
"""
|
| 1006 |
+
pass
|
| 1007 |
+
|
| 1008 |
+
def id_to_token(self, id):
|
| 1009 |
+
"""
|
| 1010 |
+
Convert the given id to its corresponding token if it exists
|
| 1011 |
+
|
| 1012 |
+
Args:
|
| 1013 |
+
id (:obj:`int`):
|
| 1014 |
+
The id to convert
|
| 1015 |
+
|
| 1016 |
+
Returns:
|
| 1017 |
+
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
|
| 1018 |
+
"""
|
| 1019 |
+
pass
|
| 1020 |
+
|
| 1021 |
+
@property
|
| 1022 |
+
def model(self):
|
| 1023 |
+
"""
|
| 1024 |
+
The :class:`~tokenizers.models.Model` in use by the Tokenizer
|
| 1025 |
+
"""
|
| 1026 |
+
pass
|
| 1027 |
+
|
| 1028 |
+
def no_padding(self):
|
| 1029 |
+
"""
|
| 1030 |
+
Disable padding
|
| 1031 |
+
"""
|
| 1032 |
+
pass
|
| 1033 |
+
|
| 1034 |
+
def no_truncation(self):
|
| 1035 |
+
"""
|
| 1036 |
+
Disable truncation
|
| 1037 |
+
"""
|
| 1038 |
+
pass
|
| 1039 |
+
|
| 1040 |
+
@property
|
| 1041 |
+
def normalizer(self):
|
| 1042 |
+
"""
|
| 1043 |
+
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
|
| 1044 |
+
"""
|
| 1045 |
+
pass
|
| 1046 |
+
|
| 1047 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 1048 |
+
"""
|
| 1049 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 1050 |
+
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
|
| 1051 |
+
:return:
|
| 1052 |
+
"""
|
| 1053 |
+
pass
|
| 1054 |
+
|
| 1055 |
+
@property
|
| 1056 |
+
def padding(self):
|
| 1057 |
+
"""
|
| 1058 |
+
Get the current padding parameters
|
| 1059 |
+
|
| 1060 |
+
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
|
| 1061 |
+
|
| 1062 |
+
Returns:
|
| 1063 |
+
(:obj:`dict`, `optional`):
|
| 1064 |
+
A dict with the current padding parameters if padding is enabled
|
| 1065 |
+
"""
|
| 1066 |
+
pass
|
| 1067 |
+
|
| 1068 |
+
def post_process(self, encoding, pair=None, add_special_tokens=True):
|
| 1069 |
+
"""
|
| 1070 |
+
Apply all the post-processing steps to the given encodings.
|
| 1071 |
+
|
| 1072 |
+
The various steps are:
|
| 1073 |
+
|
| 1074 |
+
1. Truncate according to the set truncation params (provided with
|
| 1075 |
+
:meth:`~tokenizers.Tokenizer.enable_truncation`)
|
| 1076 |
+
2. Apply the :class:`~tokenizers.processors.PostProcessor`
|
| 1077 |
+
3. Pad according to the set padding params (provided with
|
| 1078 |
+
:meth:`~tokenizers.Tokenizer.enable_padding`)
|
| 1079 |
+
|
| 1080 |
+
Args:
|
| 1081 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 1082 |
+
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
|
| 1083 |
+
|
| 1084 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 1085 |
+
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
|
| 1086 |
+
|
| 1087 |
+
add_special_tokens (:obj:`bool`):
|
| 1088 |
+
Whether to add the special tokens
|
| 1089 |
+
|
| 1090 |
+
Returns:
|
| 1091 |
+
:class:`~tokenizers.Encoding`: The final post-processed encoding
|
| 1092 |
+
"""
|
| 1093 |
+
pass
|
| 1094 |
+
|
| 1095 |
+
@property
|
| 1096 |
+
def post_processor(self):
|
| 1097 |
+
"""
|
| 1098 |
+
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
|
| 1099 |
+
"""
|
| 1100 |
+
pass
|
| 1101 |
+
|
| 1102 |
+
@property
|
| 1103 |
+
def pre_tokenizer(self):
|
| 1104 |
+
"""
|
| 1105 |
+
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
|
| 1106 |
+
"""
|
| 1107 |
+
pass
|
| 1108 |
+
|
| 1109 |
+
def save(self, path, pretty=True):
|
| 1110 |
+
"""
|
| 1111 |
+
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
|
| 1112 |
+
|
| 1113 |
+
Args:
|
| 1114 |
+
path (:obj:`str`):
|
| 1115 |
+
A path to a file in which to save the serialized tokenizer.
|
| 1116 |
+
|
| 1117 |
+
pretty (:obj:`bool`, defaults to :obj:`True`):
|
| 1118 |
+
Whether the JSON file should be pretty formatted.
|
| 1119 |
+
"""
|
| 1120 |
+
pass
|
| 1121 |
+
|
| 1122 |
+
def to_str(self, pretty=False):
|
| 1123 |
+
"""
|
| 1124 |
+
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
|
| 1125 |
+
|
| 1126 |
+
Args:
|
| 1127 |
+
pretty (:obj:`bool`, defaults to :obj:`False`):
|
| 1128 |
+
Whether the JSON string should be pretty formatted.
|
| 1129 |
+
|
| 1130 |
+
Returns:
|
| 1131 |
+
:obj:`str`: A string representing the serialized Tokenizer
|
| 1132 |
+
"""
|
| 1133 |
+
pass
|
| 1134 |
+
|
| 1135 |
+
def token_to_id(self, token):
|
| 1136 |
+
"""
|
| 1137 |
+
Convert the given token to its corresponding id if it exists
|
| 1138 |
+
|
| 1139 |
+
Args:
|
| 1140 |
+
token (:obj:`str`):
|
| 1141 |
+
The token to convert
|
| 1142 |
+
|
| 1143 |
+
Returns:
|
| 1144 |
+
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
|
| 1145 |
+
"""
|
| 1146 |
+
pass
|
| 1147 |
+
|
| 1148 |
+
def train(self, files, trainer=None):
|
| 1149 |
+
"""
|
| 1150 |
+
Train the Tokenizer using the given files.
|
| 1151 |
+
|
| 1152 |
+
Reads the files line by line, while keeping all the whitespace, even new lines.
|
| 1153 |
+
If you want to train from data store in-memory, you can check
|
| 1154 |
+
:meth:`~tokenizers.Tokenizer.train_from_iterator`
|
| 1155 |
+
|
| 1156 |
+
Args:
|
| 1157 |
+
files (:obj:`List[str]`):
|
| 1158 |
+
A list of path to the files that we should use for training
|
| 1159 |
+
|
| 1160 |
+
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
|
| 1161 |
+
An optional trainer that should be used to train our Model
|
| 1162 |
+
"""
|
| 1163 |
+
pass
|
| 1164 |
+
|
| 1165 |
+
def train_from_iterator(self, iterator, trainer=None, length=None):
|
| 1166 |
+
"""
|
| 1167 |
+
Train the Tokenizer using the provided iterator.
|
| 1168 |
+
|
| 1169 |
+
You can provide anything that is a Python Iterator
|
| 1170 |
+
|
| 1171 |
+
* A list of sequences :obj:`List[str]`
|
| 1172 |
+
* A generator that yields :obj:`str` or :obj:`List[str]`
|
| 1173 |
+
* A Numpy array of strings
|
| 1174 |
+
* ...
|
| 1175 |
+
|
| 1176 |
+
Args:
|
| 1177 |
+
iterator (:obj:`Iterator`):
|
| 1178 |
+
Any iterator over strings or list of strings
|
| 1179 |
+
|
| 1180 |
+
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
|
| 1181 |
+
An optional trainer that should be used to train our Model
|
| 1182 |
+
|
| 1183 |
+
length (:obj:`int`, `optional`):
|
| 1184 |
+
The total number of sequences in the iterator. This is used to
|
| 1185 |
+
provide meaningful progress tracking
|
| 1186 |
+
"""
|
| 1187 |
+
pass
|
| 1188 |
+
|
| 1189 |
+
@property
|
| 1190 |
+
def truncation(self):
|
| 1191 |
+
"""
|
| 1192 |
+
Get the currently set truncation parameters
|
| 1193 |
+
|
| 1194 |
+
`Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
|
| 1195 |
+
|
| 1196 |
+
Returns:
|
| 1197 |
+
(:obj:`dict`, `optional`):
|
| 1198 |
+
A dict with the current truncation parameters if truncation is enabled
|
| 1199 |
+
"""
|
| 1200 |
+
pass
|
parrot/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base_tokenizer import BaseTokenizer
|
| 2 |
+
from .bert_wordpiece import BertWordPieceTokenizer
|
| 3 |
+
from .byte_level_bpe import ByteLevelBPETokenizer
|
| 4 |
+
from .char_level_bpe import CharBPETokenizer
|
| 5 |
+
from .sentencepiece_bpe import SentencePieceBPETokenizer
|
| 6 |
+
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (548 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc
ADDED
|
Binary file (4.25 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Iterator, List, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
|
| 4 |
+
from tokenizers.models import BPE
|
| 5 |
+
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
|
| 6 |
+
|
| 7 |
+
from .base_tokenizer import BaseTokenizer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ByteLevelBPETokenizer(BaseTokenizer):
|
| 11 |
+
"""ByteLevelBPETokenizer
|
| 12 |
+
|
| 13 |
+
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
vocab: Optional[Union[str, Dict[str, int]]] = None,
|
| 19 |
+
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
|
| 20 |
+
add_prefix_space: bool = False,
|
| 21 |
+
lowercase: bool = False,
|
| 22 |
+
dropout: Optional[float] = None,
|
| 23 |
+
unicode_normalizer: Optional[str] = None,
|
| 24 |
+
continuing_subword_prefix: Optional[str] = None,
|
| 25 |
+
end_of_word_suffix: Optional[str] = None,
|
| 26 |
+
trim_offsets: bool = False,
|
| 27 |
+
):
|
| 28 |
+
if vocab is not None and merges is not None:
|
| 29 |
+
tokenizer = Tokenizer(
|
| 30 |
+
BPE(
|
| 31 |
+
vocab,
|
| 32 |
+
merges,
|
| 33 |
+
dropout=dropout,
|
| 34 |
+
continuing_subword_prefix=continuing_subword_prefix or "",
|
| 35 |
+
end_of_word_suffix=end_of_word_suffix or "",
|
| 36 |
+
)
|
| 37 |
+
)
|
| 38 |
+
else:
|
| 39 |
+
tokenizer = Tokenizer(BPE())
|
| 40 |
+
|
| 41 |
+
# Check for Unicode normalization first (before everything else)
|
| 42 |
+
normalizers = []
|
| 43 |
+
|
| 44 |
+
if unicode_normalizer:
|
| 45 |
+
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
|
| 46 |
+
|
| 47 |
+
if lowercase:
|
| 48 |
+
normalizers += [Lowercase()]
|
| 49 |
+
|
| 50 |
+
# Create the normalizer structure
|
| 51 |
+
if len(normalizers) > 0:
|
| 52 |
+
if len(normalizers) > 1:
|
| 53 |
+
tokenizer.normalizer = Sequence(normalizers)
|
| 54 |
+
else:
|
| 55 |
+
tokenizer.normalizer = normalizers[0]
|
| 56 |
+
|
| 57 |
+
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
|
| 58 |
+
tokenizer.decoder = decoders.ByteLevel()
|
| 59 |
+
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
|
| 60 |
+
|
| 61 |
+
parameters = {
|
| 62 |
+
"model": "ByteLevelBPE",
|
| 63 |
+
"add_prefix_space": add_prefix_space,
|
| 64 |
+
"lowercase": lowercase,
|
| 65 |
+
"dropout": dropout,
|
| 66 |
+
"unicode_normalizer": unicode_normalizer,
|
| 67 |
+
"continuing_subword_prefix": continuing_subword_prefix,
|
| 68 |
+
"end_of_word_suffix": end_of_word_suffix,
|
| 69 |
+
"trim_offsets": trim_offsets,
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
super().__init__(tokenizer, parameters)
|
| 73 |
+
|
| 74 |
+
@staticmethod
|
| 75 |
+
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
|
| 76 |
+
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
|
| 77 |
+
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
|
| 78 |
+
|
| 79 |
+
def train(
|
| 80 |
+
self,
|
| 81 |
+
files: Union[str, List[str]],
|
| 82 |
+
vocab_size: int = 30000,
|
| 83 |
+
min_frequency: int = 2,
|
| 84 |
+
show_progress: bool = True,
|
| 85 |
+
special_tokens: List[Union[str, AddedToken]] = [],
|
| 86 |
+
):
|
| 87 |
+
"""Train the model using the given files"""
|
| 88 |
+
|
| 89 |
+
trainer = trainers.BpeTrainer(
|
| 90 |
+
vocab_size=vocab_size,
|
| 91 |
+
min_frequency=min_frequency,
|
| 92 |
+
show_progress=show_progress,
|
| 93 |
+
special_tokens=special_tokens,
|
| 94 |
+
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
|
| 95 |
+
)
|
| 96 |
+
if isinstance(files, str):
|
| 97 |
+
files = [files]
|
| 98 |
+
self._tokenizer.train(files, trainer=trainer)
|
| 99 |
+
|
| 100 |
+
def train_from_iterator(
|
| 101 |
+
self,
|
| 102 |
+
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
|
| 103 |
+
vocab_size: int = 30000,
|
| 104 |
+
min_frequency: int = 2,
|
| 105 |
+
show_progress: bool = True,
|
| 106 |
+
special_tokens: List[Union[str, AddedToken]] = [],
|
| 107 |
+
length: Optional[int] = None,
|
| 108 |
+
):
|
| 109 |
+
"""Train the model using the given iterator"""
|
| 110 |
+
|
| 111 |
+
trainer = trainers.BpeTrainer(
|
| 112 |
+
vocab_size=vocab_size,
|
| 113 |
+
min_frequency=min_frequency,
|
| 114 |
+
show_progress=show_progress,
|
| 115 |
+
special_tokens=special_tokens,
|
| 116 |
+
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
|
| 117 |
+
)
|
| 118 |
+
self._tokenizer.train_from_iterator(
|
| 119 |
+
iterator,
|
| 120 |
+
trainer=trainer,
|
| 121 |
+
length=length,
|
| 122 |
+
)
|
parrot/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from typing import Iterator, List, Optional, Union, Tuple
|
| 4 |
+
|
| 5 |
+
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
|
| 6 |
+
from tokenizers.models import Unigram
|
| 7 |
+
|
| 8 |
+
from .base_tokenizer import BaseTokenizer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece Unigram Tokenizer

    Represents the Unigram algorithm, with the pretokenization used by SentencePiece
    """

    def __init__(
        self,
        vocab: Optional[List[Tuple[str, float]]] = None,
        replacement: str = "▁",
        add_prefix_space: bool = True,
    ):
        if vocab is not None:
            # Let Unigram(..) fail if only one of them is None
            tokenizer = Tokenizer(Unigram(vocab))
        else:
            # No vocab given: start from an empty Unigram model (to be trained).
            tokenizer = Tokenizer(Unigram())

        # SentencePiece-style normalization: NMT cleanup, NFKC, then collapse
        # runs of 2+ spaces into a single space.
        tokenizer.normalizer = normalizers.Sequence(
            [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
        )
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        # Recorded so the configuration can be introspected/serialized later.
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
    ):
        """
        Train the model using the given files

        Args:
            files (:obj:`List[str]`):
                A list of path to the files that we should use for training
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
        """

        # Avoid mutable default arguments: materialize fresh lists per call.
        if special_tokens is None:
            special_tokens = []

        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens,
            show_progress=show_progress,
            initial_alphabet=initial_alphabet,
            unk_token=unk_token,
        )

        # A single path is accepted as a convenience; normalize to a list.
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
        length: Optional[int] = None,
    ):
        """
        Train the model using the given iterator

        Args:
            iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
                Any iterator over strings or list of strings
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
            length (:obj:`int`, `optional`):
                The total number of sequences in the iterator. This is used to
                provide meaningful progress tracking
        """

        # Avoid mutable default arguments: materialize fresh lists per call.
        if special_tokens is None:
            special_tokens = []

        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens,
            show_progress=show_progress,
            initial_alphabet=initial_alphabet,
            unk_token=unk_token,
        )

        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )

    @staticmethod
    def from_spm(filename: str):
        # Build a tokenizer from a native SentencePiece model file (``.model``).
        # Requires the generated ``sentencepiece_model_pb2`` protobuf module to
        # be importable from the current working directory.
        try:
            import sys

            sys.path.append(".")

            import sentencepiece_model_pb2 as model
        except Exception:
            raise Exception(
                "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
            )

        m = model.ModelProto()
        # NOTE(review): file handle is never explicitly closed (no ``with``) —
        # relies on refcounting; acceptable here but worth confirming upstream.
        m.ParseFromString(open(filename, "rb").read())

        precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
        # The Unigram model expects (piece, score) pairs.
        vocab = [(piece.piece, piece.score) for piece in m.pieces]
        unk_id = m.trainer_spec.unk_id
        model_type = m.trainer_spec.model_type
        byte_fallback = m.trainer_spec.byte_fallback
        # model_type == 1 is SentencePiece's UNIGRAM; anything else is unsupported here.
        if model_type != 1:
            raise Exception(
                "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
            )

        replacement = "▁"
        add_prefix_space = True

        tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))

        # Reuse SentencePiece's own precompiled normalization table when present.
        if precompiled_charsmap:
            tokenizer.normalizer = normalizers.Sequence(
                [
                    normalizers.Precompiled(precompiled_charsmap),
                    normalizers.Replace(Regex(" {2,}"), " "),
                ]
            )
        else:
            tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        parameters = {
            "model": "SentencePieceUnigram",
        }

        # Bypass SentencePieceUnigramTokenizer.__init__ (which would rebuild the
        # tokenizer) and initialize the BaseTokenizer layer directly.
        obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
        BaseTokenizer.__init__(obj, tokenizer, parameters)
        return obj
|
parrot/lib/python3.10/site-packages/tokenizers/models/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
from .. import models

# Re-export the native (compiled) model classes at this package level.
Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
|
parrot/lib/python3.10/site-packages/tokenizers/models/__init__.pyi
ADDED
|
@@ -0,0 +1,591 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class Model:
    """
    Base class for all models

    The model represents the actual tokenization algorithm. This is the part that
    will contain and manage the learned vocabulary.

    This class cannot be constructed directly. Please use one of the concrete models.
    """

    # Stub signatures for the native implementation (bodies are ``pass``).
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass

    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass

    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created.
        Any file with the same name that already exists in this folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files

            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass

    # NOTE(review): parameter is named ``tokens`` but the docstring documents
    # ``token`` — quirk of the generated stub; verify against the generator.
    def token_to_id(self, tokens):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass

    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass
|
| 81 |
+
|
| 82 |
+
class BPE(Model):
    """
    An implementation of the BPE (Byte-Pair Encoding) algorithm

    Args:
        vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionnary of string keys and their ids :obj:`{"am": 0,...}`

        merges (:obj:`List[Tuple[str, str]]`, `optional`):
            A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`

        cache_capacity (:obj:`int`, `optional`):
            The number of words that the BPE cache can contain. The cache allows
            to speed-up the process by keeping the result of the merge operations
            for a number of words.

        dropout (:obj:`float`, `optional`):
            A float between 0 and 1 that represents the BPE dropout to use.

        unk_token (:obj:`str`, `optional`):
            The unknown token to be used by the model.

        continuing_subword_prefix (:obj:`str`, `optional`):
            The prefix to attach to subword units that don't represent a beginning of word.

        end_of_word_suffix (:obj:`str`, `optional`):
            The suffix to attach to subword units that represent an end of word.

        fuse_unk (:obj:`bool`, `optional`):
            Whether to fuse any subsequent unknown tokens into a single one

        byte_fallback (:obj:`bool`, `optional`):
            Whether to use spm byte-fallback trick (defaults to False)

        ignore_merges (:obj:`bool`, `optional`):
            Whether or not to match tokens with the vocab before using merges.
    """

    def __init__(
        self,
        vocab=None,
        merges=None,
        cache_capacity=None,
        dropout=None,
        unk_token=None,
        continuing_subword_prefix=None,
        end_of_word_suffix=None,
        fuse_unk=None,
        byte_fallback=False,
        ignore_merges=False,
    ):
        pass

    # NOTE(review): declared ``@staticmethod`` yet takes ``cls`` and names the
    # second file ``merge`` (docstring says ``merges``) — generated-stub quirk;
    # verify against the generator rather than hand-editing.
    @staticmethod
    def from_file(cls, vocab, merge, **kwargs):
        """
        Instantiate a BPE model from the given files.

        This method is roughly equivalent to doing::

           vocab, merges = BPE.read_file(vocab_filename, merges_filename)
           bpe = BPE(vocab, merges)

        If you don't need to keep the :obj:`vocab, merges` values lying around,
        this method is more optimized than manually calling
        :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

            merges (:obj:`str`):
                The path to a :obj:`merges.txt` file

        Returns:
            :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
        """
        pass

    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass

    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass

    # NOTE(review): declared ``@staticmethod`` yet takes ``self`` — same
    # generated-stub quirk as ``from_file`` above.
    @staticmethod
    def read_file(self, vocab, merges):
        """
        Read a :obj:`vocab.json` and a :obj:`merges.txt` files

        This method provides a way to read and parse the content of these files,
        returning the relevant data structures. If you want to instantiate some BPE models
        from memory, this method gives you the expected input from the standard files.

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

            merges (:obj:`str`):
                The path to a :obj:`merges.txt` file

        Returns:
            A :obj:`Tuple` with the vocab and the merges:
                The vocabulary and merges loaded into memory
        """
        pass

    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created.
        Any file with the same name that already exists in this folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files

            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass

    def token_to_id(self, tokens):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass

    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass
|
| 252 |
+
|
| 253 |
+
class Unigram(Model):
    """
    An implementation of the Unigram algorithm

    Args:
        vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`):
            A list of vocabulary items and their relative score [("am", -0.2442),...]
    """

    # NOTE(review): ``unk_id`` and ``byte_fallback`` have no defaults here while
    # the docstring marks ``vocab`` optional — generated-stub quirk; verify.
    def __init__(self, vocab, unk_id, byte_fallback):
        pass

    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass

    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass

    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created.
        Any file with the same name that already exists in this folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files

            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass

    def token_to_id(self, tokens):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass

    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass
|
| 334 |
+
|
| 335 |
+
class WordLevel(Model):
    """
    An implementation of the WordLevel algorithm

    Most simple tokenizer model based on mapping tokens to their corresponding id.

    Args:
        vocab (:obj:`str`, `optional`):
            A dictionnary of string keys and their ids :obj:`{"am": 0,...}`

        unk_token (:obj:`str`, `optional`):
            The unknown token to be used by the model.
    """

    def __init__(self, vocab, unk_token):
        pass

    @staticmethod
    def from_file(vocab, unk_token):
        """
        Instantiate a WordLevel model from the given file

        This method is roughly equivalent to doing::

            vocab = WordLevel.read_file(vocab_filename)
            wordlevel = WordLevel(vocab)

        If you don't need to keep the :obj:`vocab` values lying around, this method is
        more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
        initialize a :class:`~tokenizers.models.WordLevel`

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

        Returns:
            :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
        """
        pass

    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass

    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass

    @staticmethod
    def read_file(vocab):
        """
        Read a :obj:`vocab.json`

        This method provides a way to read and parse the content of a vocabulary file,
        returning the relevant data structures. If you want to instantiate some WordLevel models
        from memory, this method gives you the expected input from the standard files.

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

        Returns:
            :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
        """
        pass

    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created.
        Any file with the same name that already exists in this folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files

            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass

    def token_to_id(self, tokens):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass

    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass
|
| 462 |
+
|
| 463 |
+
class WordPiece(Model):
|
| 464 |
+
"""
|
| 465 |
+
An implementation of the WordPiece algorithm
|
| 466 |
+
|
| 467 |
+
Args:
|
| 468 |
+
vocab (:obj:`Dict[str, int]`, `optional`):
|
| 469 |
+
A dictionnary of string keys and their ids :obj:`{"am": 0,...}`
|
| 470 |
+
|
| 471 |
+
unk_token (:obj:`str`, `optional`):
|
| 472 |
+
The unknown token to be used by the model.
|
| 473 |
+
|
| 474 |
+
max_input_chars_per_word (:obj:`int`, `optional`):
|
| 475 |
+
The maximum number of characters to authorize in a single word.
|
| 476 |
+
"""
|
| 477 |
+
def __init__(self, vocab, unk_token, max_input_chars_per_word):
|
| 478 |
+
pass
|
| 479 |
+
|
| 480 |
+
@staticmethod
|
| 481 |
+
def from_file(vocab, **kwargs):
|
| 482 |
+
"""
|
| 483 |
+
Instantiate a WordPiece model from the given file
|
| 484 |
+
|
| 485 |
+
This method is roughly equivalent to doing::
|
| 486 |
+
|
| 487 |
+
vocab = WordPiece.read_file(vocab_filename)
|
| 488 |
+
wordpiece = WordPiece(vocab)
|
| 489 |
+
|
| 490 |
+
If you don't need to keep the :obj:`vocab` values lying around, this method is
|
| 491 |
+
more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
|
| 492 |
+
initialize a :class:`~tokenizers.models.WordPiece`
|
| 493 |
+
|
| 494 |
+
Args:
|
| 495 |
+
vocab (:obj:`str`):
|
| 496 |
+
The path to a :obj:`vocab.txt` file
|
| 497 |
+
|
| 498 |
+
Returns:
|
| 499 |
+
:class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
|
| 500 |
+
"""
|
| 501 |
+
pass
|
| 502 |
+
|
| 503 |
+
def get_trainer(self):
|
| 504 |
+
"""
|
| 505 |
+
Get the associated :class:`~tokenizers.trainers.Trainer`
|
| 506 |
+
|
| 507 |
+
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
|
| 508 |
+
:class:`~tokenizers.models.Model`.
|
| 509 |
+
|
| 510 |
+
Returns:
|
| 511 |
+
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
|
| 512 |
+
"""
|
| 513 |
+
pass
|
| 514 |
+
|
| 515 |
+
def id_to_token(self, id):
|
| 516 |
+
"""
|
| 517 |
+
Get the token associated to an ID
|
| 518 |
+
|
| 519 |
+
Args:
|
| 520 |
+
id (:obj:`int`):
|
| 521 |
+
An ID to convert to a token
|
| 522 |
+
|
| 523 |
+
Returns:
|
| 524 |
+
:obj:`str`: The token associated to the ID
|
| 525 |
+
"""
|
| 526 |
+
pass
|
| 527 |
+
|
| 528 |
+
@staticmethod
|
| 529 |
+
def read_file(vocab):
|
| 530 |
+
"""
|
| 531 |
+
Read a :obj:`vocab.txt` file
|
| 532 |
+
|
| 533 |
+
This method provides a way to read and parse the content of a standard `vocab.txt`
|
| 534 |
+
file as used by the WordPiece Model, returning the relevant data structures. If you
|
| 535 |
+
want to instantiate some WordPiece models from memory, this method gives you the
|
| 536 |
+
expected input from the standard files.
|
| 537 |
+
|
| 538 |
+
Args:
|
| 539 |
+
vocab (:obj:`str`):
|
| 540 |
+
The path to a :obj:`vocab.txt` file
|
| 541 |
+
|
| 542 |
+
Returns:
|
| 543 |
+
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
|
| 544 |
+
"""
|
| 545 |
+
pass
|
| 546 |
+
|
| 547 |
+
def save(self, folder, prefix):
|
| 548 |
+
"""
|
| 549 |
+
Save the current model
|
| 550 |
+
|
| 551 |
+
Save the current model in the given folder, using the given prefix for the various
|
| 552 |
+
files that will get created.
|
| 553 |
+
Any file with the same name that already exists in this folder will be overwritten.
|
| 554 |
+
|
| 555 |
+
Args:
|
| 556 |
+
folder (:obj:`str`):
|
| 557 |
+
The path to the target folder in which to save the various files
|
| 558 |
+
|
| 559 |
+
prefix (:obj:`str`, `optional`):
|
| 560 |
+
An optional prefix, used to prefix each file name
|
| 561 |
+
|
| 562 |
+
Returns:
|
| 563 |
+
:obj:`List[str]`: The list of saved files
|
| 564 |
+
"""
|
| 565 |
+
pass
|
| 566 |
+
|
| 567 |
+
def token_to_id(self, tokens):
|
| 568 |
+
"""
|
| 569 |
+
Get the ID associated to a token
|
| 570 |
+
|
| 571 |
+
Args:
|
| 572 |
+
token (:obj:`str`):
|
| 573 |
+
A token to convert to an ID
|
| 574 |
+
|
| 575 |
+
Returns:
|
| 576 |
+
:obj:`int`: The ID associated to the token
|
| 577 |
+
"""
|
| 578 |
+
pass
|
| 579 |
+
|
| 580 |
+
def tokenize(self, sequence):
|
| 581 |
+
"""
|
| 582 |
+
Tokenize a sequence
|
| 583 |
+
|
| 584 |
+
Args:
|
| 585 |
+
sequence (:obj:`str`):
|
| 586 |
+
A sequence to tokenize
|
| 587 |
+
|
| 588 |
+
Returns:
|
| 589 |
+
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
|
| 590 |
+
"""
|
| 591 |
+
pass
|
parrot/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (283 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .. import normalizers
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
Normalizer = normalizers.Normalizer
|
| 5 |
+
BertNormalizer = normalizers.BertNormalizer
|
| 6 |
+
NFD = normalizers.NFD
|
| 7 |
+
NFKD = normalizers.NFKD
|
| 8 |
+
NFC = normalizers.NFC
|
| 9 |
+
NFKC = normalizers.NFKC
|
| 10 |
+
Sequence = normalizers.Sequence
|
| 11 |
+
Lowercase = normalizers.Lowercase
|
| 12 |
+
Prepend = normalizers.Prepend
|
| 13 |
+
Strip = normalizers.Strip
|
| 14 |
+
StripAccents = normalizers.StripAccents
|
| 15 |
+
Nmt = normalizers.Nmt
|
| 16 |
+
Precompiled = normalizers.Precompiled
|
| 17 |
+
Replace = normalizers.Replace
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
|
| 24 |
+
if normalizer not in NORMALIZERS:
|
| 25 |
+
raise ValueError(
|
| 26 |
+
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
return NORMALIZERS[normalizer]()
|
parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class Normalizer:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all normalizers
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of a
|
| 7 |
+
Normalizer will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def normalize(self, normalized):
|
| 10 |
+
"""
|
| 11 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 12 |
+
|
| 13 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 14 |
+
keep track of the alignment information. If you just want to see the result
|
| 15 |
+
of the normalization on a raw string, you can use
|
| 16 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 20 |
+
The normalized string on which to apply this
|
| 21 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 22 |
+
"""
|
| 23 |
+
pass
|
| 24 |
+
|
| 25 |
+
def normalize_str(self, sequence):
|
| 26 |
+
"""
|
| 27 |
+
Normalize the given string
|
| 28 |
+
|
| 29 |
+
This method provides a way to visualize the effect of a
|
| 30 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 31 |
+
information. If you need to get/convert offsets, you can use
|
| 32 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
sequence (:obj:`str`):
|
| 36 |
+
A string to normalize
|
| 37 |
+
|
| 38 |
+
Returns:
|
| 39 |
+
:obj:`str`: A string after normalization
|
| 40 |
+
"""
|
| 41 |
+
pass
|
| 42 |
+
|
| 43 |
+
class BertNormalizer(Normalizer):
|
| 44 |
+
"""
|
| 45 |
+
BertNormalizer
|
| 46 |
+
|
| 47 |
+
Takes care of normalizing raw text before giving it to a Bert model.
|
| 48 |
+
This includes cleaning the text, handling accents, chinese chars and lowercasing
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 52 |
+
Whether to clean the text, by removing any control characters
|
| 53 |
+
and replacing all whitespaces by the classic one.
|
| 54 |
+
|
| 55 |
+
handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 56 |
+
Whether to handle chinese chars by putting spaces around them.
|
| 57 |
+
|
| 58 |
+
strip_accents (:obj:`bool`, `optional`):
|
| 59 |
+
Whether to strip all accents. If this option is not specified (ie == None),
|
| 60 |
+
then it will be determined by the value for `lowercase` (as in the original Bert).
|
| 61 |
+
|
| 62 |
+
lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 63 |
+
Whether to lowercase.
|
| 64 |
+
"""
|
| 65 |
+
def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
def normalize(self, normalized):
|
| 69 |
+
"""
|
| 70 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 71 |
+
|
| 72 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 73 |
+
keep track of the alignment information. If you just want to see the result
|
| 74 |
+
of the normalization on a raw string, you can use
|
| 75 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 79 |
+
The normalized string on which to apply this
|
| 80 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 81 |
+
"""
|
| 82 |
+
pass
|
| 83 |
+
|
| 84 |
+
def normalize_str(self, sequence):
|
| 85 |
+
"""
|
| 86 |
+
Normalize the given string
|
| 87 |
+
|
| 88 |
+
This method provides a way to visualize the effect of a
|
| 89 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 90 |
+
information. If you need to get/convert offsets, you can use
|
| 91 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
sequence (:obj:`str`):
|
| 95 |
+
A string to normalize
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
:obj:`str`: A string after normalization
|
| 99 |
+
"""
|
| 100 |
+
pass
|
| 101 |
+
|
| 102 |
+
class Lowercase(Normalizer):
|
| 103 |
+
"""
|
| 104 |
+
Lowercase Normalizer
|
| 105 |
+
"""
|
| 106 |
+
def __init__(self):
|
| 107 |
+
pass
|
| 108 |
+
|
| 109 |
+
def normalize(self, normalized):
|
| 110 |
+
"""
|
| 111 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 112 |
+
|
| 113 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 114 |
+
keep track of the alignment information. If you just want to see the result
|
| 115 |
+
of the normalization on a raw string, you can use
|
| 116 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 117 |
+
|
| 118 |
+
Args:
|
| 119 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 120 |
+
The normalized string on which to apply this
|
| 121 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 122 |
+
"""
|
| 123 |
+
pass
|
| 124 |
+
|
| 125 |
+
def normalize_str(self, sequence):
|
| 126 |
+
"""
|
| 127 |
+
Normalize the given string
|
| 128 |
+
|
| 129 |
+
This method provides a way to visualize the effect of a
|
| 130 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 131 |
+
information. If you need to get/convert offsets, you can use
|
| 132 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 133 |
+
|
| 134 |
+
Args:
|
| 135 |
+
sequence (:obj:`str`):
|
| 136 |
+
A string to normalize
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
:obj:`str`: A string after normalization
|
| 140 |
+
"""
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
class NFC(Normalizer):
|
| 144 |
+
"""
|
| 145 |
+
NFC Unicode Normalizer
|
| 146 |
+
"""
|
| 147 |
+
def __init__(self):
|
| 148 |
+
pass
|
| 149 |
+
|
| 150 |
+
def normalize(self, normalized):
|
| 151 |
+
"""
|
| 152 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 153 |
+
|
| 154 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 155 |
+
keep track of the alignment information. If you just want to see the result
|
| 156 |
+
of the normalization on a raw string, you can use
|
| 157 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 161 |
+
The normalized string on which to apply this
|
| 162 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 163 |
+
"""
|
| 164 |
+
pass
|
| 165 |
+
|
| 166 |
+
def normalize_str(self, sequence):
|
| 167 |
+
"""
|
| 168 |
+
Normalize the given string
|
| 169 |
+
|
| 170 |
+
This method provides a way to visualize the effect of a
|
| 171 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 172 |
+
information. If you need to get/convert offsets, you can use
|
| 173 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
sequence (:obj:`str`):
|
| 177 |
+
A string to normalize
|
| 178 |
+
|
| 179 |
+
Returns:
|
| 180 |
+
:obj:`str`: A string after normalization
|
| 181 |
+
"""
|
| 182 |
+
pass
|
| 183 |
+
|
| 184 |
+
class NFD(Normalizer):
|
| 185 |
+
"""
|
| 186 |
+
NFD Unicode Normalizer
|
| 187 |
+
"""
|
| 188 |
+
def __init__(self):
|
| 189 |
+
pass
|
| 190 |
+
|
| 191 |
+
def normalize(self, normalized):
|
| 192 |
+
"""
|
| 193 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 194 |
+
|
| 195 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 196 |
+
keep track of the alignment information. If you just want to see the result
|
| 197 |
+
of the normalization on a raw string, you can use
|
| 198 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 199 |
+
|
| 200 |
+
Args:
|
| 201 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 202 |
+
The normalized string on which to apply this
|
| 203 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 204 |
+
"""
|
| 205 |
+
pass
|
| 206 |
+
|
| 207 |
+
def normalize_str(self, sequence):
|
| 208 |
+
"""
|
| 209 |
+
Normalize the given string
|
| 210 |
+
|
| 211 |
+
This method provides a way to visualize the effect of a
|
| 212 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 213 |
+
information. If you need to get/convert offsets, you can use
|
| 214 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 215 |
+
|
| 216 |
+
Args:
|
| 217 |
+
sequence (:obj:`str`):
|
| 218 |
+
A string to normalize
|
| 219 |
+
|
| 220 |
+
Returns:
|
| 221 |
+
:obj:`str`: A string after normalization
|
| 222 |
+
"""
|
| 223 |
+
pass
|
| 224 |
+
|
| 225 |
+
class NFKC(Normalizer):
|
| 226 |
+
"""
|
| 227 |
+
NFKC Unicode Normalizer
|
| 228 |
+
"""
|
| 229 |
+
def __init__(self):
|
| 230 |
+
pass
|
| 231 |
+
|
| 232 |
+
def normalize(self, normalized):
|
| 233 |
+
"""
|
| 234 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 235 |
+
|
| 236 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 237 |
+
keep track of the alignment information. If you just want to see the result
|
| 238 |
+
of the normalization on a raw string, you can use
|
| 239 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 240 |
+
|
| 241 |
+
Args:
|
| 242 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 243 |
+
The normalized string on which to apply this
|
| 244 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 245 |
+
"""
|
| 246 |
+
pass
|
| 247 |
+
|
| 248 |
+
def normalize_str(self, sequence):
|
| 249 |
+
"""
|
| 250 |
+
Normalize the given string
|
| 251 |
+
|
| 252 |
+
This method provides a way to visualize the effect of a
|
| 253 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 254 |
+
information. If you need to get/convert offsets, you can use
|
| 255 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 256 |
+
|
| 257 |
+
Args:
|
| 258 |
+
sequence (:obj:`str`):
|
| 259 |
+
A string to normalize
|
| 260 |
+
|
| 261 |
+
Returns:
|
| 262 |
+
:obj:`str`: A string after normalization
|
| 263 |
+
"""
|
| 264 |
+
pass
|
| 265 |
+
|
| 266 |
+
class NFKD(Normalizer):
|
| 267 |
+
"""
|
| 268 |
+
NFKD Unicode Normalizer
|
| 269 |
+
"""
|
| 270 |
+
def __init__(self):
|
| 271 |
+
pass
|
| 272 |
+
|
| 273 |
+
def normalize(self, normalized):
|
| 274 |
+
"""
|
| 275 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 276 |
+
|
| 277 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 278 |
+
keep track of the alignment information. If you just want to see the result
|
| 279 |
+
of the normalization on a raw string, you can use
|
| 280 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 284 |
+
The normalized string on which to apply this
|
| 285 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 286 |
+
"""
|
| 287 |
+
pass
|
| 288 |
+
|
| 289 |
+
def normalize_str(self, sequence):
|
| 290 |
+
"""
|
| 291 |
+
Normalize the given string
|
| 292 |
+
|
| 293 |
+
This method provides a way to visualize the effect of a
|
| 294 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 295 |
+
information. If you need to get/convert offsets, you can use
|
| 296 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 297 |
+
|
| 298 |
+
Args:
|
| 299 |
+
sequence (:obj:`str`):
|
| 300 |
+
A string to normalize
|
| 301 |
+
|
| 302 |
+
Returns:
|
| 303 |
+
:obj:`str`: A string after normalization
|
| 304 |
+
"""
|
| 305 |
+
pass
|
| 306 |
+
|
| 307 |
+
class Nmt(Normalizer):
|
| 308 |
+
"""
|
| 309 |
+
Nmt normalizer
|
| 310 |
+
"""
|
| 311 |
+
def __init__(self):
|
| 312 |
+
pass
|
| 313 |
+
|
| 314 |
+
def normalize(self, normalized):
|
| 315 |
+
"""
|
| 316 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 317 |
+
|
| 318 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 319 |
+
keep track of the alignment information. If you just want to see the result
|
| 320 |
+
of the normalization on a raw string, you can use
|
| 321 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 322 |
+
|
| 323 |
+
Args:
|
| 324 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 325 |
+
The normalized string on which to apply this
|
| 326 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 327 |
+
"""
|
| 328 |
+
pass
|
| 329 |
+
|
| 330 |
+
def normalize_str(self, sequence):
|
| 331 |
+
"""
|
| 332 |
+
Normalize the given string
|
| 333 |
+
|
| 334 |
+
This method provides a way to visualize the effect of a
|
| 335 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 336 |
+
information. If you need to get/convert offsets, you can use
|
| 337 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 338 |
+
|
| 339 |
+
Args:
|
| 340 |
+
sequence (:obj:`str`):
|
| 341 |
+
A string to normalize
|
| 342 |
+
|
| 343 |
+
Returns:
|
| 344 |
+
:obj:`str`: A string after normalization
|
| 345 |
+
"""
|
| 346 |
+
pass
|
| 347 |
+
|
| 348 |
+
class Precompiled(Normalizer):
|
| 349 |
+
"""
|
| 350 |
+
Precompiled normalizer
|
| 351 |
+
Don't use manually it is used for compatiblity for SentencePiece.
|
| 352 |
+
"""
|
| 353 |
+
def __init__(self, precompiled_charsmap):
|
| 354 |
+
pass
|
| 355 |
+
|
| 356 |
+
def normalize(self, normalized):
|
| 357 |
+
"""
|
| 358 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 359 |
+
|
| 360 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 361 |
+
keep track of the alignment information. If you just want to see the result
|
| 362 |
+
of the normalization on a raw string, you can use
|
| 363 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 364 |
+
|
| 365 |
+
Args:
|
| 366 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 367 |
+
The normalized string on which to apply this
|
| 368 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 369 |
+
"""
|
| 370 |
+
pass
|
| 371 |
+
|
| 372 |
+
def normalize_str(self, sequence):
|
| 373 |
+
"""
|
| 374 |
+
Normalize the given string
|
| 375 |
+
|
| 376 |
+
This method provides a way to visualize the effect of a
|
| 377 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 378 |
+
information. If you need to get/convert offsets, you can use
|
| 379 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 380 |
+
|
| 381 |
+
Args:
|
| 382 |
+
sequence (:obj:`str`):
|
| 383 |
+
A string to normalize
|
| 384 |
+
|
| 385 |
+
Returns:
|
| 386 |
+
:obj:`str`: A string after normalization
|
| 387 |
+
"""
|
| 388 |
+
pass
|
| 389 |
+
|
| 390 |
+
class Prepend(Normalizer):
|
| 391 |
+
"""
|
| 392 |
+
Prepend normalizer
|
| 393 |
+
"""
|
| 394 |
+
def __init__(self, prepend):
|
| 395 |
+
pass
|
| 396 |
+
|
| 397 |
+
def normalize(self, normalized):
|
| 398 |
+
"""
|
| 399 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 400 |
+
|
| 401 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 402 |
+
keep track of the alignment information. If you just want to see the result
|
| 403 |
+
of the normalization on a raw string, you can use
|
| 404 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 405 |
+
|
| 406 |
+
Args:
|
| 407 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 408 |
+
The normalized string on which to apply this
|
| 409 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 410 |
+
"""
|
| 411 |
+
pass
|
| 412 |
+
|
| 413 |
+
def normalize_str(self, sequence):
|
| 414 |
+
"""
|
| 415 |
+
Normalize the given string
|
| 416 |
+
|
| 417 |
+
This method provides a way to visualize the effect of a
|
| 418 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 419 |
+
information. If you need to get/convert offsets, you can use
|
| 420 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 421 |
+
|
| 422 |
+
Args:
|
| 423 |
+
sequence (:obj:`str`):
|
| 424 |
+
A string to normalize
|
| 425 |
+
|
| 426 |
+
Returns:
|
| 427 |
+
:obj:`str`: A string after normalization
|
| 428 |
+
"""
|
| 429 |
+
pass
|
| 430 |
+
|
| 431 |
+
class Replace(Normalizer):
|
| 432 |
+
"""
|
| 433 |
+
Replace normalizer
|
| 434 |
+
"""
|
| 435 |
+
def __init__(self, pattern, content):
|
| 436 |
+
pass
|
| 437 |
+
|
| 438 |
+
def normalize(self, normalized):
|
| 439 |
+
"""
|
| 440 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 441 |
+
|
| 442 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 443 |
+
keep track of the alignment information. If you just want to see the result
|
| 444 |
+
of the normalization on a raw string, you can use
|
| 445 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 446 |
+
|
| 447 |
+
Args:
|
| 448 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 449 |
+
The normalized string on which to apply this
|
| 450 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 451 |
+
"""
|
| 452 |
+
pass
|
| 453 |
+
|
| 454 |
+
def normalize_str(self, sequence):
|
| 455 |
+
"""
|
| 456 |
+
Normalize the given string
|
| 457 |
+
|
| 458 |
+
This method provides a way to visualize the effect of a
|
| 459 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 460 |
+
information. If you need to get/convert offsets, you can use
|
| 461 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 462 |
+
|
| 463 |
+
Args:
|
| 464 |
+
sequence (:obj:`str`):
|
| 465 |
+
A string to normalize
|
| 466 |
+
|
| 467 |
+
Returns:
|
| 468 |
+
:obj:`str`: A string after normalization
|
| 469 |
+
"""
|
| 470 |
+
pass
|
| 471 |
+
|
| 472 |
+
class Sequence(Normalizer):
|
| 473 |
+
"""
|
| 474 |
+
Allows concatenating multiple other Normalizer as a Sequence.
|
| 475 |
+
All the normalizers run in sequence in the given order
|
| 476 |
+
|
| 477 |
+
Args:
|
| 478 |
+
normalizers (:obj:`List[Normalizer]`):
|
| 479 |
+
A list of Normalizer to be run as a sequence
|
| 480 |
+
"""
|
| 481 |
+
def normalize(self, normalized):
|
| 482 |
+
"""
|
| 483 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 484 |
+
|
| 485 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 486 |
+
keep track of the alignment information. If you just want to see the result
|
| 487 |
+
of the normalization on a raw string, you can use
|
| 488 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 489 |
+
|
| 490 |
+
Args:
|
| 491 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 492 |
+
The normalized string on which to apply this
|
| 493 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 494 |
+
"""
|
| 495 |
+
pass
|
| 496 |
+
|
| 497 |
+
def normalize_str(self, sequence):
|
| 498 |
+
"""
|
| 499 |
+
Normalize the given string
|
| 500 |
+
|
| 501 |
+
This method provides a way to visualize the effect of a
|
| 502 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 503 |
+
information. If you need to get/convert offsets, you can use
|
| 504 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 505 |
+
|
| 506 |
+
Args:
|
| 507 |
+
sequence (:obj:`str`):
|
| 508 |
+
A string to normalize
|
| 509 |
+
|
| 510 |
+
Returns:
|
| 511 |
+
:obj:`str`: A string after normalization
|
| 512 |
+
"""
|
| 513 |
+
pass
|
| 514 |
+
|
| 515 |
+
class Strip(Normalizer):
|
| 516 |
+
"""
|
| 517 |
+
Strip normalizer
|
| 518 |
+
"""
|
| 519 |
+
def __init__(self, left=True, right=True):
|
| 520 |
+
pass
|
| 521 |
+
|
| 522 |
+
def normalize(self, normalized):
|
| 523 |
+
"""
|
| 524 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 525 |
+
|
| 526 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 527 |
+
keep track of the alignment information. If you just want to see the result
|
| 528 |
+
of the normalization on a raw string, you can use
|
| 529 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 530 |
+
|
| 531 |
+
Args:
|
| 532 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 533 |
+
The normalized string on which to apply this
|
| 534 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 535 |
+
"""
|
| 536 |
+
pass
|
| 537 |
+
|
| 538 |
+
def normalize_str(self, sequence):
|
| 539 |
+
"""
|
| 540 |
+
Normalize the given string
|
| 541 |
+
|
| 542 |
+
This method provides a way to visualize the effect of a
|
| 543 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 544 |
+
information. If you need to get/convert offsets, you can use
|
| 545 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 546 |
+
|
| 547 |
+
Args:
|
| 548 |
+
sequence (:obj:`str`):
|
| 549 |
+
A string to normalize
|
| 550 |
+
|
| 551 |
+
Returns:
|
| 552 |
+
:obj:`str`: A string after normalization
|
| 553 |
+
"""
|
| 554 |
+
pass
|
| 555 |
+
|
| 556 |
+
class StripAccents(Normalizer):
|
| 557 |
+
"""
|
| 558 |
+
StripAccents normalizer
|
| 559 |
+
"""
|
| 560 |
+
def __init__(self):
|
| 561 |
+
pass
|
| 562 |
+
|
| 563 |
+
def normalize(self, normalized):
|
| 564 |
+
"""
|
| 565 |
+
Normalize a :class:`~tokenizers.NormalizedString` in-place
|
| 566 |
+
|
| 567 |
+
This method allows to modify a :class:`~tokenizers.NormalizedString` to
|
| 568 |
+
keep track of the alignment information. If you just want to see the result
|
| 569 |
+
of the normalization on a raw string, you can use
|
| 570 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
|
| 571 |
+
|
| 572 |
+
Args:
|
| 573 |
+
normalized (:class:`~tokenizers.NormalizedString`):
|
| 574 |
+
The normalized string on which to apply this
|
| 575 |
+
:class:`~tokenizers.normalizers.Normalizer`
|
| 576 |
+
"""
|
| 577 |
+
pass
|
| 578 |
+
|
| 579 |
+
def normalize_str(self, sequence):
|
| 580 |
+
"""
|
| 581 |
+
Normalize the given string
|
| 582 |
+
|
| 583 |
+
This method provides a way to visualize the effect of a
|
| 584 |
+
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
|
| 585 |
+
information. If you need to get/convert offsets, you can use
|
| 586 |
+
:meth:`~tokenizers.normalizers.Normalizer.normalize`
|
| 587 |
+
|
| 588 |
+
Args:
|
| 589 |
+
sequence (:obj:`str`):
|
| 590 |
+
A string to normalize
|
| 591 |
+
|
| 592 |
+
Returns:
|
| 593 |
+
:obj:`str`: A string after normalization
|
| 594 |
+
"""
|
| 595 |
+
pass
|
parrot/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (788 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
from .. import processors
|
| 3 |
+
|
| 4 |
+
PostProcessor = processors.PostProcessor
|
| 5 |
+
BertProcessing = processors.BertProcessing
|
| 6 |
+
ByteLevel = processors.ByteLevel
|
| 7 |
+
RobertaProcessing = processors.RobertaProcessing
|
| 8 |
+
Sequence = processors.Sequence
|
| 9 |
+
TemplateProcessing = processors.TemplateProcessing
|
parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
class PostProcessor:
|
| 3 |
+
"""
|
| 4 |
+
Base class for all post-processors
|
| 5 |
+
|
| 6 |
+
This class is not supposed to be instantiated directly. Instead, any implementation of
|
| 7 |
+
a PostProcessor will return an instance of this class when instantiated.
|
| 8 |
+
"""
|
| 9 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 10 |
+
"""
|
| 11 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 12 |
+
|
| 13 |
+
Args:
|
| 14 |
+
is_pair (:obj:`bool`):
|
| 15 |
+
Whether the input would be a pair of sequences
|
| 16 |
+
|
| 17 |
+
Returns:
|
| 18 |
+
:obj:`int`: The number of tokens to add
|
| 19 |
+
"""
|
| 20 |
+
pass
|
| 21 |
+
|
| 22 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 23 |
+
"""
|
| 24 |
+
Post-process the given encodings, generating the final one
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 28 |
+
The encoding for the first sequence
|
| 29 |
+
|
| 30 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 31 |
+
The encoding for the pair sequence
|
| 32 |
+
|
| 33 |
+
add_special_tokens (:obj:`bool`):
|
| 34 |
+
Whether to add the special tokens
|
| 35 |
+
|
| 36 |
+
Return:
|
| 37 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 38 |
+
"""
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
class BertProcessing(PostProcessor):
|
| 42 |
+
"""
|
| 43 |
+
This post-processor takes care of adding the special tokens needed by
|
| 44 |
+
a Bert model:
|
| 45 |
+
|
| 46 |
+
- a SEP token
|
| 47 |
+
- a CLS token
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
sep (:obj:`Tuple[str, int]`):
|
| 51 |
+
A tuple with the string representation of the SEP token, and its id
|
| 52 |
+
|
| 53 |
+
cls (:obj:`Tuple[str, int]`):
|
| 54 |
+
A tuple with the string representation of the CLS token, and its id
|
| 55 |
+
"""
|
| 56 |
+
def __init__(self, sep, cls):
|
| 57 |
+
pass
|
| 58 |
+
|
| 59 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 60 |
+
"""
|
| 61 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
is_pair (:obj:`bool`):
|
| 65 |
+
Whether the input would be a pair of sequences
|
| 66 |
+
|
| 67 |
+
Returns:
|
| 68 |
+
:obj:`int`: The number of tokens to add
|
| 69 |
+
"""
|
| 70 |
+
pass
|
| 71 |
+
|
| 72 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 73 |
+
"""
|
| 74 |
+
Post-process the given encodings, generating the final one
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 78 |
+
The encoding for the first sequence
|
| 79 |
+
|
| 80 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 81 |
+
The encoding for the pair sequence
|
| 82 |
+
|
| 83 |
+
add_special_tokens (:obj:`bool`):
|
| 84 |
+
Whether to add the special tokens
|
| 85 |
+
|
| 86 |
+
Return:
|
| 87 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 88 |
+
"""
|
| 89 |
+
pass
|
| 90 |
+
|
| 91 |
+
class ByteLevel(PostProcessor):
|
| 92 |
+
"""
|
| 93 |
+
This post-processor takes care of trimming the offsets.
|
| 94 |
+
|
| 95 |
+
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
|
| 96 |
+
want the offsets to include these whitespaces, then this PostProcessor must be used.
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
trim_offsets (:obj:`bool`):
|
| 100 |
+
Whether to trim the whitespaces from the produced offsets.
|
| 101 |
+
"""
|
| 102 |
+
def __init__(self, trim_offsets=True):
|
| 103 |
+
pass
|
| 104 |
+
|
| 105 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 106 |
+
"""
|
| 107 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 108 |
+
|
| 109 |
+
Args:
|
| 110 |
+
is_pair (:obj:`bool`):
|
| 111 |
+
Whether the input would be a pair of sequences
|
| 112 |
+
|
| 113 |
+
Returns:
|
| 114 |
+
:obj:`int`: The number of tokens to add
|
| 115 |
+
"""
|
| 116 |
+
pass
|
| 117 |
+
|
| 118 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 119 |
+
"""
|
| 120 |
+
Post-process the given encodings, generating the final one
|
| 121 |
+
|
| 122 |
+
Args:
|
| 123 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 124 |
+
The encoding for the first sequence
|
| 125 |
+
|
| 126 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 127 |
+
The encoding for the pair sequence
|
| 128 |
+
|
| 129 |
+
add_special_tokens (:obj:`bool`):
|
| 130 |
+
Whether to add the special tokens
|
| 131 |
+
|
| 132 |
+
Return:
|
| 133 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 134 |
+
"""
|
| 135 |
+
pass
|
| 136 |
+
|
| 137 |
+
class RobertaProcessing(PostProcessor):
|
| 138 |
+
"""
|
| 139 |
+
This post-processor takes care of adding the special tokens needed by
|
| 140 |
+
a Roberta model:
|
| 141 |
+
|
| 142 |
+
- a SEP token
|
| 143 |
+
- a CLS token
|
| 144 |
+
|
| 145 |
+
It also takes care of trimming the offsets.
|
| 146 |
+
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
|
| 147 |
+
want the offsets to include these whitespaces, then this PostProcessor should be initialized
|
| 148 |
+
with :obj:`trim_offsets=True`
|
| 149 |
+
|
| 150 |
+
Args:
|
| 151 |
+
sep (:obj:`Tuple[str, int]`):
|
| 152 |
+
A tuple with the string representation of the SEP token, and its id
|
| 153 |
+
|
| 154 |
+
cls (:obj:`Tuple[str, int]`):
|
| 155 |
+
A tuple with the string representation of the CLS token, and its id
|
| 156 |
+
|
| 157 |
+
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 158 |
+
Whether to trim the whitespaces from the produced offsets.
|
| 159 |
+
|
| 160 |
+
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
| 161 |
+
Whether the add_prefix_space option was enabled during pre-tokenization. This
|
| 162 |
+
is relevant because it defines the way the offsets are trimmed out.
|
| 163 |
+
"""
|
| 164 |
+
def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
|
| 165 |
+
pass
|
| 166 |
+
|
| 167 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 168 |
+
"""
|
| 169 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 170 |
+
|
| 171 |
+
Args:
|
| 172 |
+
is_pair (:obj:`bool`):
|
| 173 |
+
Whether the input would be a pair of sequences
|
| 174 |
+
|
| 175 |
+
Returns:
|
| 176 |
+
:obj:`int`: The number of tokens to add
|
| 177 |
+
"""
|
| 178 |
+
pass
|
| 179 |
+
|
| 180 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 181 |
+
"""
|
| 182 |
+
Post-process the given encodings, generating the final one
|
| 183 |
+
|
| 184 |
+
Args:
|
| 185 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 186 |
+
The encoding for the first sequence
|
| 187 |
+
|
| 188 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 189 |
+
The encoding for the pair sequence
|
| 190 |
+
|
| 191 |
+
add_special_tokens (:obj:`bool`):
|
| 192 |
+
Whether to add the special tokens
|
| 193 |
+
|
| 194 |
+
Return:
|
| 195 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 196 |
+
"""
|
| 197 |
+
pass
|
| 198 |
+
|
| 199 |
+
class Sequence(PostProcessor):
|
| 200 |
+
"""
|
| 201 |
+
Sequence Processor
|
| 202 |
+
|
| 203 |
+
Args:
|
| 204 |
+
processors (:obj:`List[PostProcessor]`)
|
| 205 |
+
The processors that need to be chained
|
| 206 |
+
"""
|
| 207 |
+
def __init__(self, processors):
|
| 208 |
+
pass
|
| 209 |
+
|
| 210 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 211 |
+
"""
|
| 212 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 213 |
+
|
| 214 |
+
Args:
|
| 215 |
+
is_pair (:obj:`bool`):
|
| 216 |
+
Whether the input would be a pair of sequences
|
| 217 |
+
|
| 218 |
+
Returns:
|
| 219 |
+
:obj:`int`: The number of tokens to add
|
| 220 |
+
"""
|
| 221 |
+
pass
|
| 222 |
+
|
| 223 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 224 |
+
"""
|
| 225 |
+
Post-process the given encodings, generating the final one
|
| 226 |
+
|
| 227 |
+
Args:
|
| 228 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 229 |
+
The encoding for the first sequence
|
| 230 |
+
|
| 231 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 232 |
+
The encoding for the pair sequence
|
| 233 |
+
|
| 234 |
+
add_special_tokens (:obj:`bool`):
|
| 235 |
+
Whether to add the special tokens
|
| 236 |
+
|
| 237 |
+
Return:
|
| 238 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 239 |
+
"""
|
| 240 |
+
pass
|
| 241 |
+
|
| 242 |
+
class TemplateProcessing(PostProcessor):
|
| 243 |
+
"""
|
| 244 |
+
Provides a way to specify templates in order to add the special tokens to each
|
| 245 |
+
input sequence as relevant.
|
| 246 |
+
|
| 247 |
+
Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
|
| 248 |
+
delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first
|
| 249 |
+
sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
|
| 250 |
+
sequences. The final result looks like this:
|
| 251 |
+
|
| 252 |
+
- Single sequence: :obj:`[CLS] Hello there [SEP]`
|
| 253 |
+
- Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
|
| 254 |
+
|
| 255 |
+
With the type ids as following::
|
| 256 |
+
|
| 257 |
+
[CLS] ... [SEP] ... [SEP]
|
| 258 |
+
0 0 0 1 1
|
| 259 |
+
|
| 260 |
+
You can achieve such behavior using a TemplateProcessing::
|
| 261 |
+
|
| 262 |
+
TemplateProcessing(
|
| 263 |
+
single="[CLS] $0 [SEP]",
|
| 264 |
+
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
|
| 265 |
+
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
In this example, each input sequence is identified using a ``$`` construct. This identifier
|
| 269 |
+
lets us specify each input sequence, and the type_id to use. When nothing is specified,
|
| 270 |
+
it uses the default values. Here are the different ways to specify it:
|
| 271 |
+
|
| 272 |
+
- Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
|
| 273 |
+
- Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
|
| 274 |
+
- Specifying both: ``$A:0``, ``$B:1``, ...
|
| 275 |
+
|
| 276 |
+
The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
|
| 277 |
+
|
| 278 |
+
**Warning**: You must ensure that you are giving the correct tokens/ids as these
|
| 279 |
+
will be added to the Encoding without any further check. If the given ids correspond
|
| 280 |
+
to something totally different in a `Tokenizer` using this `PostProcessor`, it
|
| 281 |
+
might lead to unexpected results.
|
| 282 |
+
|
| 283 |
+
Args:
|
| 284 |
+
single (:obj:`Template`):
|
| 285 |
+
The template used for single sequences
|
| 286 |
+
|
| 287 |
+
pair (:obj:`Template`):
|
| 288 |
+
The template used when both sequences are specified
|
| 289 |
+
|
| 290 |
+
special_tokens (:obj:`Tokens`):
|
| 291 |
+
The list of special tokens used in each sequences
|
| 292 |
+
|
| 293 |
+
Types:
|
| 294 |
+
|
| 295 |
+
Template (:obj:`str` or :obj:`List`):
|
| 296 |
+
- If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
|
| 297 |
+
- If a :obj:`List[str]` is provided, a list of tokens
|
| 298 |
+
|
| 299 |
+
Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
|
| 300 |
+
- A :obj:`Tuple` with both a token and its associated ID, in any order
|
| 301 |
+
- A :obj:`dict` with the following keys:
|
| 302 |
+
- "id": :obj:`str` => The special token id, as specified in the Template
|
| 303 |
+
- "ids": :obj:`List[int]` => The associated IDs
|
| 304 |
+
- "tokens": :obj:`List[str]` => The associated tokens
|
| 305 |
+
|
| 306 |
+
The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
|
| 307 |
+
the same length.
|
| 308 |
+
"""
|
| 309 |
+
def __init__(self, single, pair, special_tokens):
|
| 310 |
+
pass
|
| 311 |
+
|
| 312 |
+
def num_special_tokens_to_add(self, is_pair):
|
| 313 |
+
"""
|
| 314 |
+
Return the number of special tokens that would be added for single/pair sentences.
|
| 315 |
+
|
| 316 |
+
Args:
|
| 317 |
+
is_pair (:obj:`bool`):
|
| 318 |
+
Whether the input would be a pair of sequences
|
| 319 |
+
|
| 320 |
+
Returns:
|
| 321 |
+
:obj:`int`: The number of tokens to add
|
| 322 |
+
"""
|
| 323 |
+
pass
|
| 324 |
+
|
| 325 |
+
def process(self, encoding, pair=None, add_special_tokens=True):
|
| 326 |
+
"""
|
| 327 |
+
Post-process the given encodings, generating the final one
|
| 328 |
+
|
| 329 |
+
Args:
|
| 330 |
+
encoding (:class:`~tokenizers.Encoding`):
|
| 331 |
+
The encoding for the first sequence
|
| 332 |
+
|
| 333 |
+
pair (:class:`~tokenizers.Encoding`, `optional`):
|
| 334 |
+
The encoding for the pair sequence
|
| 335 |
+
|
| 336 |
+
add_special_tokens (:obj:`bool`):
|
| 337 |
+
Whether to add the special tokens
|
| 338 |
+
|
| 339 |
+
Return:
|
| 340 |
+
:class:`~tokenizers.Encoding`: The final encoding
|
| 341 |
+
"""
|
| 342 |
+
pass
|
parrot/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (347 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/tools/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .visualizer import Annotation, EncodingVisualizer
|
parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (242 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.tokenized-text {
|
| 2 |
+
width:100%;
|
| 3 |
+
padding:2rem;
|
| 4 |
+
max-height: 400px;
|
| 5 |
+
overflow-y: auto;
|
| 6 |
+
box-sizing:border-box;
|
| 7 |
+
line-height:4rem; /* Lots of space between lines */
|
| 8 |
+
font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
|
| 9 |
+
box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
|
| 10 |
+
background-color: rgba(0,0,0,0.01);
|
| 11 |
+
letter-spacing:2px; /* Give some extra separation between chars */
|
| 12 |
+
}
|
| 13 |
+
.non-token{
|
| 14 |
+
/* White space and other things the tokenizer ignores*/
|
| 15 |
+
white-space: pre;
|
| 16 |
+
letter-spacing:4px;
|
| 17 |
+
border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/
|
| 18 |
+
border-bottom:1px solid #A0A0A0;
|
| 19 |
+
line-height: 1rem;
|
| 20 |
+
height: calc(100% - 2px);
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
.token {
|
| 24 |
+
white-space: pre;
|
| 25 |
+
position:relative;
|
| 26 |
+
color:black;
|
| 27 |
+
letter-spacing:2px;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
.annotation{
|
| 31 |
+
white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */
|
| 32 |
+
border-radius:4px;
|
| 33 |
+
position:relative;
|
| 34 |
+
width:fit-content;
|
| 35 |
+
}
|
| 36 |
+
.annotation:before {
|
| 37 |
+
/*The before holds the text and the after holds the background*/
|
| 38 |
+
z-index:1000; /* Make sure this is above the background */
|
| 39 |
+
content:attr(data-label); /* The annotations label is on a data attribute */
|
| 40 |
+
color:white;
|
| 41 |
+
position:absolute;
|
| 42 |
+
font-size:1rem;
|
| 43 |
+
text-align:center;
|
| 44 |
+
font-weight:bold;
|
| 45 |
+
|
| 46 |
+
top:1.75rem;
|
| 47 |
+
line-height:0;
|
| 48 |
+
left:0;
|
| 49 |
+
width:100%;
|
| 50 |
+
padding:0.5rem 0;
|
| 51 |
+
/* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
|
| 52 |
+
overflow: hidden;
|
| 53 |
+
white-space: nowrap;
|
| 54 |
+
text-overflow:ellipsis;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
.annotation:after {
|
| 58 |
+
content:attr(data-label); /* The content defines the width of the annotation*/
|
| 59 |
+
position:absolute;
|
| 60 |
+
font-size:0.75rem;
|
| 61 |
+
text-align:center;
|
| 62 |
+
font-weight:bold;
|
| 63 |
+
text-overflow:ellipsis;
|
| 64 |
+
top:1.75rem;
|
| 65 |
+
line-height:0;
|
| 66 |
+
overflow: hidden;
|
| 67 |
+
white-space: nowrap;
|
| 68 |
+
|
| 69 |
+
left:0;
|
| 70 |
+
width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
|
| 71 |
+
|
| 72 |
+
padding:0.5rem 0;
|
| 73 |
+
/* Nast hack below:
|
| 74 |
+
We set the annotations color in code because we don't know the colors at css time.
|
| 75 |
+
But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
|
| 76 |
+
So to get around that, annotations have the color set on them with a style attribute and then we
|
| 77 |
+
can get the color with currentColor.
|
| 78 |
+
Annotations wrap tokens and tokens set the color back to black
|
| 79 |
+
*/
|
| 80 |
+
background-color: currentColor;
|
| 81 |
+
}
|
| 82 |
+
.annotation:hover::after, .annotation:hover::before{
|
| 83 |
+
/* When the user hovers over an annotation expand the label to display in full
|
| 84 |
+
*/
|
| 85 |
+
min-width: fit-content;
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
.annotation:hover{
|
| 89 |
+
/* Emphasize the annotation start end with a border on hover*/
|
| 90 |
+
border-color: currentColor;
|
| 91 |
+
border: 2px solid;
|
| 92 |
+
}
|
| 93 |
+
.special-token:not(:empty){
|
| 94 |
+
/*
|
| 95 |
+
A none empty special token is like UNK (as opposed to CLS which has no representation in the text )
|
| 96 |
+
*/
|
| 97 |
+
position:relative;
|
| 98 |
+
}
|
| 99 |
+
.special-token:empty::before{
|
| 100 |
+
/* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/
|
| 101 |
+
content:attr(data-stok);
|
| 102 |
+
background:#202020;
|
| 103 |
+
font-size:0.75rem;
|
| 104 |
+
color:white;
|
| 105 |
+
margin: 0 0.25rem;
|
| 106 |
+
padding: 0.25rem;
|
| 107 |
+
border-radius:4px
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
.special-token:not(:empty):before {
|
| 111 |
+
/* Special tokens that have text (UNK) are displayed above the actual text*/
|
| 112 |
+
content:attr(data-stok);
|
| 113 |
+
position:absolute;
|
| 114 |
+
bottom:1.75rem;
|
| 115 |
+
min-width:100%;
|
| 116 |
+
width:100%;
|
| 117 |
+
height:1rem;
|
| 118 |
+
line-height:1rem;
|
| 119 |
+
font-size:1rem;
|
| 120 |
+
text-align:center;
|
| 121 |
+
color:white;
|
| 122 |
+
font-weight:bold;
|
| 123 |
+
background:#202020;
|
| 124 |
+
border-radius:10%;
|
| 125 |
+
}
|
| 126 |
+
/*
|
| 127 |
+
We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
|
| 128 |
+
instead we apply even and odd class at generation time and color them that way
|
| 129 |
+
*/
|
| 130 |
+
.even-token{
|
| 131 |
+
background:#DCDCDC ;
|
| 132 |
+
border: 1px solid #DCDCDC;
|
| 133 |
+
}
|
| 134 |
+
.odd-token{
|
| 135 |
+
background:#A0A0A0;
|
| 136 |
+
border: 1px solid #A0A0A0;
|
| 137 |
+
}
|
| 138 |
+
.even-token.multi-token,.odd-token.multi-token{
|
| 139 |
+
background: repeating-linear-gradient(
|
| 140 |
+
45deg,
|
| 141 |
+
transparent,
|
| 142 |
+
transparent 1px,
|
| 143 |
+
#ccc 1px,
|
| 144 |
+
#ccc 1px
|
| 145 |
+
),
|
| 146 |
+
/* on "bottom" */
|
| 147 |
+
linear-gradient(
|
| 148 |
+
to bottom,
|
| 149 |
+
#FFB6C1,
|
| 150 |
+
#999
|
| 151 |
+
);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
.multi-token:hover::after {
|
| 155 |
+
content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
|
| 156 |
+
color:white;
|
| 157 |
+
background-color: black;
|
| 158 |
+
position:absolute;
|
| 159 |
+
font-size:0.75rem;
|
| 160 |
+
text-align:center;
|
| 161 |
+
font-weight:bold;
|
| 162 |
+
text-overflow:ellipsis;
|
| 163 |
+
top:1.75rem;
|
| 164 |
+
line-height:0;
|
| 165 |
+
overflow: hidden;
|
| 166 |
+
white-space: nowrap;
|
| 167 |
+
left:0;
|
| 168 |
+
width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
|
| 169 |
+
padding:0.5rem 0;
|
| 170 |
+
}
|
parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
from string import Template
|
| 5 |
+
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from tokenizers import Encoding, Tokenizer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
dirname = os.path.dirname(__file__)
|
| 11 |
+
css_filename = os.path.join(dirname, "visualizer-styles.css")
|
| 12 |
+
with open(css_filename) as f:
|
| 13 |
+
css = f.read()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Annotation:
|
| 17 |
+
start: int
|
| 18 |
+
end: int
|
| 19 |
+
label: int
|
| 20 |
+
|
| 21 |
+
def __init__(self, start: int, end: int, label: str):
|
| 22 |
+
self.start = start
|
| 23 |
+
self.end = end
|
| 24 |
+
self.label = label
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
AnnotationList = List[Annotation]
|
| 28 |
+
PartialIntList = List[Optional[int]]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class CharStateKey(NamedTuple):
|
| 32 |
+
token_ix: Optional[int]
|
| 33 |
+
anno_ix: Optional[int]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class CharState:
|
| 37 |
+
char_ix: Optional[int]
|
| 38 |
+
|
| 39 |
+
def __init__(self, char_ix):
|
| 40 |
+
self.char_ix = char_ix
|
| 41 |
+
|
| 42 |
+
self.anno_ix: Optional[int] = None
|
| 43 |
+
self.tokens: List[int] = []
|
| 44 |
+
|
| 45 |
+
@property
|
| 46 |
+
def token_ix(self):
|
| 47 |
+
return self.tokens[0] if len(self.tokens) > 0 else None
|
| 48 |
+
|
| 49 |
+
@property
|
| 50 |
+
def is_multitoken(self):
|
| 51 |
+
"""
|
| 52 |
+
BPE tokenizers can output more than one token for a char
|
| 53 |
+
"""
|
| 54 |
+
return len(self.tokens) > 1
|
| 55 |
+
|
| 56 |
+
def partition_key(self) -> CharStateKey:
|
| 57 |
+
return CharStateKey(
|
| 58 |
+
token_ix=self.token_ix,
|
| 59 |
+
anno_ix=self.anno_ix,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class Aligned:
|
| 64 |
+
pass
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class EncodingVisualizer:
|
| 68 |
+
"""
|
| 69 |
+
Build an EncodingVisualizer
|
| 70 |
+
|
| 71 |
+
Args:
|
| 72 |
+
|
| 73 |
+
tokenizer (:class:`~tokenizers.Tokenizer`):
|
| 74 |
+
A tokenizer instance
|
| 75 |
+
|
| 76 |
+
default_to_notebook (:obj:`bool`):
|
| 77 |
+
Whether to render html output in a notebook by default
|
| 78 |
+
|
| 79 |
+
annotation_converter (:obj:`Callable`, `optional`):
|
| 80 |
+
An optional (lambda) function that takes an annotation in any format and returns
|
| 81 |
+
an Annotation object
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
|
| 85 |
+
|
| 86 |
+
def __init__(
|
| 87 |
+
self,
|
| 88 |
+
tokenizer: Tokenizer,
|
| 89 |
+
default_to_notebook: bool = True,
|
| 90 |
+
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
|
| 91 |
+
):
|
| 92 |
+
if default_to_notebook:
|
| 93 |
+
try:
|
| 94 |
+
from IPython.core.display import HTML, display
|
| 95 |
+
except ImportError:
|
| 96 |
+
raise Exception(
|
| 97 |
+
"""We couldn't import IPython utils for html display.
|
| 98 |
+
Are you running in a notebook?
|
| 99 |
+
You can also pass `default_to_notebook=False` to get back raw HTML
|
| 100 |
+
"""
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
self.tokenizer = tokenizer
|
| 104 |
+
self.default_to_notebook = default_to_notebook
|
| 105 |
+
self.annotation_coverter = annotation_converter
|
| 106 |
+
pass
|
| 107 |
+
|
| 108 |
+
def __call__(
|
| 109 |
+
self,
|
| 110 |
+
text: str,
|
| 111 |
+
annotations: AnnotationList = [],
|
| 112 |
+
default_to_notebook: Optional[bool] = None,
|
| 113 |
+
) -> Optional[str]:
|
| 114 |
+
"""
|
| 115 |
+
Build a visualization of the given text
|
| 116 |
+
|
| 117 |
+
Args:
|
| 118 |
+
text (:obj:`str`):
|
| 119 |
+
The text to tokenize
|
| 120 |
+
|
| 121 |
+
annotations (:obj:`List[Annotation]`, `optional`):
|
| 122 |
+
An optional list of annotations of the text. The can either be an annotation class
|
| 123 |
+
or anything else if you instantiated the visualizer with a converter function
|
| 124 |
+
|
| 125 |
+
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
|
| 126 |
+
If True, will render the html in a notebook. Otherwise returns an html string.
|
| 127 |
+
|
| 128 |
+
Returns:
|
| 129 |
+
The HTML string if default_to_notebook is False, otherwise (default) returns None and
|
| 130 |
+
renders the HTML in the notebook
|
| 131 |
+
|
| 132 |
+
"""
|
| 133 |
+
final_default_to_notebook = self.default_to_notebook
|
| 134 |
+
if default_to_notebook is not None:
|
| 135 |
+
final_default_to_notebook = default_to_notebook
|
| 136 |
+
if final_default_to_notebook:
|
| 137 |
+
try:
|
| 138 |
+
from IPython.core.display import HTML, display
|
| 139 |
+
except ImportError:
|
| 140 |
+
raise Exception(
|
| 141 |
+
"""We couldn't import IPython utils for html display.
|
| 142 |
+
Are you running in a notebook?"""
|
| 143 |
+
)
|
| 144 |
+
if self.annotation_coverter is not None:
|
| 145 |
+
annotations = list(map(self.annotation_coverter, annotations))
|
| 146 |
+
encoding = self.tokenizer.encode(text)
|
| 147 |
+
html = EncodingVisualizer.__make_html(text, encoding, annotations)
|
| 148 |
+
if final_default_to_notebook:
|
| 149 |
+
display(HTML(html))
|
| 150 |
+
else:
|
| 151 |
+
return html
|
| 152 |
+
|
| 153 |
+
@staticmethod
|
| 154 |
+
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
|
| 155 |
+
"""
|
| 156 |
+
Generates a color palette for all the labels in a given set of annotations
|
| 157 |
+
|
| 158 |
+
Args:
|
| 159 |
+
annotations (:obj:`Annotation`):
|
| 160 |
+
A list of annotations
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
:obj:`dict`: A dictionary mapping labels to colors in HSL format
|
| 164 |
+
"""
|
| 165 |
+
if len(annotations) == 0:
|
| 166 |
+
return {}
|
| 167 |
+
labels = set(map(lambda x: x.label, annotations))
|
| 168 |
+
num_labels = len(labels)
|
| 169 |
+
h_step = int(255 / num_labels)
|
| 170 |
+
if h_step < 20:
|
| 171 |
+
h_step = 20
|
| 172 |
+
s = 32
|
| 173 |
+
l = 64 # noqa: E741
|
| 174 |
+
h = 10
|
| 175 |
+
colors = {}
|
| 176 |
+
|
| 177 |
+
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
|
| 178 |
+
colors[label] = f"hsl({h},{s}%,{l}%"
|
| 179 |
+
h += h_step
|
| 180 |
+
return colors
|
| 181 |
+
|
| 182 |
+
    @staticmethod
    def consecutive_chars_to_html(
        consecutive_chars_list: List[CharState],
        text: str,
        encoding: Encoding,
    ):
        """
        Converts a list of "consecutive chars" into a single HTML element.
        Chars are consecutive if they fall under the same word, token and annotation.
        The CharState class is a named tuple with a "partition_key" method that makes it easy to
        compare if two chars are consecutive.

        Args:
            consecutive_chars_list (:obj:`List[CharState]`):
                A list of CharStates that have been grouped together

            text (:obj:`str`):
                The original text being processed

            encoding (:class:`~tokenizers.Encoding`):
                The encoding returned from the tokenizer

        Returns:
            :obj:`str`: The HTML span for a set of consecutive chars
        """
        first = consecutive_chars_list[0]
        if first.char_ix is None:
            # its a special token
            stoken = encoding.tokens[first.token_ix]
            # special tokens are represented as empty spans. We use the data attribute and css
            # magic to display it
            # NOTE(review): stoken is interpolated unquoted and unescaped — this
            # assumes special tokens contain no spaces or HTML metacharacters;
            # confirm upstream.
            return f'<span class="special-token" data-stoken={stoken}></span>'
        # We're not in a special token so this group has a start and end.
        last = consecutive_chars_list[-1]
        start = first.char_ix
        end = last.char_ix + 1
        span_text = text[start:end]
        css_classes = []  # What css classes will we apply on the resulting span
        data_items = {}  # What data attributes will we apply on the result span
        if first.token_ix is not None:
            # We can either be in a token or not (e.g. in white space)
            css_classes.append("token")
            if first.is_multitoken:
                css_classes.append("multi-token")
            if first.token_ix % 2:
                # We use this to color alternating tokens.
                # A token might be split by an annotation that ends in the middle of it, so this
                # lets us visually indicate a consecutive token despite its possible splitting in
                # the html markup
                css_classes.append("odd-token")
            else:
                # Like above, but a different color so we can see the tokens alternate
                css_classes.append("even-token")
            if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
                # This is a special token that is in the text. probably UNK
                css_classes.append("special-token")
                # TODO is this the right name for the data attribute ?
                data_items["stok"] = encoding.tokens[first.token_ix]
        else:
            # In this case we are looking at a group/single char that is not tokenized.
            # e.g. white space
            css_classes.append("non-token")
        css = f'''class="{' '.join(css_classes)}"'''
        data = ""
        for key, val in data_items.items():
            data += f' data-{key}="{val}"'
        return f"<span {css} {data} >{span_text}</span>"
|
| 249 |
+
|
| 250 |
+
    @staticmethod
    def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
        """
        Render ``text`` as a complete HTML document visualizing token boundaries
        and annotation spans.

        Walks the per-character states produced by ``__make_char_states``, groups
        consecutive characters sharing a partition key into ``<span>`` elements,
        and wraps runs of annotated characters in an outer "annotation" span
        colored per label.

        Args:
            text (:obj:`str`):
                The original text being visualized

            encoding (:class:`~tokenizers.Encoding`):
                The encoding returned from the tokenizer

            annotations (:obj:`AnnotationList`):
                A (possibly empty) list of annotations

        Returns:
            :obj:`str`: A full HTML document (assembled by ``HTMLBody``)

        NOTE(review): ``char_states[0]`` assumes ``text`` is non-empty — confirm
        callers never pass an empty string.
        """
        char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
        current_consecutive_chars = [char_states[0]]
        prev_anno_ix = char_states[0].anno_ix
        spans = []
        label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
        cur_anno_ix = char_states[0].anno_ix
        if cur_anno_ix is not None:
            # If we started in an annotation make a span for it
            anno = annotations[cur_anno_ix]
            label = anno.label
            color = label_colors_dict[label]
            spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')

        for cs in char_states[1:]:
            cur_anno_ix = cs.anno_ix
            if cur_anno_ix != prev_anno_ix:
                # If we've transitioned in or out of an annotation
                spans.append(
                    # Create a span from the current consecutive characters
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                current_consecutive_chars = [cs]

                if prev_anno_ix is not None:
                    # if we transitioned out of an annotation close its span
                    spans.append("</span>")
                if cur_anno_ix is not None:
                    # If we entered a new annotation make a span for it
                    anno = annotations[cur_anno_ix]
                    label = anno.label
                    color = label_colors_dict[label]
                    spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
            prev_anno_ix = cur_anno_ix

            if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
                current_consecutive_chars.append(cs)
            else:
                # Otherwise we make a span for the previous group
                spans.append(
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                # And reset the consecutive_char_list to form a new group
                current_consecutive_chars = [cs]
        # All that's left is to fill out the final span
        # TODO I think there is an edge case here where an annotation's span might not close
        spans.append(
            EncodingVisualizer.consecutive_chars_to_html(
                current_consecutive_chars,
                text=text,
                encoding=encoding,
            )
        )
        res = HTMLBody(spans)  # Send the list of spans to the body of our html
        return res
|
| 315 |
+
|
| 316 |
+
@staticmethod
|
| 317 |
+
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
|
| 318 |
+
"""
|
| 319 |
+
Args:
|
| 320 |
+
text (:obj:`str`):
|
| 321 |
+
The raw text we want to align to
|
| 322 |
+
|
| 323 |
+
annotations (:obj:`AnnotationList`):
|
| 324 |
+
A (possibly empty) list of annotations
|
| 325 |
+
|
| 326 |
+
Returns:
|
| 327 |
+
A list of length len(text) whose entry at index i is None if there is no annotation on
|
| 328 |
+
charachter i or k, the index of the annotation that covers index i where k is with
|
| 329 |
+
respect to the list of annotations
|
| 330 |
+
"""
|
| 331 |
+
annotation_map = [None] * len(text)
|
| 332 |
+
for anno_ix, a in enumerate(annotations):
|
| 333 |
+
for i in range(a.start, a.end):
|
| 334 |
+
annotation_map[i] = anno_ix
|
| 335 |
+
return annotation_map
|
| 336 |
+
|
| 337 |
+
@staticmethod
|
| 338 |
+
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
|
| 339 |
+
"""
|
| 340 |
+
For each character in the original text, we emit a tuple representing it's "state":
|
| 341 |
+
|
| 342 |
+
* which token_ix it corresponds to
|
| 343 |
+
* which word_ix it corresponds to
|
| 344 |
+
* which annotation_ix it corresponds to
|
| 345 |
+
|
| 346 |
+
Args:
|
| 347 |
+
text (:obj:`str`):
|
| 348 |
+
The raw text we want to align to
|
| 349 |
+
|
| 350 |
+
annotations (:obj:`List[Annotation]`):
|
| 351 |
+
A (possibly empty) list of annotations
|
| 352 |
+
|
| 353 |
+
encoding: (:class:`~tokenizers.Encoding`):
|
| 354 |
+
The encoding returned from the tokenizer
|
| 355 |
+
|
| 356 |
+
Returns:
|
| 357 |
+
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
|
| 358 |
+
it's state is
|
| 359 |
+
"""
|
| 360 |
+
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
|
| 361 |
+
# Todo make this a dataclass or named tuple
|
| 362 |
+
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
|
| 363 |
+
for token_ix, token in enumerate(encoding.tokens):
|
| 364 |
+
offsets = encoding.token_to_chars(token_ix)
|
| 365 |
+
if offsets is not None:
|
| 366 |
+
start, end = offsets
|
| 367 |
+
for i in range(start, end):
|
| 368 |
+
char_states[i].tokens.append(token_ix)
|
| 369 |
+
for char_ix, anno_ix in enumerate(annotation_map):
|
| 370 |
+
char_states[char_ix].anno_ix = anno_ix
|
| 371 |
+
|
| 372 |
+
return char_states
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def HTMLBody(children: List[str], css_styles=css) -> str:
    """
    Generates the full html with css from a list of html spans

    Args:
        children (:obj:`List[str]`):
            A list of strings, assumed to be html elements

        css_styles (:obj:`str`, `optional`):
            Optional alternative implementation of the css

    Returns:
        :obj:`str`: An HTML string with style markup
    """
    # NOTE(review): the default for `css_styles` is evaluated once at import time
    # from the module-level `css` constant.
    children_text = "".join(children)
    return f"""
    <html>
        <head>
            <style>
                {css_styles}
            </style>
        </head>
        <body>
            <div class="tokenized-text" dir=auto>
            {children_text}
            </div>
        </body>
    </html>
    """
|
parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
from .. import trainers

# Re-export the trainer classes from the parent package's `trainers` module so
# they can be imported directly from this subpackage.
Trainer = trainers.Trainer
BpeTrainer = trainers.BpeTrainer
UnigramTrainer = trainers.UnigramTrainer
WordLevelTrainer = trainers.WordLevelTrainer
WordPieceTrainer = trainers.WordPieceTrainer
|
parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
# Type stub (signatures and docs only) for the base trainer class.
class Trainer:
    """
    Base class for all trainers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    Trainer will return an instance of this class when instantiated.
    """
|
| 9 |
+
|
| 10 |
+
# Type stub (docs only) for the BPE trainer.
class BpeTrainer(Trainer):
    """
    Trainer capable of training a BPE model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.

        max_token_length (:obj:`int`, `optional`):
            Prevents creating tokens longer than the specified size.
            This can help with reducing polluting your vocabulary with
            highly repetitive tokens like `======` for wikipedia

    """
|
| 48 |
+
|
| 49 |
+
# Type stub (signature and docs only) for the Unigram trainer.
class UnigramTrainer(Trainer):
    """
    Trainer capable of training a Unigram model

    Args:
        vocab_size (:obj:`int`):
            The size of the final vocabulary, including all tokens and alphabet.

        show_progress (:obj:`bool`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.

        initial_alphabet (:obj:`List[str]`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        shrinking_factor (:obj:`float`):
            The shrinking factor used at each step of the training to prune the
            vocabulary.

        unk_token (:obj:`str`):
            The token used for out-of-vocabulary tokens.

        max_piece_length (:obj:`int`):
            The maximum length of a given token.

        n_sub_iterations (:obj:`int`):
            The number of iterations of the EM algorithm to perform before
            pruning the vocabulary.
    """
    # NOTE: generated stub signature — defaults mirror the native implementation.
    def __init__(
        self,
        vocab_size=8000,
        show_progress=True,
        special_tokens=[],
        shrinking_factor=0.75,
        unk_token=None,
        max_piece_length=16,
        n_sub_iterations=2,
    ):
        pass
|
| 94 |
+
|
| 95 |
+
# Type stub (docs only) for the WordLevel trainer.
class WordLevelTrainer(Trainer):
    """
    Trainer capable of training a WordLevel model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
    """
|
| 112 |
+
|
| 113 |
+
# Type stub (signature and docs only) for the WordPiece trainer.
class WordPieceTrainer(Trainer):
    """
    Trainer capable of training a WordPiece model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
    """
    # NOTE: generated stub signature — defaults mirror the native implementation.
    def __init__(
        self,
        vocab_size=30000,
        min_frequency=0,
        show_progress=True,
        special_tokens=[],
        limit_alphabet=None,
        initial_alphabet=[],
        continuing_subword_prefix="##",
        end_of_word_suffix=None,
    ):
        pass
|
parrot/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (317 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/__init__.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import warnings
|
| 3 |
+
from modulefinder import Module
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
# Don't re-order these, we need to load the _C extension (done when importing
|
| 8 |
+
# .extensions) before entering _meta_registrations.
|
| 9 |
+
from .extension import _HAS_OPS # usort:skip
|
| 10 |
+
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils # usort:skip
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
from .version import __version__ # noqa: F401
|
| 14 |
+
except ImportError:
|
| 15 |
+
pass
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
    os.path.realpath(os.getcwd()), "torchvision"
):
    message = (
        "You are importing torchvision within its own root folder ({}). "
        "This is not expected to work and may give errors. Please exit the "
        "torchvision project source and relaunch your python interpreter."
    )
    warnings.warn(message.format(os.getcwd()))

# Default backend for image loading; changed via set_image_backend().
_image_backend = "PIL"

# Default backend for video decoding; changed via set_video_backend().
_video_backend = "pyav"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def set_image_backend(backend):
    """
    Specifies the package used to load images.

    Args:
        backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
            The :mod:`accimage` package uses the Intel IPP library. It is
            generally faster than PIL, but does not support as many operations.
    """
    global _image_backend
    valid_backends = ("PIL", "accimage")
    if backend not in valid_backends:
        raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
    _image_backend = backend
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def get_image_backend():
    """
    Gets the name of the package used to load images

    Returns:
        str: Name of the image backend, one of {'PIL', 'accimage'}.
    """
    return _image_backend
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def set_video_backend(backend):
    """
    Specifies the package used to decode videos.

    Args:
        backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
            The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
            binding for the FFmpeg libraries.
            The :mod:`video_reader` package includes a native C++ implementation on
            top of FFMPEG libraries, and a python API of TorchScript custom operator.
            It generally decodes faster than :mod:`pyav`, but is perhaps less robust.

    .. note::
        Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
        backend, please compile torchvision from source.

    Raises:
        ValueError: If ``backend`` is not one of 'pyav', 'video_reader' or 'cuda'.
        RuntimeError: If the requested backend is not available in this build.
    """
    global _video_backend
    if backend not in ["pyav", "video_reader", "cuda"]:
        # f-string instead of %-formatting, consistent with set_image_backend.
        raise ValueError(f"Invalid video backend '{backend}'. Options are 'pyav', 'video_reader' and 'cuda'")
    if backend == "video_reader" and not io._HAS_VIDEO_OPT:
        # TODO: better messages
        message = "video_reader video backend is not available. Please compile torchvision from source and try again"
        raise RuntimeError(message)
    elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
        # TODO: better messages
        message = "cuda video backend is not available."
        raise RuntimeError(message)
    else:
        _video_backend = backend
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def get_video_backend():
    """
    Returns the currently active video backend used to decode videos.

    Returns:
        str: Name of the video backend. one of {'pyav', 'video_reader'}.
    """

    # Module-level default is "pyav"; changed via set_video_backend().
    return _video_backend
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _is_tracing():
    # Truthy while TorchScript tracing is active: torch._C._get_tracing_state()
    # returns the current tracing state (presumably None outside of tracing —
    # confirm against torch internals).
    return torch._C._get_tracing_state()
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def disable_beta_transforms_warning():
    """No-op kept only so existing callers don't break.

    See https://github.com/pytorch/vision/issues/7896
    """
|
parrot/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.machinery
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from torch.hub import _get_torch_home
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
|
| 8 |
+
_USE_SHARDED_DATASETS = False
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _download_file_from_remote_location(fpath: str, url: str) -> None:
    """Placeholder hook: no remote file store is configured here, so this does nothing."""
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _is_remote_location_available() -> bool:
    """Report whether a remote file store can be used; always ``False`` here."""
    return False
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
try:
|
| 20 |
+
from torch.hub import load_state_dict_from_url # noqa: 401
|
| 21 |
+
except ImportError:
|
| 22 |
+
from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _get_extension_path(lib_name):
    """
    Locate the compiled extension module ``lib_name`` next to this file.

    Args:
        lib_name (str): Base name of the extension (without the platform-specific
            suffix, e.g. ``.so`` / ``.pyd``).

    Returns:
        str: Filesystem path of the matching extension binary.

    Raises:
        ImportError: If no matching extension file is found in this directory.
    """
    lib_dir = os.path.dirname(__file__)
    if os.name == "nt":
        # Register the main torchvision library location on the default DLL path
        import ctypes

        kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
        with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
        prev_error_mode = kernel32.SetErrorMode(0x0001)

        if with_load_library_flags:
            kernel32.AddDllDirectory.restype = ctypes.c_void_p

        os.add_dll_directory(lib_dir)

        kernel32.SetErrorMode(prev_error_mode)

    loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)

    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_specs = extfinder.find_spec(lib_name)
    if ext_specs is None:
        # Name the missing module instead of raising a bare, message-less ImportError.
        raise ImportError(f"Could not find extension module '{lib_name}' in {lib_dir}")

    return ext_specs.origin
|
parrot/lib/python3.10/site-packages/torchvision/_meta_registrations.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch._custom_ops
|
| 5 |
+
import torch.library
|
| 6 |
+
|
| 7 |
+
# Ensure that torch.ops.torchvision is visible
|
| 8 |
+
import torchvision.extension # noqa: F401
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@functools.lru_cache(None)
def get_meta_lib():
    # Cached so the library object used to register "Meta" dispatch-key
    # implementations for the "torchvision" namespace is created once per process.
    return torch.library.Library("torchvision", "IMPL", "Meta")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def register_meta(op_name, overload_name="default"):
    """
    Decorator factory registering ``fn`` as the Meta implementation of
    ``torch.ops.torchvision.<op_name>.<overload_name>``.

    When the compiled torchvision ops are unavailable the decorator is a no-op
    and simply returns ``fn`` unchanged.
    """

    def wrapper(fn):
        if torchvision.extension._has_ops():
            get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
        return fn

    return wrapper
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@register_meta("roi_align")
|
| 26 |
+
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
|
| 27 |
+
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
|
| 28 |
+
torch._check(
|
| 29 |
+
input.dtype == rois.dtype,
|
| 30 |
+
lambda: (
|
| 31 |
+
"Expected tensor for input to have the same type as tensor for rois; "
|
| 32 |
+
f"but type {input.dtype} does not equal {rois.dtype}"
|
| 33 |
+
),
|
| 34 |
+
)
|
| 35 |
+
num_rois = rois.size(0)
|
| 36 |
+
channels = input.size(1)
|
| 37 |
+
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@register_meta("_roi_align_backward")
|
| 41 |
+
def meta_roi_align_backward(
|
| 42 |
+
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
|
| 43 |
+
):
|
| 44 |
+
torch._check(
|
| 45 |
+
grad.dtype == rois.dtype,
|
| 46 |
+
lambda: (
|
| 47 |
+
"Expected tensor for grad to have the same type as tensor for rois; "
|
| 48 |
+
f"but type {grad.dtype} does not equal {rois.dtype}"
|
| 49 |
+
),
|
| 50 |
+
)
|
| 51 |
+
return grad.new_empty((batch_size, channels, height, width))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@register_meta("ps_roi_align")
|
| 55 |
+
def meta_ps_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio):
|
| 56 |
+
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
|
| 57 |
+
torch._check(
|
| 58 |
+
input.dtype == rois.dtype,
|
| 59 |
+
lambda: (
|
| 60 |
+
"Expected tensor for input to have the same type as tensor for rois; "
|
| 61 |
+
f"but type {input.dtype} does not equal {rois.dtype}"
|
| 62 |
+
),
|
| 63 |
+
)
|
| 64 |
+
channels = input.size(1)
|
| 65 |
+
torch._check(
|
| 66 |
+
channels % (pooled_height * pooled_width) == 0,
|
| 67 |
+
"input channels must be a multiple of pooling height * pooling width",
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
num_rois = rois.size(0)
|
| 71 |
+
out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width)
|
| 72 |
+
return input.new_empty(out_size), torch.empty(out_size, dtype=torch.int32, device="meta")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@register_meta("_ps_roi_align_backward")
|
| 76 |
+
def meta_ps_roi_align_backward(
|
| 77 |
+
grad,
|
| 78 |
+
rois,
|
| 79 |
+
channel_mapping,
|
| 80 |
+
spatial_scale,
|
| 81 |
+
pooled_height,
|
| 82 |
+
pooled_width,
|
| 83 |
+
sampling_ratio,
|
| 84 |
+
batch_size,
|
| 85 |
+
channels,
|
| 86 |
+
height,
|
| 87 |
+
width,
|
| 88 |
+
):
|
| 89 |
+
torch._check(
|
| 90 |
+
grad.dtype == rois.dtype,
|
| 91 |
+
lambda: (
|
| 92 |
+
"Expected tensor for grad to have the same type as tensor for rois; "
|
| 93 |
+
f"but type {grad.dtype} does not equal {rois.dtype}"
|
| 94 |
+
),
|
| 95 |
+
)
|
| 96 |
+
return grad.new_empty((batch_size, channels, height, width))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@register_meta("roi_pool")
|
| 100 |
+
def meta_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width):
|
| 101 |
+
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
|
| 102 |
+
torch._check(
|
| 103 |
+
input.dtype == rois.dtype,
|
| 104 |
+
lambda: (
|
| 105 |
+
"Expected tensor for input to have the same type as tensor for rois; "
|
| 106 |
+
f"but type {input.dtype} does not equal {rois.dtype}"
|
| 107 |
+
),
|
| 108 |
+
)
|
| 109 |
+
num_rois = rois.size(0)
|
| 110 |
+
channels = input.size(1)
|
| 111 |
+
out_size = (num_rois, channels, pooled_height, pooled_width)
|
| 112 |
+
return input.new_empty(out_size), torch.empty(out_size, device="meta", dtype=torch.int32)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@register_meta("_roi_pool_backward")
|
| 116 |
+
def meta_roi_pool_backward(
|
| 117 |
+
grad, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width
|
| 118 |
+
):
|
| 119 |
+
torch._check(
|
| 120 |
+
grad.dtype == rois.dtype,
|
| 121 |
+
lambda: (
|
| 122 |
+
"Expected tensor for grad to have the same type as tensor for rois; "
|
| 123 |
+
f"but type {grad.dtype} does not equal {rois.dtype}"
|
| 124 |
+
),
|
| 125 |
+
)
|
| 126 |
+
return grad.new_empty((batch_size, channels, height, width))
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@register_meta("ps_roi_pool")
|
| 130 |
+
def meta_ps_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width):
|
| 131 |
+
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
|
| 132 |
+
torch._check(
|
| 133 |
+
input.dtype == rois.dtype,
|
| 134 |
+
lambda: (
|
| 135 |
+
"Expected tensor for input to have the same type as tensor for rois; "
|
| 136 |
+
f"but type {input.dtype} does not equal {rois.dtype}"
|
| 137 |
+
),
|
| 138 |
+
)
|
| 139 |
+
channels = input.size(1)
|
| 140 |
+
torch._check(
|
| 141 |
+
channels % (pooled_height * pooled_width) == 0,
|
| 142 |
+
"input channels must be a multiple of pooling height * pooling width",
|
| 143 |
+
)
|
| 144 |
+
num_rois = rois.size(0)
|
| 145 |
+
out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width)
|
| 146 |
+
return input.new_empty(out_size), torch.empty(out_size, device="meta", dtype=torch.int32)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@register_meta("_ps_roi_pool_backward")
def meta_ps_roi_pool_backward(
    grad, rois, channel_mapping, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width
):
    """Meta kernel for the ps_roi_pool backward op.

    Performs only dtype validation and shape inference: the result is an
    uninitialized tensor shaped like the forward input,
    (batch_size, channels, height, width).
    """
    def _dtype_mismatch_msg():
        return (
            "Expected tensor for grad to have the same type as tensor for rois; "
            f"but type {grad.dtype} does not equal {rois.dtype}"
        )

    torch._check(grad.dtype == rois.dtype, _dtype_mismatch_msg)
    input_shape = (batch_size, channels, height, width)
    return grad.new_empty(input_shape)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@torch.library.register_fake("torchvision::nms")
def meta_nms(dets, scores, iou_threshold):
    # Fake (meta) implementation of torchvision::nms for tracing/compilation.
    # It validates the boxes/scores shapes and returns an int64 index tensor
    # whose length is an *unbacked* SymInt: the number of boxes NMS keeps is
    # data-dependent and cannot be known without running the real kernel.
    torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D")
    torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}")
    torch._check(scores.dim() == 1, lambda: f"scores should be a 1d tensor, got {scores.dim()}")
    torch._check(
        dets.size(0) == scores.size(0),
        lambda: f"boxes and scores should have same number of elements in dimension 0, got {dets.size(0)} and {scores.size(0)}",
    )
    ctx = torch._custom_ops.get_ctx()
    num_to_keep = ctx.create_unbacked_symint()
    return dets.new_empty(num_to_keep, dtype=torch.long)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@register_meta("deform_conv2d")
def meta_deform_conv2d(
    input,
    weight,
    offset,
    mask,
    bias,
    stride_h,
    stride_w,
    pad_h,
    pad_w,
    dil_h,
    dil_w,
    n_weight_grps,
    n_offset_grps,
    use_mask,
):
    """Meta kernel for deform_conv2d: infers the output shape only.

    The spatial output size is taken from the trailing two dims of ``offset``
    (which the real kernel computes per output location), the channel count
    from ``weight``'s leading dim, and the batch size from ``input``.
    """
    spatial_h, spatial_w = offset.shape[-2], offset.shape[-1]
    num_out_channels = weight.shape[0]
    num_batches = input.shape[0]
    return input.new_empty((num_batches, num_out_channels, spatial_h, spatial_w))
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@register_meta("_deform_conv2d_backward")
def meta_deform_conv2d_backward(
    grad,
    input,
    weight,
    offset,
    mask,
    bias,
    stride_h,
    stride_w,
    pad_h,
    pad_w,
    dilation_h,
    dilation_w,
    groups,
    offset_groups,
    use_mask,
):
    """Meta kernel for the deform_conv2d backward op.

    Each gradient is an uninitialized tensor shaped like its corresponding
    forward argument; no convolution math is performed.
    """
    return (
        input.new_empty(input.shape),
        weight.new_empty(weight.shape),
        offset.new_empty(offset.shape),
        mask.new_empty(mask.shape),
        bias.new_empty(bias.shape),
    )
|
parrot/lib/python3.10/site-packages/torchvision/_utils.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
from typing import Sequence, Type, TypeVar
|
| 3 |
+
|
| 4 |
+
T = TypeVar("T", bound=enum.Enum)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class StrEnumMeta(enum.EnumMeta):
    """Metaclass for string enums that adds a name-based lookup helper."""

    # Re-export enum.auto so subclasses can use `auto` without importing enum.
    auto = enum.auto

    def from_str(self: Type[T], member: str) -> T:  # type: ignore[misc]
        """Return the enum member named *member*, raising ValueError on a miss."""
        if member not in self.__members__:
            # TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as
            # soon as it is migrated.
            raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None
        return self[member]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
    # Base class for string-valued enums; inherits the `from_str` name lookup
    # (and the `auto` shortcut) from StrEnumMeta.
    pass
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
    """Render *seq* as a human-readable, single-quoted, comma-separated string.

    Args:
        seq: items to render; each is formatted with str().
        separate_last: word (e.g. ``"and "`` / ``"or "``) placed before the
            final item. With exactly two items the comma is dropped so the
            result reads naturally ("'a' or 'b'").

    Returns:
        "" for an empty sequence, "'x'" for a single item, otherwise the
        quoted items joined with commas (plus the optional separator word).
    """
    if not seq:
        return ""
    if len(seq) == 1:
        return f"'{seq[0]}'"

    quoted = [f"'{item}'" for item in seq]
    # Oxford-comma style except for the two-item "a or b" case.
    drop_comma = bool(separate_last) and len(seq) == 2
    joiner = "" if drop_comma else ","
    return ", ".join(quoted[:-1]) + f"{joiner} {separate_last}{quoted[-1]}"
|
parrot/lib/python3.10/site-packages/torchvision/extension.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from ._internally_replaced_utils import _get_extension_path
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
_HAS_OPS = False
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _has_ops():
|
| 13 |
+
return False
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
try:
    # On Windows Python-3.8.x has `os.add_dll_directory` call,
    # which is called to configure dll search path.
    # To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured Please take a look:
    # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
    # Please note: if some path can't be added using add_dll_directory we simply ignore this path
    if os.name == "nt" and sys.version_info < (3, 9):
        env_path = os.environ["PATH"]
        path_arr = env_path.split(";")
        for path in path_arr:
            if os.path.exists(path):
                try:
                    os.add_dll_directory(path)  # type: ignore[attr-defined]
                except Exception:
                    pass

    lib_path = _get_extension_path("_C")
    torch.ops.load_library(lib_path)
    _HAS_OPS = True

    # Shadow the stub defined above: the extension loaded, so ops exist.
    def _has_ops():  # noqa: F811
        return True

except (ImportError, OSError):
    # Compiled "_C" extension is absent/unloadable: keep the False stub.
    pass
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _assert_has_ops():
    """Raise RuntimeError when the compiled torchvision C++ ops are missing."""
    if _has_ops():
        return
    raise RuntimeError(
        "Couldn't load custom C++ ops. This can happen if your PyTorch and "
        "torchvision versions are incompatible, or if you had errors while compiling "
        "torchvision from source. For further information on the compatible versions, check "
        "https://github.com/pytorch/vision#installation for the compatibility matrix. "
        "Please check your PyTorch version with torch.__version__ and your torchvision "
        "version with torchvision.__version__ and verify if they are compatible, and if not "
        "please reinstall torchvision so that it matches your PyTorch install."
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _check_cuda_version():
    """
    Make sure that CUDA versions match between the pytorch install and torchvision install
    """
    if not _HAS_OPS:
        # No compiled extension -> no CUDA build info to compare against.
        return -1
    from torch.version import cuda as torch_version_cuda

    # Integer reported by the compiled extension; -1 indicates a CPU-only build.
    # The parsing below assumes an encoding like 9020 -> 9.2 and 11030 -> 11.3.
    # TODO(review): confirm the encoding against the C++ _cuda_version source.
    _version = torch.ops.torchvision._cuda_version()
    if _version != -1 and torch_version_cuda is not None:
        tv_version = str(_version)
        if int(tv_version) < 10000:
            # e.g. "9020": digit 0 is the major version, digit 2 the minor.
            tv_major = int(tv_version[0])
            tv_minor = int(tv_version[2])
        else:
            # e.g. "11030": digits 0-1 are the major version, digit 3 the minor.
            tv_major = int(tv_version[0:2])
            tv_minor = int(tv_version[3])
        t_version = torch_version_cuda.split(".")
        t_major = int(t_version[0])
        t_minor = int(t_version[1])
        # Only a *major* version mismatch is fatal; minors appear in the message.
        if t_major != tv_major:
            raise RuntimeError(
                "Detected that PyTorch and torchvision were compiled with different CUDA major versions. "
                f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
                f"CUDA Version={tv_major}.{tv_minor}. "
                "Please reinstall the torchvision that matches your PyTorch install."
            )
    return _version
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _load_library(lib_name):
    """Resolve the named torchvision extension's path and load it into torch.ops."""
    torch.ops.load_library(_get_extension_path(lib_name))
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# Run the torch/torchvision CUDA-version consistency check at import time.
_check_cuda_version()
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_load_gpu_decoder.cpython-310.pyc
ADDED
|
Binary file (335 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_video_opt.cpython-310.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/image.cpython-310.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc
ADDED
|
Binary file (10 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/io/_video_opt.py
ADDED
|
@@ -0,0 +1,512 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import warnings
|
| 3 |
+
from fractions import Fraction
|
| 4 |
+
from typing import Dict, List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ..extension import _load_library
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
try:
    _load_library("video_reader")
    _HAS_VIDEO_OPT = True
except (ImportError, OSError):
    # The optional compiled "video_reader" extension is not part of this build.
    _HAS_VIDEO_OPT = False

# 0/1 sentinel timebase used as the default argument for the readers below.
default_timebase = Fraction(0, 1)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
class Timebase:
    """Minimal numerator/denominator pair standing in for fractions.Fraction."""

    __annotations__ = {"numerator": int, "denominator": int}
    __slots__ = ["numerator", "denominator"]

    def __init__(
        self,
        numerator: int,
        denominator: int,
    ) -> None:
        self.numerator = numerator
        self.denominator = denominator
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class VideoMetaData:
    """Container for probed video/audio stream metadata (torchscript-friendly)."""

    __annotations__ = {
        "has_video": bool,
        "video_timebase": Timebase,
        "video_duration": float,
        "video_fps": float,
        "has_audio": bool,
        "audio_timebase": Timebase,
        "audio_duration": float,
        "audio_sample_rate": float,
    }
    __slots__ = [
        "has_video",
        "video_timebase",
        "video_duration",
        "video_fps",
        "has_audio",
        "audio_timebase",
        "audio_duration",
        "audio_sample_rate",
    ]

    def __init__(self) -> None:
        # Defaults mean "no such stream found"; _fill_info overwrites them
        # with values decoded from the container.
        self.has_video = False
        self.video_timebase = Timebase(0, 1)
        self.video_duration = 0.0
        self.video_fps = 0.0
        self.has_audio = False
        self.audio_timebase = Timebase(0, 1)
        self.audio_duration = 0.0
        self.audio_sample_rate = 0.0
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _validate_pts(pts_range: Tuple[int, int]) -> None:
|
| 69 |
+
|
| 70 |
+
if pts_range[0] > pts_range[1] > 0:
|
| 71 |
+
raise ValueError(
|
| 72 |
+
f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]} and end pts: {pts_range[1]}"
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _fill_info(
    vtimebase: torch.Tensor,
    vfps: torch.Tensor,
    vduration: torch.Tensor,
    atimebase: torch.Tensor,
    asample_rate: torch.Tensor,
    aduration: torch.Tensor,
) -> VideoMetaData:
    """
    Build update VideoMetaData struct with info about the video
    """
    # Each argument is a tensor returned by the video_reader ops; numel() == 0
    # signals "value not available for this container/stream".
    meta = VideoMetaData()
    if vtimebase.numel() > 0:
        meta.video_timebase = Timebase(int(vtimebase[0].item()), int(vtimebase[1].item()))
        # Seconds per pts tick for the video stream.
        timebase = vtimebase[0].item() / float(vtimebase[1].item())
        if vduration.numel() > 0:
            meta.has_video = True
            # Duration is stored in pts ticks; convert to seconds.
            meta.video_duration = float(vduration.item()) * timebase
    if vfps.numel() > 0:
        meta.video_fps = float(vfps.item())
    if atimebase.numel() > 0:
        meta.audio_timebase = Timebase(int(atimebase[0].item()), int(atimebase[1].item()))
        timebase = atimebase[0].item() / float(atimebase[1].item())
        if aduration.numel() > 0:
            meta.has_audio = True
            meta.audio_duration = float(aduration.item()) * timebase
    if asample_rate.numel() > 0:
        meta.audio_sample_rate = float(asample_rate.item())

    return meta
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _align_audio_frames(
    aframes: torch.Tensor, aframe_pts: torch.Tensor, audio_pts_range: Tuple[int, int]
) -> torch.Tensor:
    # Trim decoded audio samples to the requested pts window. Decoding is
    # frame-granular, so the decoded range [start, end] can overshoot the
    # requested [audio_pts_range[0], audio_pts_range[1]] on either side.
    start, end = aframe_pts[0], aframe_pts[-1]
    num_samples = aframes.size(0)
    # Approximate pts ticks covered by one sample row.
    step_per_aframe = float(end - start + 1) / float(num_samples)
    s_idx = 0
    e_idx = num_samples
    if start < audio_pts_range[0]:
        s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
    if audio_pts_range[1] != -1 and end > audio_pts_range[1]:
        # NOTE: this value is negative, so the slice below drops roughly
        # (end - audio_pts_range[1]) / step_per_aframe samples from the tail.
        e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
    return aframes[s_idx:e_idx, :]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _read_video_from_file(
    filename: str,
    seek_frame_margin: float = 0.25,
    read_video_stream: bool = True,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase: Fraction = default_timebase,
    read_audio_stream: bool = True,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase: Fraction = default_timebase,
) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]:
    """
    Reads a video from a file, returning both the video frames and the audio frames

    Args:
        filename (str): path to the video file
        seek_frame_margin (double, optional): seeking frame in the stream is imprecise. Thus,
            when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
        read_video_stream (int, optional): whether read video stream. If yes, set to 1. Otherwise, 0
        video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
            the size of decoded frames:

            - When video_width = 0, video_height = 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the original frame resolution
            - When video_width = 0, video_height = 0, video_min_dimension != 0,
                and video_max_dimension = 0, keep the aspect ratio and resize the
                frame so that shorter edge size is video_min_dimension
            - When video_width = 0, video_height = 0, video_min_dimension = 0,
                and video_max_dimension != 0, keep the aspect ratio and resize
                the frame so that longer edge size is video_max_dimension
            - When video_width = 0, video_height = 0, video_min_dimension != 0,
                and video_max_dimension != 0, resize the frame so that shorter
                edge size is video_min_dimension, and longer edge size is
                video_max_dimension. The aspect ratio may not be preserved
            - When video_width = 0, video_height != 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the aspect ratio and resize
                the frame so that frame video_height is $video_height
            - When video_width != 0, video_height == 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the aspect ratio and resize
                the frame so that frame video_width is $video_width
            - When video_width != 0, video_height != 0, video_min_dimension = 0,
                and video_max_dimension = 0, resize the frame so that frame
                video_width and video_height are set to $video_width and
                $video_height, respectively
        video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
        video_timebase (Fraction, optional): a Fraction rational number which denotes timebase in video stream
        read_audio_stream (int, optional): whether read audio stream. If yes, set to 1. Otherwise, 0
        audio_samples (int, optional): audio sampling rate
        audio_channels (int optional): audio channels
        audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
        audio_timebase (Fraction, optional): a Fraction rational number which denotes time base in audio stream

    Returns
        vframes (Tensor[T, H, W, C]): the `T` video frames
        aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
            `K` is the number of audio_channels
        info (Dict): metadata for the video and audio. Can contain the fields video_fps (float)
            and audio_fps (int)
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    # Positional argument order must match the C++ read_video_from_file op.
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase.numerator,
        video_timebase.denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase.numerator,
        audio_timebase.denominator,
    )
    vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    if aframes.numel() > 0:
        # when audio stream is found
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes, info
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _read_video_timestamps_from_file(filename: str) -> Tuple[List[int], List[int], VideoMetaData]:
    """
    Decode all video- and audio frames in the video. Only pts
    (presentation timestamp) is returned. The actual frame pixel data is not
    copied. Thus, it is much faster than read_video(...)
    """
    # getPtsOnly=1 makes the op skip pixel/sample decoding entirely.
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)

    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def _probe_video_from_file(filename: str) -> VideoMetaData:
    """Probe a video file on disk and return its VideoMetaData."""
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = torch.ops.video_reader.probe_video_from_file(
        filename
    )
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _read_video_from_memory(
    video_data: torch.Tensor,
    seek_frame_margin: float = 0.25,
    read_video_stream: int = 1,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase_numerator: int = 0,
    video_timebase_denominator: int = 1,
    read_audio_stream: int = 1,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase_numerator: int = 0,
    audio_timebase_denominator: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Reads a video from memory, returning both the video frames as the audio frames
    This function is torchscriptable.

    Args:
        video_data (data type could be 1) torch.Tensor, dtype=torch.int8 or 2) python bytes):
            compressed video content stored in either 1) torch.Tensor 2) python bytes
        seek_frame_margin (double, optional): seeking frame in the stream is imprecise.
            Thus, when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds
        read_video_stream (int, optional): whether read video stream. If yes, set to 1. Otherwise, 0
        video_width/video_height/video_min_dimension/video_max_dimension (int): together decide
            the size of decoded frames:

            - When video_width = 0, video_height = 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the original frame resolution
            - When video_width = 0, video_height = 0, video_min_dimension != 0,
                and video_max_dimension = 0, keep the aspect ratio and resize the
                frame so that shorter edge size is video_min_dimension
            - When video_width = 0, video_height = 0, video_min_dimension = 0,
                and video_max_dimension != 0, keep the aspect ratio and resize
                the frame so that longer edge size is video_max_dimension
            - When video_width = 0, video_height = 0, video_min_dimension != 0,
                and video_max_dimension != 0, resize the frame so that shorter
                edge size is video_min_dimension, and longer edge size is
                video_max_dimension. The aspect ratio may not be preserved
            - When video_width = 0, video_height != 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the aspect ratio and resize
                the frame so that frame video_height is $video_height
            - When video_width != 0, video_height == 0, video_min_dimension = 0,
                and video_max_dimension = 0, keep the aspect ratio and resize
                the frame so that frame video_width is $video_width
            - When video_width != 0, video_height != 0, video_min_dimension = 0,
                and video_max_dimension = 0, resize the frame so that frame
                video_width and video_height are set to $video_width and
                $video_height, respectively
        video_pts_range (list(int), optional): the start and end presentation timestamp of video stream
        video_timebase_numerator / video_timebase_denominator (float, optional): a rational
            number which denotes timebase in video stream
        read_audio_stream (int, optional): whether read audio stream. If yes, set to 1. Otherwise, 0
        audio_samples (int, optional): audio sampling rate
        audio_channels (int optional): audio audio_channels
        audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream
        audio_timebase_numerator / audio_timebase_denominator (float, optional):
            a rational number which denotes time base in audio stream

    Returns:
        vframes (Tensor[T, H, W, C]): the `T` video frames
        aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and
            `K` is the number of channels
    """

    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    if not isinstance(video_data, torch.Tensor):
        with warnings.catch_warnings():
            # Ignore the warning because we actually don't modify the buffer in this function
            warnings.filterwarnings("ignore", message="The given buffer is not writable")
            video_data = torch.frombuffer(video_data, dtype=torch.uint8)

    # Positional argument order must match the C++ read_video_from_memory op.
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase_numerator,
        video_timebase_denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase_numerator,
        audio_timebase_denominator,
    )

    vframes, _vframe_pts, vtimebase, vfps, vduration, aframes, aframe_pts, atimebase, asample_rate, aduration = result

    if aframes.numel() > 0:
        # when audio stream is found
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)

    return vframes, aframes
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def _read_video_timestamps_from_memory(
    video_data: torch.Tensor,
) -> Tuple[List[int], List[int], VideoMetaData]:
    """
    Decode all frames in the video. Only pts (presentation timestamp) is returned.
    The actual frame pixel data is not copied. Thus, read_video_timestamps(...)
    is much faster than read_video(...)
    """
    if not isinstance(video_data, torch.Tensor):
        with warnings.catch_warnings():
            # Ignore the warning because we actually don't modify the buffer in this function
            warnings.filterwarnings("ignore", message="The given buffer is not writable")
            video_data = torch.frombuffer(video_data, dtype=torch.uint8)
    # getPtsOnly=1 makes the op skip pixel/sample decoding entirely.
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)

    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def _probe_video_from_memory(
    video_data: torch.Tensor,
) -> VideoMetaData:
    """
    Probe a video in memory and return VideoMetaData with info about the video
    This function is torchscriptable

    Args:
        video_data: encoded video container bytes, either already a uint8
            Tensor or any buffer object accepted by ``torch.frombuffer``.
    """
    if not isinstance(video_data, torch.Tensor):
        with warnings.catch_warnings():
            # Ignore the warning because we actually don't modify the buffer in this function
            warnings.filterwarnings("ignore", message="The given buffer is not writable")
            video_data = torch.frombuffer(video_data, dtype=torch.uint8)
    result = torch.ops.video_reader.probe_video_from_memory(video_data)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    return info
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def _read_video(
    filename: str,
    start_pts: Union[float, Fraction] = 0,
    end_pts: Optional[Union[float, Fraction]] = None,
    pts_unit: str = "pts",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, float]]:
    """Read video and audio frames from *filename* between start_pts and end_pts.

    Args:
        filename: path to the video container file.
        start_pts / end_pts: range bounds, interpreted according to *pts_unit*
            ("pts" raw stream timestamps, or "sec" seconds). ``end_pts=None``
            means read until the end.
        pts_unit: "pts" (deprecated, see warning below) or "sec".

    Returns:
        (vframes, aframes, info dict with optional "video_fps"/"audio_fps").
    """
    if end_pts is None:
        end_pts = float("inf")

    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    # Probe first so the seconds -> pts conversion below can use each
    # stream's own timebase.
    info = _probe_video_from_file(filename)

    has_video = info.has_video
    has_audio = info.has_audio

    def get_pts(time_base):
        # Convert the requested range into integer pts in *time_base* units.
        # floor for the start and ceil for the end so the requested interval
        # is fully covered; -1 signals "until end of stream" to the C++ op.
        start_offset = start_pts
        end_offset = end_pts
        if pts_unit == "sec":
            start_offset = int(math.floor(start_pts * (1 / time_base)))
            if end_offset != float("inf"):
                end_offset = int(math.ceil(end_pts * (1 / time_base)))
        if end_offset == float("inf"):
            end_offset = -1
        return start_offset, end_offset

    video_pts_range = (0, -1)
    video_timebase = default_timebase
    if has_video:
        video_timebase = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
        video_pts_range = get_pts(video_timebase)

    audio_pts_range = (0, -1)
    audio_timebase = default_timebase
    if has_audio:
        audio_timebase = Fraction(info.audio_timebase.numerator, info.audio_timebase.denominator)
        audio_pts_range = get_pts(audio_timebase)

    vframes, aframes, info = _read_video_from_file(
        filename,
        read_video_stream=True,
        video_pts_range=video_pts_range,
        video_timebase=video_timebase,
        read_audio_stream=True,
        audio_pts_range=audio_pts_range,
        audio_timebase=audio_timebase,
    )
    _info = {}
    if has_video:
        _info["video_fps"] = info.video_fps
    if has_audio:
        _info["audio_fps"] = info.audio_sample_rate

    return vframes, aframes, _info
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def _read_video_timestamps(
    filename: str, pts_unit: str = "pts"
) -> Tuple[Union[List[int], List[Fraction]], Optional[float]]:
    """Return the presentation timestamps of every video frame in *filename*.

    Args:
        filename: path to the video container file.
        pts_unit: "pts" (deprecated) returns raw integer timestamps;
            "sec" returns them as Fractions of a second.

    Returns:
        (list of timestamps, video fps or None if the file has no video stream).
    """
    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    pts: Union[List[int], List[Fraction]]
    pts, _, info = _read_video_timestamps_from_file(filename)

    if pts_unit == "sec":
        # Multiplying by the stream timebase (a Fraction) keeps the
        # conversion exact — no float rounding.
        video_time_base = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
        pts = [x * video_time_base for x in pts]

    video_fps = info.video_fps if info.has_video else None

    return pts, video_fps
|
parrot/lib/python3.10/site-packages/torchvision/ops/_utils.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn, Tensor
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
|
| 8 |
+
"""
|
| 9 |
+
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
|
| 10 |
+
"""
|
| 11 |
+
# TODO add back the assert
|
| 12 |
+
# assert isinstance(tensors, (list, tuple))
|
| 13 |
+
if len(tensors) == 1:
|
| 14 |
+
return tensors[0]
|
| 15 |
+
return torch.cat(tensors, dim)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
    """Convert a per-image list of ``[L, 4]`` box tensors into the single
    ``[K, 5]`` RoI layout ``(batch_index, x1, y1, x2, y2)`` used by roi ops."""
    coords = torch.cat(list(boxes), dim=0)
    # One column of the image index, repeated once per box of that image.
    batch_ids = torch.cat(
        [torch.full_like(per_image[:, :1], img_idx) for img_idx, per_image in enumerate(boxes)],
        dim=0,
    )
    return torch.cat([batch_ids, coords], dim=1)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
    """Validate that *boxes* is either a single ``Tensor[K, 5]`` or a
    list/tuple of per-image ``Tensor[L, 4]``; asserts otherwise."""
    if isinstance(boxes, torch.Tensor):
        torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
    elif isinstance(boxes, (list, tuple)):
        for per_image_boxes in boxes:
            torch._assert(
                per_image_boxes.size(1) == 4,
                "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]",
            )
    else:
        torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
    return
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def split_normalization_params(
    model: nn.Module, norm_classes: Optional[List[type]] = None
) -> Tuple[List[Tensor], List[Tensor]]:
    """Split a model's trainable parameters into (normalization-layer
    parameters, all other parameters), e.g. to exempt norm layers from
    weight decay."""
    # Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501
    if not norm_classes:
        norm_classes = [
            nn.modules.batchnorm._BatchNorm,
            nn.LayerNorm,
            nn.GroupNorm,
            nn.modules.instancenorm._InstanceNorm,
            nn.LocalResponseNorm,
        ]

    for t in norm_classes:
        if not issubclass(t, nn.Module):
            raise ValueError(f"Class {t} is not a subclass of nn.Module.")

    classes = tuple(norm_classes)

    norm_params: List[Tensor] = []
    other_params: List[Tensor] = []
    for submodule in model.modules():
        if next(submodule.children(), None):
            # Container module: only take its own direct parameters; the
            # children are visited separately by model.modules().
            other_params.extend(p for p in submodule.parameters(recurse=False) if p.requires_grad)
        elif isinstance(submodule, classes):
            norm_params.extend(p for p in submodule.parameters() if p.requires_grad)
        else:
            other_params.extend(p for p in submodule.parameters() if p.requires_grad)
    return norm_params, other_params
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _upcast(t: Tensor) -> Tensor:
|
| 73 |
+
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
|
| 74 |
+
if t.is_floating_point():
|
| 75 |
+
return t if t.dtype in (torch.float32, torch.float64) else t.float()
|
| 76 |
+
else:
|
| 77 |
+
return t if t.dtype in (torch.int32, torch.int64) else t.int()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _upcast_non_float(t: Tensor) -> Tensor:
|
| 81 |
+
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
|
| 82 |
+
if t.dtype not in (torch.float32, torch.float64):
|
| 83 |
+
return t.float()
|
| 84 |
+
return t
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _loss_inter_union(
|
| 88 |
+
boxes1: torch.Tensor,
|
| 89 |
+
boxes2: torch.Tensor,
|
| 90 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 91 |
+
|
| 92 |
+
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
|
| 93 |
+
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
|
| 94 |
+
|
| 95 |
+
# Intersection keypoints
|
| 96 |
+
xkis1 = torch.max(x1, x1g)
|
| 97 |
+
ykis1 = torch.max(y1, y1g)
|
| 98 |
+
xkis2 = torch.min(x2, x2g)
|
| 99 |
+
ykis2 = torch.min(y2, y2g)
|
| 100 |
+
|
| 101 |
+
intsctk = torch.zeros_like(x1)
|
| 102 |
+
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
|
| 103 |
+
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
|
| 104 |
+
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
|
| 105 |
+
|
| 106 |
+
return intsctk, unionk
|
parrot/lib/python3.10/site-packages/torchvision/ops/giou_loss.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from ..utils import _log_api_usage_once
|
| 4 |
+
from ._utils import _loss_inter_union, _upcast_non_float
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def generalized_box_iou_loss(
    boxes1: torch.Tensor,
    boxes2: torch.Tensor,
    reduction: str = "none",
    eps: float = 1e-7,
) -> torch.Tensor:

    """
    Gradient-friendly IoU loss with an additional penalty that is non-zero when the
    boxes do not overlap and scales with the size of their smallest enclosing box.
    This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.

    Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``, and The two boxes should have the
    same dimensions.

    Args:
        boxes1 (Tensor[N, 4] or Tensor[4]): first set of boxes
        boxes2 (Tensor[N, 4] or Tensor[4]): second set of boxes
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
            applied to the output. ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'none'``
        eps (float): small number to prevent division by zero. Default: 1e-7

    Returns:
        Tensor: Loss tensor with the reduction option applied.

    Reference:
        Hamid Rezatofighi et al.: Generalized Intersection over Union:
        A Metric and A Loss for Bounding Box Regression:
        https://arxiv.org/abs/1902.09630
    """

    # Original implementation from https://github.com/facebookresearch/fvcore/blob/bfff2ef/fvcore/nn/giou_loss.py

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(generalized_box_iou_loss)

    # Upcast to float to protect the area products below from overflow.
    boxes1 = _upcast_non_float(boxes1)
    boxes2 = _upcast_non_float(boxes2)
    intsctk, unionk = _loss_inter_union(boxes1, boxes2)
    iouk = intsctk / (unionk + eps)

    x1, y1, x2, y2 = boxes1.unbind(dim=-1)
    x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)

    # smallest enclosing box
    xc1 = torch.min(x1, x1g)
    yc1 = torch.min(y1, y1g)
    xc2 = torch.max(x2, x2g)
    yc2 = torch.max(y2, y2g)

    # GIoU = IoU - (enclosing area - union) / enclosing area
    area_c = (xc2 - xc1) * (yc2 - yc1)
    miouk = iouk - ((area_c - unionk) / (area_c + eps))

    loss = 1 - miouk

    # Check reduction option and return loss accordingly
    if reduction == "none":
        pass
    elif reduction == "mean":
        # 0.0 * loss.sum() keeps the empty-input result differentiable
        # instead of producing NaN from mean() of zero elements.
        loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
    elif reduction == "sum":
        loss = loss.sum()
    else:
        # Fix: the original message had an unbalanced quote ("'{reduction} "),
        # producing a malformed error string.
        raise ValueError(
            f"Invalid Value for arg 'reduction': '{reduction}' \n Supported reduction modes: 'none', 'mean', 'sum'"
        )
    return loss
|
parrot/lib/python3.10/site-packages/torchvision/transforms/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .transforms import *
|
| 2 |
+
from .autoaugment import *
|
parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (224 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc
ADDED
|
Binary file (9.65 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc
ADDED
|
Binary file (24.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc
ADDED
|
Binary file (4.05 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_pil.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numbers
|
| 2 |
+
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from PIL import Image, ImageEnhance, ImageOps
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
import accimage
|
| 10 |
+
except ImportError:
|
| 11 |
+
accimage = None
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
    """Return True if *img* is a PIL image (or an accimage image when the
    optional accimage backend is installed)."""
    accepted = (Image.Image,) if accimage is None else (Image.Image, accimage.Image)
    return isinstance(img, accepted)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@torch.jit.unused
def get_dimensions(img: Any) -> List[int]:
    """Return ``[channels, height, width]`` of a PIL image."""
    if not _is_pil_image(img):
        raise TypeError(f"Unexpected type {type(img)}")
    # accimage images expose .channels instead of .getbands().
    channels = len(img.getbands()) if hasattr(img, "getbands") else img.channels
    width, height = img.size
    return [channels, height, width]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@torch.jit.unused
def get_image_size(img: Any) -> List[int]:
    """Return ``[width, height]`` of a PIL image."""
    if not _is_pil_image(img):
        raise TypeError(f"Unexpected type {type(img)}")
    return list(img.size)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@torch.jit.unused
def get_image_num_channels(img: Any) -> int:
    """Return the number of channels (bands) of a PIL image."""
    if not _is_pil_image(img):
        raise TypeError(f"Unexpected type {type(img)}")
    # accimage images expose .channels instead of .getbands().
    return len(img.getbands()) if hasattr(img, "getbands") else img.channels
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@torch.jit.unused
def hflip(img: Image.Image) -> Image.Image:
    """Mirror *img* horizontally (left/right flip)."""
    if _is_pil_image(img):
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@torch.jit.unused
def vflip(img: Image.Image) -> Image.Image:
    """Mirror *img* vertically (top/bottom flip)."""
    if _is_pil_image(img):
        return img.transpose(Image.FLIP_TOP_BOTTOM)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@torch.jit.unused
def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image:
    """Scale image brightness; 0 gives a black image, 1 the original."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageEnhance.Brightness(img).enhance(brightness_factor)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@torch.jit.unused
def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image:
    """Scale image contrast; 0 gives a solid gray image, 1 the original."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageEnhance.Contrast(img).enhance(contrast_factor)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@torch.jit.unused
def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image:
    """Scale color saturation; 0 gives a grayscale image, 1 the original."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageEnhance.Color(img).enhance(saturation_factor)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@torch.jit.unused
def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image:
    """Shift the hue of *img* by ``hue_factor`` (in [-0.5, 0.5], i.e. a
    fraction of a full turn around the HSV hue circle).

    Single-channel and float modes have no hue; they are returned unchanged.
    """
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")

    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    input_mode = img.mode
    if input_mode in {"L", "1", "I", "F"}:
        # Grayscale / binary / integer / float modes carry no hue information.
        return img

    h, s, v = img.convert("HSV").split()

    np_h = np.array(h, dtype=np.uint8)
    # This will over/underflow, as desired
    # (hue is cyclic, so uint8 wraparound implements the rotation).
    np_h += np.array(hue_factor * 255).astype(np.uint8)

    h = Image.fromarray(np_h, "L")

    img = Image.merge("HSV", (h, s, v)).convert(input_mode)
    return img
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@torch.jit.unused
def adjust_gamma(
    img: Image.Image,
    gamma: float,
    gain: float = 1.0,
) -> Image.Image:
    """Apply the power-law transform ``out = gain * (in / 255) ** gamma`` via
    a precomputed 256-entry lookup table; the image mode is preserved."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if gamma < 0:
        raise ValueError("Gamma should be a non-negative real number")

    input_mode = img.mode
    # (255 + 1 - 1e-3) maps an input of 255 to exactly 255 after int().
    scale = (255 + 1 - 1e-3) * gain
    lut = [int(scale * (level / 255.0) ** gamma) for level in range(256)] * 3
    # use PIL's point-function to accelerate this part
    result = img.convert("RGB").point(lut)
    return result.convert(input_mode)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@torch.jit.unused
def pad(
    img: Image.Image,
    padding: Union[int, List[int], Tuple[int, ...]],
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
    padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> Image.Image:
    """Pad a PIL image on its borders.

    ``padding`` is an int (all sides), a 2-tuple (left/right, top/bottom) or a
    4-tuple (left, top, right, bottom). Negative values crop that side
    instead. "constant" mode is handled by PIL; the other modes go through
    numpy's ``np.pad``.
    """
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if not isinstance(padding, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, list):
        padding = tuple(padding)

    if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
        raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")

    if isinstance(padding, tuple) and len(padding) == 1:
        # Compatibility with `functional_tensor.pad`
        padding = padding[0]

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if padding_mode == "constant":
        opts = _parse_fill(fill, img, name="fill")
        if img.mode == "P":
            # ImageOps.expand drops the palette; save and restore it.
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, **opts)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, **opts)
    else:
        # Normalize all accepted forms to explicit per-side amounts.
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, tuple) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, tuple) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        p = [pad_left, pad_top, pad_right, pad_bottom]
        # Negative padding means cropping that side.
        cropping = -np.minimum(p, 0)

        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))

        pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)

        if img.mode == "P":
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@torch.jit.unused
def crop(
    img: Image.Image,
    top: int,
    left: int,
    height: int,
    width: int,
) -> Image.Image:
    """Crop a ``height`` x ``width`` region whose top-left corner is at
    ``(top, left)``."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    box = (left, top, left + width, top + height)
    return img.crop(box)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
@torch.jit.unused
def resize(
    img: Image.Image,
    size: Union[List[int], int],
    interpolation: int = Image.BILINEAR,
) -> Image.Image:
    """Resize to ``size`` given as ``[height, width]`` (PIL wants (w, h),
    hence the swap below)."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    if not isinstance(size, list) or len(size) != 2:
        raise TypeError(f"Got inappropriate size arg: {size}")

    height, width = size
    return img.resize((width, height), interpolation)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
@torch.jit.unused
def _parse_fill(
    fill: Optional[Union[float, List[float], Tuple[float, ...]]],
    img: Image.Image,
    name: str = "fillcolor",
) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
    """Normalize a fill color for PIL geometry ops into ``{name: value}``.

    ``None`` becomes 0; a scalar is broadcast to all channels; a 1-element
    sequence is repeated per channel; for non-float image modes the values
    are truncated to int. Raises ValueError if the sequence length does not
    match the image's channel count.
    """
    # Process fill color for affine transforms
    num_channels = get_image_num_channels(img)
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_channels > 1:
        fill = tuple([fill] * num_channels)
    if isinstance(fill, (list, tuple)):
        if len(fill) == 1:
            fill = fill * num_channels
        elif len(fill) != num_channels:
            msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
            raise ValueError(msg.format(len(fill), num_channels))

        fill = tuple(fill)  # type: ignore[arg-type]

    if img.mode != "F":
        # Non-float modes require integer fill values.
        if isinstance(fill, (list, tuple)):
            fill = tuple(int(x) for x in fill)
        else:
            fill = int(fill)

    return {name: fill}
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
@torch.jit.unused
def affine(
    img: Image.Image,
    matrix: List[float],
    interpolation: int = Image.NEAREST,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    """Apply an inverse affine transform given by the 6-element *matrix*;
    the output keeps the input's size."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    fill_opts = _parse_fill(fill, img)
    return img.transform(img.size, Image.AFFINE, matrix, interpolation, **fill_opts)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@torch.jit.unused
def rotate(
    img: Image.Image,
    angle: float,
    interpolation: int = Image.NEAREST,
    expand: bool = False,
    center: Optional[Tuple[int, int]] = None,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    """Rotate *img* counter-clockwise by *angle* degrees around *center*
    (image center when None)."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    fill_opts = _parse_fill(fill, img)
    return img.rotate(angle, interpolation, expand, center, **fill_opts)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@torch.jit.unused
def perspective(
    img: Image.Image,
    perspective_coeffs: List[float],
    interpolation: int = Image.BICUBIC,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    """Apply a perspective transform given by the 8 coefficients; output
    keeps the input's size."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    fill_opts = _parse_fill(fill, img)
    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **fill_opts)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
@torch.jit.unused
def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image:
    """Convert to grayscale with 1 channel ("L") or 3 identical channels
    (gray replicated into an RGB image)."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if num_output_channels not in (1, 3):
        raise ValueError("num_output_channels should be either 1 or 3")

    gray = img.convert("L")
    if num_output_channels == 1:
        return gray

    # Replicate the single gray channel into R, G and B.
    plane = np.array(gray, dtype=np.uint8)
    return Image.fromarray(np.dstack([plane, plane, plane]), "RGB")
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
@torch.jit.unused
def invert(img: Image.Image) -> Image.Image:
    """Return the negative of *img* (each pixel value ``v`` becomes ``255 - v``)."""
    if _is_pil_image(img):
        return ImageOps.invert(img)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@torch.jit.unused
def posterize(img: Image.Image, bits: int) -> Image.Image:
    """Reduce each color channel to *bits* bits."""
    if _is_pil_image(img):
        return ImageOps.posterize(img, bits)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
@torch.jit.unused
def solarize(img: Image.Image, threshold: int) -> Image.Image:
    """Invert all pixel values at or above *threshold*."""
    if _is_pil_image(img):
        return ImageOps.solarize(img, threshold)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
@torch.jit.unused
def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image:
    """Scale image sharpness; 0 gives a blurred image, 1 the original."""
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageEnhance.Sharpness(img).enhance(sharpness_factor)
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
@torch.jit.unused
def autocontrast(img: Image.Image) -> Image.Image:
    """Remap image intensities so the darkest pixel becomes black and the
    lightest becomes white."""
    if _is_pil_image(img):
        return ImageOps.autocontrast(img)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
@torch.jit.unused
def equalize(img: Image.Image) -> Image.Image:
    """Equalize the image histogram to flatten the intensity distribution."""
    if _is_pil_image(img):
        return ImageOps.equalize(img)
    raise TypeError(f"img should be PIL Image. Got {type(img)}")
|
parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py
ADDED
|
@@ -0,0 +1,960 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
from typing import List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
from torch.nn.functional import conv2d, grid_sample, interpolate, pad as torch_pad
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _is_tensor_a_torch_image(x: Tensor) -> bool:
|
| 10 |
+
return x.ndim >= 2
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _assert_image_tensor(img: Tensor) -> None:
    """Raise TypeError unless ``img`` looks like a torch image tensor."""
    if _is_tensor_a_torch_image(img):
        return
    raise TypeError("Tensor is not a torch image.")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_dimensions(img: Tensor) -> List[int]:
    """Return ``[channels, height, width]``; a 2-D tensor counts as 1 channel."""
    _assert_image_tensor(img)
    h, w = img.shape[-2], img.shape[-1]
    c = img.shape[-3] if img.ndim > 2 else 1
    return [c, h, w]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_image_size(img: Tensor) -> List[int]:
    """Return the spatial size of a tensor image as ``[width, height]``."""
    _assert_image_tensor(img)
    height, width = img.shape[-2:]
    return [width, height]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_image_num_channels(img: Tensor) -> int:
    """Channel count of a tensor image: 1 for 2-D inputs, else size of dim -3."""
    _assert_image_tensor(img)
    if img.ndim > 2:
        return img.shape[-3]
    if img.ndim == 2:
        return 1
    # Unreachable after the image assertion above; kept for parity.
    raise TypeError(f"Input ndim should be 2 or more. Got {img.ndim}")
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _max_value(dtype: torch.dtype) -> int:
|
| 42 |
+
if dtype == torch.uint8:
|
| 43 |
+
return 255
|
| 44 |
+
elif dtype == torch.int8:
|
| 45 |
+
return 127
|
| 46 |
+
elif dtype == torch.int16:
|
| 47 |
+
return 32767
|
| 48 |
+
elif dtype == torch.int32:
|
| 49 |
+
return 2147483647
|
| 50 |
+
elif dtype == torch.int64:
|
| 51 |
+
return 9223372036854775807
|
| 52 |
+
else:
|
| 53 |
+
# This is only here for completeness. This value is implicitly assumed in a lot of places so changing it is not
|
| 54 |
+
# easy.
|
| 55 |
+
return 1
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _assert_channels(img: Tensor, permitted: List[int]) -> None:
    """Raise TypeError when the image channel count is not in ``permitted``."""
    channels = get_dimensions(img)[0]
    if channels in permitted:
        return
    raise TypeError(f"Input image tensor permitted channel values are {permitted}, but found {channels}")
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert ``image`` to ``dtype``, rescaling between value ranges.

    Floating-point images are assumed to be in [0, 1]; integer images span
    [0, _max_value(dtype)]. Raises RuntimeError for float->int casts that
    cannot be performed safely (the integer type cannot represent all values
    of the float type exactly).
    """
    if image.dtype == dtype:
        return image

    if image.is_floating_point():

        # float to float: plain cast, no rescaling needed (both are [0, 1]).
        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            return image.to(dtype)

        # float to int
        if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
            image.dtype == torch.float64 and dtype == torch.int64
        ):
            msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
            raise RuntimeError(msg)

        # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
        # For data in the range 0-1, (float * 255).to(uint) is only 255
        # when float is exactly 1.0.
        # `max + 1 - epsilon` provides more evenly distributed mapping of
        # ranges of floats to ints.
        eps = 1e-3
        max_val = float(_max_value(dtype))
        result = image.mul(max_val + 1.0 - eps)
        return result.to(dtype)
    else:
        input_max = float(_max_value(image.dtype))

        # int to float
        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            image = image.to(dtype)
            return image / input_max

        output_max = float(_max_value(dtype))

        # int to int
        if input_max > output_max:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image // factor can produce different results
            factor = int((input_max + 1) // (output_max + 1))
            image = torch.div(image, factor, rounding_mode="floor")
            return image.to(dtype)
        else:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image * factor can produce different results
            factor = int((output_max + 1) // (input_max + 1))
            image = image.to(dtype)
            return image * factor
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def vflip(img: Tensor) -> Tensor:
    """Flip a tensor image vertically (top-to-bottom, along dim -2)."""
    _assert_image_tensor(img)
    return torch.flip(img, dims=[-2])
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def hflip(img: Tensor) -> Tensor:
    """Flip a tensor image horizontally (left-to-right, along dim -1)."""
    _assert_image_tensor(img)
    return torch.flip(img, dims=[-1])
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the region ``[top:top+height, left:left+width]`` of a tensor image.

    Areas of the requested window that fall outside the image are filled with
    zeros, matching PIL's behaviour for out-of-bounds crops.
    """
    _assert_image_tensor(img)

    _, img_h, img_w = get_dimensions(img)
    bottom, right = top + height, left + width

    fully_inside = left >= 0 and top >= 0 and right <= img_w and bottom <= img_h
    if fully_inside:
        return img[..., top:bottom, left:right]

    # Clamp the window to the image and zero-pad the missing borders
    # (left, top, right, bottom order).
    pad_ltrb = [
        max(-left + min(0, right), 0),
        max(-top + min(0, bottom), 0),
        max(right - max(img_w, left), 0),
        max(bottom - max(img_h, top), 0),
    ]
    return pad(img[..., max(top, 0) : bottom, max(left, 0) : right], pad_ltrb, fill=0)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert an RGB tensor image to grayscale using ITU-R 601-2 luma weights.

    Single-channel inputs are returned as a copy. ``num_output_channels`` may
    be 1 or 3; with 3, the luminance plane is replicated across channels.
    """
    if img.ndim < 3:
        raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
    _assert_channels(img, [1, 3])

    if num_output_channels not in (1, 3):
        raise ValueError("num_output_channels should be either 1 or 3")

    if img.shape[-3] != 3:
        l_img = img.clone()
    else:
        # Same weights as TF's rgb_to_grayscale:
        # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
        r, g, b = img.unbind(dim=-3)
        l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype).unsqueeze(dim=-3)

    return l_img.expand(img.shape) if num_output_channels == 3 else l_img
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Scale brightness: factor 0 gives black, 1 the original, >1 brighter."""
    if brightness_factor < 0:
        raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.")
    _assert_image_tensor(img)
    _assert_channels(img, [1, 3])
    # Blending towards an all-zero image is equivalent to scaling pixel values.
    return _blend(img, torch.zeros_like(img), brightness_factor)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast by blending with the mean luminance: 0 gives solid gray."""
    if contrast_factor < 0:
        raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.")
    _assert_image_tensor(img)
    _assert_channels(img, [3, 1])

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    if get_dimensions(img)[0] == 3:
        gray = rgb_to_grayscale(img).to(dtype)
    else:
        gray = img.to(dtype)
    # Per-image mean (reduced over C, H, W) kept broadcastable for _blend.
    mean = torch.mean(gray, dim=(-3, -2, -1), keepdim=True)

    return _blend(img, mean, contrast_factor)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Shift the hue channel by ``hue_factor`` (in [-0.5, 0.5]) in HSV space."""
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")
    if not isinstance(img, torch.Tensor):
        raise TypeError("Input img should be Tensor image")

    _assert_image_tensor(img)
    _assert_channels(img, [1, 3])
    # Grayscale images carry no hue; return them untouched (PIL behaviour).
    if get_dimensions(img)[0] == 1:
        return img

    orig_dtype = img.dtype
    hsv = _rgb2hsv(convert_image_dtype(img, torch.float32))
    h, s, v = hsv.unbind(dim=-3)
    # Hue is periodic on [0, 1), so wrap the shifted values with modulo.
    shifted = torch.stack(((h + hue_factor) % 1.0, s, v), dim=-3)
    return convert_image_dtype(_hsv2rgb(shifted), orig_dtype)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Blend the image with its grayscale version: factor 0 desaturates fully."""
    if saturation_factor < 0:
        raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.")
    _assert_image_tensor(img)
    _assert_channels(img, [1, 3])

    # A single-channel image has no saturation to adjust (PIL behaviour).
    if get_dimensions(img)[0] == 1:
        return img
    return _blend(img, rgb_to_grayscale(img), saturation_factor)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    """Apply gamma correction: out = gain * img ** gamma, clamped to [0, 1]."""
    if not isinstance(img, torch.Tensor):
        raise TypeError("Input img should be a Tensor.")
    _assert_channels(img, [1, 3])
    if gamma < 0:
        raise ValueError("Gamma should be a non-negative real number")

    dtype = img.dtype
    result = img if torch.is_floating_point(img) else convert_image_dtype(img, torch.float32)
    result = (gain * result**gamma).clamp(0, 1)
    # Restore the caller's dtype (rescales back for integer images).
    return convert_image_dtype(result, dtype)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
    """Return ``ratio * img1 + (1 - ratio) * img2`` clamped to img1's value range."""
    r = float(ratio)
    upper_bound = _max_value(img1.dtype)
    mixed = r * img1 + (1.0 - r) * img2
    return mixed.clamp(0, upper_bound).to(img1.dtype)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _rgb2hsv(img: Tensor) -> Tensor:
    """Convert a float RGB image (channels on dim -3, values in [0, 1]) to HSV.

    Returns a tensor with H, S, V stacked on dim -3; H is in [0, 1).
    """
    r, g, b = img.unbind(dim=-3)

    # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
    # src/libImaging/Convert.c#L330
    maxc = torch.max(img, dim=-3).values
    minc = torch.min(img, dim=-3).values

    # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
    # from happening in the results, because
    # + S channel has division by `maxc`, which is zero only if `maxc = minc`
    # + H channel has division by `(maxc - minc)`.
    #
    # Instead of overwriting NaN afterwards, we just prevent it from occurring, so
    # we don't need to deal with it in case we save the NaN in a buffer in
    # backprop, if it is ever supported, but it doesn't hurt to do so.
    eqc = maxc == minc

    cr = maxc - minc
    # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine.
    ones = torch.ones_like(maxc)
    s = cr / torch.where(eqc, ones, maxc)
    # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
    # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
    # would not matter what values `rc`, `gc`, and `bc` have here, and thus
    # replacing denominator with 1 when `eqc` is fine.
    cr_divisor = torch.where(eqc, ones, cr)
    rc = (maxc - r) / cr_divisor
    gc = (maxc - g) / cr_divisor
    bc = (maxc - b) / cr_divisor

    # Select the hue sector from whichever channel equals the maximum; the
    # boolean factors act as a mutually exclusive per-pixel mask.
    hr = (maxc == r) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
    hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
    h = hr + hg + hb
    # Normalize from sector units ([0, 6)) to [0, 1) with wrap-around.
    h = torch.fmod((h / 6.0 + 1.0), 1.0)
    return torch.stack((h, s, maxc), dim=-3)
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def _hsv2rgb(img: Tensor) -> Tensor:
    """Convert a float HSV image (channels on dim -3) back to RGB.

    Inverse of ``_rgb2hsv``; expects H in [0, 1) and S, V in [0, 1].
    """
    h, s, v = img.unbind(dim=-3)
    # Split hue into an integer sector index i (0..5) and fraction f within it.
    i = torch.floor(h * 6.0)
    f = (h * 6.0) - i
    i = i.to(dtype=torch.int32)

    p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
    q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
    t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
    i = i % 6

    # One-hot mask over the 6 hue sectors, broadcast over the spatial dims.
    mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)

    # Candidate R, G, B values for each of the 6 sectors.
    a1 = torch.stack((v, q, p, p, t, v), dim=-3)
    a2 = torch.stack((t, v, v, q, p, p), dim=-3)
    a3 = torch.stack((p, p, t, v, v, q), dim=-3)
    a4 = torch.stack((a1, a2, a3), dim=-4)

    # Contract the sector axis with the one-hot mask to pick per-pixel values.
    return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
|
| 323 |
+
# padding is left, right, top, bottom
|
| 324 |
+
|
| 325 |
+
# crop if needed
|
| 326 |
+
if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
|
| 327 |
+
neg_min_padding = [-min(x, 0) for x in padding]
|
| 328 |
+
crop_left, crop_right, crop_top, crop_bottom = neg_min_padding
|
| 329 |
+
img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right]
|
| 330 |
+
padding = [max(x, 0) for x in padding]
|
| 331 |
+
|
| 332 |
+
in_sizes = img.size()
|
| 333 |
+
|
| 334 |
+
_x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]
|
| 335 |
+
left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]
|
| 336 |
+
right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3]
|
| 337 |
+
x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device)
|
| 338 |
+
|
| 339 |
+
_y_indices = [i for i in range(in_sizes[-2])]
|
| 340 |
+
top_indices = [i for i in range(padding[2] - 1, -1, -1)]
|
| 341 |
+
bottom_indices = [-(i + 1) for i in range(padding[3])]
|
| 342 |
+
y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device)
|
| 343 |
+
|
| 344 |
+
ndim = img.ndim
|
| 345 |
+
if ndim == 3:
|
| 346 |
+
return img[:, y_indices[:, None], x_indices[None, :]]
|
| 347 |
+
elif ndim == 4:
|
| 348 |
+
return img[:, :, y_indices[:, None], x_indices[None, :]]
|
| 349 |
+
else:
|
| 350 |
+
raise RuntimeError("Symmetric padding of N-D tensors are not supported yet")
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _parse_pad_padding(padding: Union[int, List[int]]) -> List[int]:
|
| 354 |
+
if isinstance(padding, int):
|
| 355 |
+
if torch.jit.is_scripting():
|
| 356 |
+
# This maybe unreachable
|
| 357 |
+
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
|
| 358 |
+
pad_left = pad_right = pad_top = pad_bottom = padding
|
| 359 |
+
elif len(padding) == 1:
|
| 360 |
+
pad_left = pad_right = pad_top = pad_bottom = padding[0]
|
| 361 |
+
elif len(padding) == 2:
|
| 362 |
+
pad_left = pad_right = padding[0]
|
| 363 |
+
pad_top = pad_bottom = padding[1]
|
| 364 |
+
else:
|
| 365 |
+
pad_left = padding[0]
|
| 366 |
+
pad_top = padding[1]
|
| 367 |
+
pad_right = padding[2]
|
| 368 |
+
pad_bottom = padding[3]
|
| 369 |
+
|
| 370 |
+
return [pad_left, pad_right, pad_top, pad_bottom]
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def pad(
    img: Tensor, padding: Union[int, List[int]], fill: Optional[Union[int, float]] = 0, padding_mode: str = "constant"
) -> Tensor:
    """Pad a tensor image.

    ``padding`` follows the 1/2/4-element convention of ``_parse_pad_padding``;
    ``padding_mode`` is one of "constant", "edge", "reflect" or "symmetric".
    ``fill`` applies to constant mode only (None is treated as 0).
    """
    _assert_image_tensor(img)

    if fill is None:
        fill = 0

    if not isinstance(padding, (int, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (int, float)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, tuple):
        padding = list(padding)

    if isinstance(padding, list):
        # TODO: Jit is failing on loading this op when scripted and saved
        # https://github.com/pytorch/pytorch/issues/81100
        if len(padding) not in [1, 2, 4]:
            raise ValueError(
                f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
            )

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    p = _parse_pad_padding(padding)

    if padding_mode == "symmetric":
        # Handled by a dedicated implementation.
        return _pad_symmetric(img, p)
    if padding_mode == "edge":
        # torch.nn.functional.pad calls this mode "replicate".
        padding_mode = "replicate"

    squeeze_back = img.ndim < 4
    if squeeze_back:
        img = img.unsqueeze(dim=0)

    out_dtype = img.dtype
    cast_back = padding_mode != "constant" and img.dtype not in (torch.float32, torch.float64)
    if cast_back:
        # Temporarily cast to float until pytorch issue is resolved:
        # https://github.com/pytorch/pytorch/issues/40763
        img = img.to(torch.float32)

    if padding_mode == "constant":
        img = torch_pad(img, p, mode=padding_mode, value=float(fill))
    else:
        img = torch_pad(img, p, mode=padding_mode)

    if squeeze_back:
        img = img.squeeze(dim=0)
    if cast_back:
        img = img.to(out_dtype)

    return img
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def resize(
    img: Tensor,
    size: List[int],
    interpolation: str = "bilinear",
    antialias: Optional[bool] = True,
) -> Tensor:
    """Resize a tensor image with ``torch.nn.functional.interpolate``.

    ``antialias`` only has an effect for bilinear/bicubic modes; for other
    modes it is silently disabled (documented behaviour, since True is the
    default).
    """
    _assert_image_tensor(img)

    if isinstance(size, tuple):
        size = list(size)

    if antialias is None:
        antialias = False

    if antialias and interpolation not in ["bilinear", "bicubic"]:
        # Drop antialias instead of raising: the flag is irrelevant for modes
        # other than bilinear/bicubic and True is the default.
        antialias = False

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64])

    # align_corners must be False for bilinear/bicubic and None otherwise,
    # or interpolate() emits a warning.
    align_corners = False if interpolation in ["bilinear", "bicubic"] else None
    img = interpolate(img, size=size, mode=interpolation, align_corners=align_corners, antialias=antialias)

    # Bicubic interpolation can overshoot the valid uint8 range; clamp before
    # casting back.
    if interpolation == "bicubic" and out_dtype == torch.uint8:
        img = img.clamp(min=0, max=255)

    return _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype)
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def _assert_grid_transform_inputs(
    img: Tensor,
    matrix: Optional[List[float]],
    interpolation: str,
    fill: Optional[Union[int, float, List[float]]],
    supported_interpolation_modes: List[str],
    coeffs: Optional[List[float]] = None,
) -> None:
    """Validate the common arguments of grid-based transforms (affine, rotate, ...)."""
    if not isinstance(img, torch.Tensor):
        raise TypeError("Input img should be Tensor")
    _assert_image_tensor(img)

    if matrix is not None:
        if not isinstance(matrix, list):
            raise TypeError("Argument matrix should be a list")
        if len(matrix) != 6:
            raise ValueError("Argument matrix should have 6 float values")

    if coeffs is not None and len(coeffs) != 8:
        raise ValueError("Argument coeffs should have 8 float values")

    if fill is not None and not isinstance(fill, (int, float, tuple, list)):
        warnings.warn("Argument fill should be either int, float, tuple or list")

    # A sequence fill must either broadcast (length 1) or match the channels.
    num_channels = get_dimensions(img)[0]
    if fill is not None and isinstance(fill, (tuple, list)) and len(fill) > 1 and len(fill) != num_channels:
        msg = (
            "The number of elements in 'fill' cannot broadcast to match the number of "
            "channels of the image ({} != {})"
        )
        raise ValueError(msg.format(len(fill), num_channels))

    if interpolation not in supported_interpolation_modes:
        raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with Tensor input")
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]:
|
| 515 |
+
need_squeeze = False
|
| 516 |
+
# make image NCHW
|
| 517 |
+
if img.ndim < 4:
|
| 518 |
+
img = img.unsqueeze(dim=0)
|
| 519 |
+
need_squeeze = True
|
| 520 |
+
|
| 521 |
+
out_dtype = img.dtype
|
| 522 |
+
need_cast = False
|
| 523 |
+
if out_dtype not in req_dtypes:
|
| 524 |
+
need_cast = True
|
| 525 |
+
req_dtype = req_dtypes[0]
|
| 526 |
+
img = img.to(req_dtype)
|
| 527 |
+
return img, need_cast, need_squeeze, out_dtype
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
|
| 531 |
+
if need_squeeze:
|
| 532 |
+
img = img.squeeze(dim=0)
|
| 533 |
+
|
| 534 |
+
if need_cast:
|
| 535 |
+
if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
|
| 536 |
+
# it is better to round before cast
|
| 537 |
+
img = torch.round(img)
|
| 538 |
+
img = img.to(out_dtype)
|
| 539 |
+
|
| 540 |
+
return img
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
def _apply_grid_transform(
    img: Tensor, grid: Tensor, mode: str, fill: Optional[Union[int, float, List[float]]]
) -> Tensor:
    """Sample ``img`` with ``grid`` via grid_sample, supporting custom fill.

    The image is normalized to NCHW and the grid's dtype first. Custom fill
    colors are implemented by appending a ones mask channel before sampling
    and compositing afterwards, which is cheaper than a second grid_sample.
    """
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype])

    if img.shape[0] > 1:
        # Apply same grid to a batch of images
        grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])

    # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
    if fill is not None:
        mask = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
        img = torch.cat((img, mask), dim=1)

    img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)

    # Fill with required color
    if fill is not None:
        # After sampling, the mask channel holds the in-bounds coverage of
        # each output pixel (0 = fully out of bounds).
        mask = img[:, -1:, :, :]  # N * 1 * H * W
        img = img[:, :-1, :, :]  # N * C * H * W
        mask = mask.expand_as(img)
        fill_list, len_fill = (fill, len(fill)) if isinstance(fill, (tuple, list)) else ([float(fill)], 1)
        fill_img = torch.tensor(fill_list, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img)
        if mode == "nearest":
            # Hard replace: pixels mapped outside the source get the fill color.
            mask = mask < 0.5
            img[mask] = fill_img[mask]
        else:  # 'bilinear'
            # Soft composite weighted by the interpolated coverage.
            img = img * mask + (1.0 - mask) * fill_img

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def _gen_affine_grid(
    theta: Tensor,
    w: int,
    h: int,
    ow: int,
    oh: int,
) -> Tensor:
    """Build a (1, oh, ow, 2) sampling grid for grid_sample from ``theta``.

    ``w``/``h`` are the input image size used for normalization; ``ow``/``oh``
    are the output grid size (they differ when "expand" is requested).
    """
    # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
    # AffineGridGenerator.cpp#L18
    # Difference with AffineGridGenerator is that:
    # 1) we normalize grid values after applying theta
    # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate
    d = 0.5
    # Homogeneous pixel-center coordinates (x, y, 1) for every output location.
    base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
    x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    # Fold the [-1, 1] normalization (by the *input* size) into theta.
    rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
    output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
    return output_grid.view(1, oh, ow, 2)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
def affine(
    img: Tensor,
    matrix: List[float],
    interpolation: str = "nearest",
    fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
    """Apply the inverse affine transform ``matrix`` (6 floats) to a tensor image."""
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])

    grid_dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=grid_dtype, device=img.device).reshape(1, 2, 3)
    height, width = img.shape[-2], img.shape[-1]
    # The grid is generated on the same device as theta and img; the output
    # size equals the input size (no "expand" here).
    grid = _gen_affine_grid(theta, w=width, h=height, ow=width, oh=height)
    return _apply_grid_transform(img, grid, interpolation, fill=fill)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
|
| 620 |
+
|
| 621 |
+
# Inspired of PIL implementation:
|
| 622 |
+
# https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054
|
| 623 |
+
|
| 624 |
+
# pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points.
|
| 625 |
+
# Points are shifted due to affine matrix torch convention about
|
| 626 |
+
# the center point. Center is (0, 0) for image center pivot point (w * 0.5, h * 0.5)
|
| 627 |
+
pts = torch.tensor(
|
| 628 |
+
[
|
| 629 |
+
[-0.5 * w, -0.5 * h, 1.0],
|
| 630 |
+
[-0.5 * w, 0.5 * h, 1.0],
|
| 631 |
+
[0.5 * w, 0.5 * h, 1.0],
|
| 632 |
+
[0.5 * w, -0.5 * h, 1.0],
|
| 633 |
+
]
|
| 634 |
+
)
|
| 635 |
+
theta = torch.tensor(matrix, dtype=torch.float).view(2, 3)
|
| 636 |
+
new_pts = torch.matmul(pts, theta.T)
|
| 637 |
+
min_vals, _ = new_pts.min(dim=0)
|
| 638 |
+
max_vals, _ = new_pts.max(dim=0)
|
| 639 |
+
|
| 640 |
+
# shift points to [0, w] and [0, h] interval to match PIL results
|
| 641 |
+
min_vals += torch.tensor((w * 0.5, h * 0.5))
|
| 642 |
+
max_vals += torch.tensor((w * 0.5, h * 0.5))
|
| 643 |
+
|
| 644 |
+
# Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
|
| 645 |
+
tol = 1e-4
|
| 646 |
+
cmax = torch.ceil((max_vals / tol).trunc_() * tol)
|
| 647 |
+
cmin = torch.floor((min_vals / tol).trunc_() * tol)
|
| 648 |
+
size = cmax - cmin
|
| 649 |
+
return int(size[0]), int(size[1]) # w, h
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
def rotate(
    img: Tensor,
    matrix: List[float],
    interpolation: str = "nearest",
    expand: bool = False,
    fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
    """Rotate an image tensor using the inverse affine coefficients in ``matrix``.

    When ``expand`` is True the output canvas grows so the whole rotated image fits.
    """
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
    height, width = img.shape[-2], img.shape[-1]
    if expand:
        ow, oh = _compute_affine_output_size(matrix, width, height)
    else:
        ow, oh = width, height
    grid_dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=grid_dtype, device=img.device).reshape(1, 2, 3)
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=width, h=height, ow=ow, oh=oh)

    return _apply_grid_transform(img, grid, interpolation, fill=fill)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor:
|
| 671 |
+
# https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
|
| 672 |
+
# src/libImaging/Geometry.c#L394
|
| 673 |
+
|
| 674 |
+
#
|
| 675 |
+
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
|
| 676 |
+
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
|
| 677 |
+
#
|
| 678 |
+
theta1 = torch.tensor(
|
| 679 |
+
[[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device
|
| 680 |
+
)
|
| 681 |
+
theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device)
|
| 682 |
+
|
| 683 |
+
d = 0.5
|
| 684 |
+
base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
|
| 685 |
+
x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
|
| 686 |
+
base_grid[..., 0].copy_(x_grid)
|
| 687 |
+
y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
|
| 688 |
+
base_grid[..., 1].copy_(y_grid)
|
| 689 |
+
base_grid[..., 2].fill_(1)
|
| 690 |
+
|
| 691 |
+
rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
|
| 692 |
+
output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
|
| 693 |
+
output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))
|
| 694 |
+
|
| 695 |
+
output_grid = output_grid1 / output_grid2 - 1.0
|
| 696 |
+
return output_grid.view(1, oh, ow, 2)
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def perspective(
    img: Tensor,
    perspective_coeffs: List[float],
    interpolation: str = "bilinear",
    fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
    """Apply a perspective transform given its eight PIL-style coefficients."""
    if not (isinstance(img, torch.Tensor)):
        raise TypeError("Input img should be Tensor.")

    _assert_image_tensor(img)

    _assert_grid_transform_inputs(
        img,
        matrix=None,
        interpolation=interpolation,
        fill=fill,
        supported_interpolation_modes=["nearest", "bilinear"],
        coeffs=perspective_coeffs,
    )

    oh, ow = img.shape[-2], img.shape[-1]
    grid_dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=grid_dtype, device=img.device)
    return _apply_grid_transform(img, grid, interpolation, fill=fill)
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
def _get_gaussian_kernel1d(kernel_size: int, sigma: float, dtype: torch.dtype, device: torch.device) -> Tensor:
|
| 726 |
+
ksize_half = (kernel_size - 1) * 0.5
|
| 727 |
+
|
| 728 |
+
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, dtype=dtype, device=device)
|
| 729 |
+
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
|
| 730 |
+
kernel1d = pdf / pdf.sum()
|
| 731 |
+
|
| 732 |
+
return kernel1d
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
def _get_gaussian_kernel2d(
    kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    """Return a separable 2D Gaussian kernel as the outer product of two 1D kernels."""
    kernel_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0], dtype, device)
    kernel_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1], dtype, device)
    return torch.mm(kernel_y[:, None], kernel_x[None, :])
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    """Blur an image tensor with a Gaussian kernel of size (kx, ky) and std (sx, sy)."""
    if not (isinstance(img, torch.Tensor)):
        raise TypeError(f"img should be Tensor. Got {type(img)}")

    _assert_image_tensor(img)

    kernel_dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=kernel_dtype, device=img.device)
    # One kernel copy per channel for a depthwise (grouped) convolution.
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype])

    # padding = (left, right, top, bottom)
    pad_x = kernel_size[0] // 2
    pad_y = kernel_size[1] // 2
    img = torch_pad(img, [pad_x, pad_x, pad_y, pad_y], mode="reflect")
    img = conv2d(img, kernel, groups=img.shape[-3])

    return _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def invert(img: Tensor) -> Tensor:
    """Invert pixel values relative to the maximum value of the image dtype."""
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")

    _assert_channels(img, [1, 3])

    # e.g. uint8: 255 - img; float: 1.0 - img.
    return _max_value(img.dtype) - img
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def posterize(img: Tensor, bits: int) -> Tensor:
    """Keep only the ``bits`` most-significant bits of each uint8 channel value."""
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
    if img.dtype != torch.uint8:
        raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}")

    _assert_channels(img, [1, 3])
    # JIT-friendly for: ~(2 ** (8 - bits) - 1)
    return img & -int(2 ** (8 - bits))
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
def solarize(img: Tensor, threshold: float) -> Tensor:
    """Invert all pixel values greater than or equal to ``threshold``."""
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")

    _assert_channels(img, [1, 3])

    if threshold > _max_value(img.dtype):
        raise TypeError("Threshold should be less than bound of img.")

    # Pixels below the threshold are kept as-is; the rest come from the inverted image.
    return torch.where(img >= threshold, invert(img), img)
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
def _blurred_degenerate_image(img: Tensor) -> Tensor:
    """Return a smoothed copy of ``img`` used as the degenerate image for sharpness blending."""
    conv_dtype = img.dtype if torch.is_floating_point(img) else torch.float32

    # 3x3 smoothing kernel with an emphasized center, normalized to sum to 1.
    kernel = torch.ones((3, 3), dtype=conv_dtype, device=img.device)
    kernel[1, 1] = 5.0
    kernel /= kernel.sum()
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    blurred, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype])
    blurred = conv2d(blurred, kernel, groups=blurred.shape[-3])
    blurred = _cast_squeeze_out(blurred, need_cast, need_squeeze, out_dtype)

    # The valid convolution shrinks each side by one pixel, so keep the
    # original border and only replace the interior.
    result = img.clone()
    result[..., 1:-1, 1:-1] = blurred

    return result
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Blend ``img`` with its blurred version: factor 0 is fully blurred, 1 is unchanged."""
    if sharpness_factor < 0:
        raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.")

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    # Too small to blur: the 3x3 kernel needs an interior to operate on.
    if img.size(-1) <= 2 or img.size(-2) <= 2:
        return img

    return _blend(img, _blurred_degenerate_image(img), sharpness_factor)
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
def autocontrast(img: Tensor) -> Tensor:
    """Stretch each channel's intensity range to the full bound of the image dtype."""
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")

    _assert_channels(img, [1, 3])

    bound = _max_value(img.dtype)
    calc_dtype = img.dtype if torch.is_floating_point(img) else torch.float32

    minimum = img.amin(dim=(-2, -1), keepdim=True).to(calc_dtype)
    maximum = img.amax(dim=(-2, -1), keepdim=True).to(calc_dtype)
    scale = bound / (maximum - minimum)
    # Constant channels (max == min) produce inf/nan scales; leave them untouched.
    flat_channels = torch.isfinite(scale).logical_not()
    minimum[flat_channels] = 0
    scale[flat_channels] = 1

    return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)
|
| 859 |
+
|
| 860 |
+
|
| 861 |
+
def _scale_channel(img_chan: Tensor) -> Tensor:
|
| 862 |
+
# TODO: we should expect bincount to always be faster than histc, but this
|
| 863 |
+
# isn't always the case. Once
|
| 864 |
+
# https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if
|
| 865 |
+
# block and only use bincount.
|
| 866 |
+
if img_chan.is_cuda:
|
| 867 |
+
hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)
|
| 868 |
+
else:
|
| 869 |
+
hist = torch.bincount(img_chan.reshape(-1), minlength=256)
|
| 870 |
+
|
| 871 |
+
nonzero_hist = hist[hist != 0]
|
| 872 |
+
step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode="floor")
|
| 873 |
+
if step == 0:
|
| 874 |
+
return img_chan
|
| 875 |
+
|
| 876 |
+
lut = torch.div(torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode="floor"), step, rounding_mode="floor")
|
| 877 |
+
lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)
|
| 878 |
+
|
| 879 |
+
return lut[img_chan.to(torch.int64)].to(torch.uint8)
|
| 880 |
+
|
| 881 |
+
|
| 882 |
+
def _equalize_single_image(img: Tensor) -> Tensor:
    """Equalize each channel of a single (C, H, W) image independently."""
    equalized = [_scale_channel(img[channel]) for channel in range(img.size(0))]
    return torch.stack(equalized)
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def equalize(img: Tensor) -> Tensor:
    """Histogram-equalize a uint8 image (C, H, W) or batch of images (B, C, H, W)."""
    _assert_image_tensor(img)

    if not (3 <= img.ndim <= 4):
        raise TypeError(f"Input image tensor should have 3 or 4 dimensions, but found {img.ndim}")
    if img.dtype != torch.uint8:
        raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}")

    _assert_channels(img, [1, 3])

    if img.ndim == 3:
        return _equalize_single_image(img)

    # Batched input: equalize every image independently.
    return torch.stack([_equalize_single_image(x) for x in img])
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float image tensor channel-wise: (tensor - mean) / std.

    Raises:
        TypeError: if ``tensor`` is not floating point.
        ValueError: if ``tensor`` has fewer than 3 dims or any ``std`` entry is zero.
    """
    _assert_image_tensor(tensor)

    if not tensor.is_floating_point():
        raise TypeError(f"Input tensor should be a float tensor. Got {tensor.dtype}.")

    if tensor.ndim < 3:
        raise ValueError(
            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {tensor.size()}"
        )

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError(f"std evaluated to zero after conversion to {dtype}, leading to division by zero.")
    # Reshape 1D stats to (C, 1, 1) so they broadcast over H and W.
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    return tensor.sub_(mean).div_(std)
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Overwrite the (h, w) region at offset (i, j) of ``img`` with the values in ``v``."""
    _assert_image_tensor(img)

    if not inplace:
        img = img.clone()

    img[..., i : i + h, j : j + w] = v
    return img
|
| 937 |
+
|
| 938 |
+
|
| 939 |
+
def _create_identity_grid(size: List[int]) -> Tensor:
|
| 940 |
+
hw_space = [torch.linspace((-s + 1) / s, (s - 1) / s, s) for s in size]
|
| 941 |
+
grid_y, grid_x = torch.meshgrid(hw_space, indexing="ij")
|
| 942 |
+
return torch.stack([grid_x, grid_y], -1).unsqueeze(0) # 1 x H x W x 2
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
def elastic_transform(
    img: Tensor,
    displacement: Tensor,
    interpolation: str = "bilinear",
    fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
    """Warp ``img`` by adding ``displacement`` to an identity sampling grid."""
    if not (isinstance(img, torch.Tensor)):
        raise TypeError(f"img should be Tensor. Got {type(img)}")

    hw = list(img.shape[-2:])
    grid = _create_identity_grid(hw).to(img.device) + displacement.to(img.device)
    return _apply_grid_transform(img, grid, interpolation, fill)
|
parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings

import torch


# Importing this legacy module immediately emits a deprecation warning:
# the whole file survives only for backward compatibility.
warnings.warn(
    "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the future. "
    "Please use the 'torchvision.transforms.functional' module instead."
)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _is_tensor_video_clip(clip):
|
| 13 |
+
if not torch.is_tensor(clip):
|
| 14 |
+
raise TypeError("clip should be Tensor. Got %s" % type(clip))
|
| 15 |
+
|
| 16 |
+
if not clip.ndimension() == 4:
|
| 17 |
+
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
|
| 18 |
+
|
| 19 |
+
return True
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def crop(clip, i, j, h, w):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
    """
    if clip.ndim != 4:
        raise ValueError("clip should be a 4D tensor")
    return clip[..., i : i + h, j : j + w]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def resize(clip, target_size, interpolation_mode):
    """Resize a (C, T, H, W) clip to ``target_size`` = (height, width)."""
    if len(target_size) != 2:
        raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
    return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    cropped = crop(clip, i, j, h, w)
    return resize(cropped, size, interpolation_mode)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def center_crop(clip, crop_size):
    """Crop the central (th, tw) region of a (C, T, H, W) clip."""
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    th, tw = crop_size
    full_h, full_w = clip.size(-2), clip.size(-1)
    if full_h < th or full_w < tw:
        raise ValueError("height and width must be no smaller than crop_size")

    top = int(round((full_h - th) / 2.0))
    left = int(round((full_w - tw) / 2.0))
    return crop(clip, top, left, th, tw)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if not clip.dtype == torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    # (T, H, W, C) -> (C, T, H, W), rescaled from [0, 255] to [0, 1].
    return clip.float().permute(3, 0, 1, 2) / 255.0
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    if not inplace:
        clip = clip.clone()
    # Broadcast per-channel statistics over the (T, H, W) axes.
    mean_t = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)[:, None, None, None]
    std_t = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)[:, None, None, None]
    return clip.sub_(mean_t).div_(std_t)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Flip along the last (width) axis.
    return clip.flip(-1)
|
parrot/lib/python3.10/site-packages/torchvision/transforms/_presets.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file is part of the private API. Please do not use directly these classes as they will be modified on
|
| 3 |
+
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
|
| 4 |
+
"""
|
| 5 |
+
from typing import Optional, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn, Tensor
|
| 9 |
+
|
| 10 |
+
from . import functional as F, InterpolationMode
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"ObjectDetection",
|
| 15 |
+
"ImageClassification",
|
| 16 |
+
"VideoClassification",
|
| 17 |
+
"SemanticSegmentation",
|
| 18 |
+
"OpticalFlow",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ObjectDetection(nn.Module):
    """Preset inference transform for detection models: PIL/uint8 input -> float tensor in [0, 1]."""

    def forward(self, img: Tensor) -> Tensor:
        # Convert PIL images to tensors first; tensors pass straight through.
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        return F.convert_image_dtype(img, torch.float)

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[0.0, 1.0]``."
        )
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ImageClassification(nn.Module):
    """Preset inference transform for classification models.

    Pipeline: resize -> center crop -> float conversion -> channel-wise normalization.
    """

    def __init__(
        self,
        *,
        crop_size: int,
        resize_size: int = 256,
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        # Scalars are wrapped in lists because F.resize/F.center_crop take sequences.
        self.crop_size = [crop_size]
        self.resize_size = [resize_size]
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        img = F.center_crop(img, self.crop_size)
        if not isinstance(img, Tensor):
            # PIL input path: resize/crop kept it a PIL image, convert before normalizing.
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        return F.normalize(img, mean=self.mean, std=self.std)

    def __repr__(self) -> str:
        body = "".join(
            [
                f"\n    crop_size={self.crop_size}",
                f"\n    resize_size={self.resize_size}",
                f"\n    mean={self.mean}",
                f"\n    std={self.std}",
                f"\n    interpolation={self.interpolation}",
            ]
        )
        return self.__class__.__name__ + "(" + body + "\n)"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
        )
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class VideoClassification(nn.Module):
    """Preset inference transform for video classification models.

    Resizes and crops every frame, normalizes, and permutes to ``(..., C, T, H, W)``.
    """

    def __init__(
        self,
        *,
        crop_size: Tuple[int, int],
        resize_size: Tuple[int, int],
        mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
        std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        self.crop_size = list(crop_size)
        self.resize_size = list(resize_size)
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, vid: Tensor) -> Tensor:
        unbatched = vid.ndim < 5
        if unbatched:
            vid = vid.unsqueeze(dim=0)

        N, T, C, H, W = vid.shape
        # Collapse batch and time so the per-frame ops see a plain (N*T, C, H, W) batch.
        vid = vid.view(-1, C, H, W)
        # We hard-code antialias=False to preserve results after we changed
        # its default from None to True (see
        # https://github.com/pytorch/vision/pull/7160)
        # TODO: we could re-train the video models with antialias=True?
        vid = F.resize(vid, self.resize_size, interpolation=self.interpolation, antialias=False)
        vid = F.center_crop(vid, self.crop_size)
        vid = F.convert_image_dtype(vid, torch.float)
        vid = F.normalize(vid, mean=self.mean, std=self.std)
        H, W = self.crop_size
        vid = vid.view(N, T, C, H, W)
        vid = vid.permute(0, 2, 1, 3, 4)  # (N, T, C, H, W) => (N, C, T, H, W)

        if unbatched:
            vid = vid.squeeze(dim=0)
        return vid

    def __repr__(self) -> str:
        body = "".join(
            [
                f"\n    crop_size={self.crop_size}",
                f"\n    resize_size={self.resize_size}",
                f"\n    mean={self.mean}",
                f"\n    std={self.std}",
                f"\n    interpolation={self.interpolation}",
            ]
        )
        return self.__class__.__name__ + "(" + body + "\n)"

    def describe(self) -> str:
        return (
            "Accepts batched ``(B, T, C, H, W)`` and single ``(T, C, H, W)`` video frame ``torch.Tensor`` objects. "
            f"The frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``. Finally the output "
            "dimensions are permuted to ``(..., C, T, H, W)`` tensors."
        )
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class SemanticSegmentation(nn.Module):
    """Preset inference transform for segmentation models.

    Optional resize -> float conversion -> channel-wise normalization.
    """

    def __init__(
        self,
        *,
        resize_size: Optional[int],
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        # None disables resizing; otherwise wrap in a list for F.resize.
        self.resize_size = [resize_size] if resize_size is not None else None
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        if isinstance(self.resize_size, list):
            img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        if not isinstance(img, Tensor):
            # PIL input path: convert before normalizing.
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        return F.normalize(img, mean=self.mean, std=self.std)

    def __repr__(self) -> str:
        body = "".join(
            [
                f"\n    resize_size={self.resize_size}",
                f"\n    mean={self.mean}",
                f"\n    std={self.std}",
                f"\n    interpolation={self.interpolation}",
            ]
        )
        return self.__class__.__name__ + "(" + body + "\n)"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
            f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
            f"``std={self.std}``."
        )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class OpticalFlow(nn.Module):
    """Preset inference transform for optical-flow models: maps both frames into [-1, 1]."""

    def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
        if not isinstance(img1, Tensor):
            img1 = F.pil_to_tensor(img1)
        if not isinstance(img2, Tensor):
            img2 = F.pil_to_tensor(img2)

        img1 = F.convert_image_dtype(img1, torch.float)
        img2 = F.convert_image_dtype(img2, torch.float)

        # map [0, 1] into [-1, 1]
        img1 = F.normalize(img1, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        img2 = F.normalize(img2, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

        return img1.contiguous(), img2.contiguous()

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[-1.0, 1.0]``."
        )
|