diff --git a/parrot/lib/python3.10/site-packages/anyio/streams/__pycache__/memory.cpython-310.pyc b/parrot/lib/python3.10/site-packages/anyio/streams/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3e9fa8543f3f84a5b0b0e2cdea4ae672c613fa0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/anyio/streams/__pycache__/memory.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efd574298f7733465966fdb8bd13f5a2d9844574 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/__init__.py @@ -0,0 +1,100 @@ +from enum import Enum +from typing import List, Tuple, Union + + +Offsets = Tuple[int, int] + +TextInputSequence = str +"""A :obj:`str` that represents an input sequence """ + +PreTokenizedInputSequence = Union[List[str], Tuple[str]] +"""A pre-tokenized input sequence. Can be one of: + + - A :obj:`List` of :obj:`str` + - A :obj:`Tuple` of :obj:`str` +""" + +TextEncodeInput = Union[ + TextInputSequence, + Tuple[TextInputSequence, TextInputSequence], + List[TextInputSequence], +] +"""Represents a textual input for encoding. Can be either: + + - A single sequence: :data:`~tokenizers.TextInputSequence` + - A pair of sequences: + + - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence` + - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2 +""" + +PreTokenizedEncodeInput = Union[ + PreTokenizedInputSequence, + Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], + List[PreTokenizedInputSequence], +] +"""Represents a pre-tokenized input for encoding. 
Can be either: + + - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence` + - A pair of sequences: + + - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence` + - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2 +""" + +InputSequence = Union[TextInputSequence, PreTokenizedInputSequence] +"""Represents all the possible types of input sequences for encoding. Can be: + + - When ``is_pretokenized=False``: :data:`~TextInputSequence` + - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence` +""" + +EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput] +"""Represents all the possible types of input for encoding. Can be: + + - When ``is_pretokenized=False``: :data:`~TextEncodeInput` + - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput` +""" + + +class OffsetReferential(Enum): + ORIGINAL = "original" + NORMALIZED = "normalized" + + +class OffsetType(Enum): + BYTE = "byte" + CHAR = "char" + + +class SplitDelimiterBehavior(Enum): + REMOVED = "removed" + ISOLATED = "isolated" + MERGED_WITH_PREVIOUS = "merged_with_previous" + MERGED_WITH_NEXT = "merged_with_next" + CONTIGUOUS = "contiguous" + + +from .tokenizers import ( + AddedToken, + Encoding, + NormalizedString, + PreTokenizedString, + Regex, + Token, + Tokenizer, + decoders, + models, + normalizers, + pre_tokenizers, + processors, + trainers, + __version__, +) +from .implementations import ( + BertWordPieceTokenizer, + ByteLevelBPETokenizer, + CharBPETokenizer, + SentencePieceBPETokenizer, + SentencePieceUnigramTokenizer, +) diff --git a/parrot/lib/python3.10/site-packages/tokenizers/__init__.pyi b/parrot/lib/python3.10/site-packages/tokenizers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5dbc665dcf67fa37034de75619eedb9f346e955e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/__init__.pyi @@ -0,0 +1,1200 @@ +# Generated content DO NOT EDIT +class AddedToken: + """ + Represents a token 
that can be be added to a :class:`~tokenizers.Tokenizer`. + It can have special options that defines the way it should behave. + + Args: + content (:obj:`str`): The content of the token + + single_word (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should only match single words. If :obj:`True`, this + token will never match inside of a word. For example the token ``ing`` would match + on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. + The notion of "`inside of a word`" is defined by the word boundaries pattern in + regular expressions (ie. the token should start and end with word boundaries). + + lstrip (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should strip all potential whitespaces on its left side. + If :obj:`True`, this token will greedily match any whitespace on its left. For + example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text + ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). + + rstrip (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should strip all potential whitespaces on its right + side. If :obj:`True`, this token will greedily match any whitespace on its right. + It works just like :obj:`lstrip` but on the right. + + normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): + Defines whether this token should match against the normalized version of the input + text. For example, with the added token ``"yesterday"``, and a normalizer in charge of + lowercasing the text, the token could be extract from the input ``"I saw a lion + Yesterday"``. + special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): + Defines whether this token should be skipped when decoding. 
+ + """ + def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False): + pass + + @property + def content(self): + """ + Get the content of this :obj:`AddedToken` + """ + pass + + @property + def lstrip(self): + """ + Get the value of the :obj:`lstrip` option + """ + pass + + @property + def normalized(self): + """ + Get the value of the :obj:`normalized` option + """ + pass + + @property + def rstrip(self): + """ + Get the value of the :obj:`rstrip` option + """ + pass + + @property + def single_word(self): + """ + Get the value of the :obj:`single_word` option + """ + pass + + @property + def special(self): + """ + Get the value of the :obj:`special` option + """ + pass + +class Encoding: + """ + The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. + """ + @property + def attention_mask(self): + """ + The attention mask + + This indicates to the LM which tokens should be attended to, and which should not. + This is especially important when batching sequences, where we need to applying + padding. + + Returns: + :obj:`List[int]`: The attention mask + """ + pass + + def char_to_token(self, char_pos, sequence_index=0): + """ + Get the token that contains the char at the given position in the input sequence. + + Args: + char_pos (:obj:`int`): + The position of a char in the input string + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target char + + Returns: + :obj:`int`: The index of the token that contains this char in the encoded sequence + """ + pass + + def char_to_word(self, char_pos, sequence_index=0): + """ + Get the word that contains the char at the given position in the input sequence. 
+ + Args: + char_pos (:obj:`int`): + The position of a char in the input string + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target char + + Returns: + :obj:`int`: The index of the word that contains this char in the input sequence + """ + pass + + @property + def ids(self): + """ + The generated IDs + + The IDs are the main input to a Language Model. They are the token indices, + the numerical representations that a LM understands. + + Returns: + :obj:`List[int]`: The list of IDs + """ + pass + + @staticmethod + def merge(encodings, growing_offsets=True): + """ + Merge the list of encodings into one final :class:`~tokenizers.Encoding` + + Args: + encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): + The list of encodings that should be merged in one + + growing_offsets (:obj:`bool`, defaults to :obj:`True`): + Whether the offsets should accumulate while merging + + Returns: + :class:`~tokenizers.Encoding`: The resulting Encoding + """ + pass + + @property + def n_sequences(self): + """ + The number of sequences represented + + Returns: + :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` + """ + pass + + @property + def offsets(self): + """ + The offsets associated to each token + + These offsets let's you slice the input string, and thus retrieve the original + part that led to producing the corresponding token. + + Returns: + A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets + """ + pass + + @property + def overflowing(self): + """ + A :obj:`List` of overflowing :class:`~tokenizers.Encoding` + + When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting + the output into as many pieces as required to match the specified maximum length. + This field lets you retrieve all the subsequent pieces. 
+ + When you use pairs of sequences, the overflowing pieces will contain enough + variations to cover all the possible combinations, while respecting the provided + maximum length. + """ + pass + + def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): + """ + Pad the :class:`~tokenizers.Encoding` at the given length + + Args: + length (:obj:`int`): + The desired length + + direction: (:obj:`str`, defaults to :obj:`right`): + The expected padding direction. Can be either :obj:`right` or :obj:`left` + + pad_id (:obj:`int`, defaults to :obj:`0`): + The ID corresponding to the padding token + + pad_type_id (:obj:`int`, defaults to :obj:`0`): + The type ID corresponding to the padding token + + pad_token (:obj:`str`, defaults to `[PAD]`): + The pad token to use + """ + pass + + @property + def sequence_ids(self): + """ + The generated sequence indices. + + They represent the index of the input sequence associated to each token. + The sequence id can be None if the token is not related to any input sequence, + like for example with special tokens. + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. + """ + pass + + def set_sequence_id(self, sequence_id): + """ + Set the given sequence index + + Set the given sequence index for the whole range of tokens contained in this + :class:`~tokenizers.Encoding`. + """ + pass + + @property + def special_tokens_mask(self): + """ + The special token mask + + This indicates which tokens are special tokens, and which are not. + + Returns: + :obj:`List[int]`: The special tokens mask + """ + pass + + def token_to_chars(self, token_index): + """ + Get the offsets of the token at the given index. + + The returned offsets are related to the input sequence that contains the + token. In order to determine in which input sequence it belongs, you + must call :meth:`~tokenizers.Encoding.token_to_sequence()`. 
+ + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` + """ + pass + + def token_to_sequence(self, token_index): + """ + Get the index of the sequence represented by the given token. + + In the general use case, this method returns :obj:`0` for a single sequence or + the first sequence of a pair, and :obj:`1` for the second sequence of a pair + + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`int`: The sequence id of the given token + """ + pass + + def token_to_word(self, token_index): + """ + Get the index of the word that contains the token in one of the input sequences. + + The returned word index is related to the input sequence that contains + the token. In order to determine in which input sequence it belongs, you + must call :meth:`~tokenizers.Encoding.token_to_sequence()`. + + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`int`: The index of the word in the relevant input sequence. + """ + pass + + @property + def tokens(self): + """ + The generated tokens + + They are the string representation of the IDs. + + Returns: + :obj:`List[str]`: The list of tokens + """ + pass + + def truncate(self, max_length, stride=0, direction="right"): + """ + Truncate the :class:`~tokenizers.Encoding` at the given length + + If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating + this information is lost. It will be considered as representing a single sequence. 
+ + Args: + max_length (:obj:`int`): + The desired length + + stride (:obj:`int`, defaults to :obj:`0`): + The length of previous content to be included in each overflowing piece + + direction (:obj:`str`, defaults to :obj:`right`): + Truncate direction + """ + pass + + @property + def type_ids(self): + """ + The generated type IDs + + Generally used for tasks like sequence classification or question answering, + these tokens let the LM know which input sequence corresponds to each tokens. + + Returns: + :obj:`List[int]`: The list of type ids + """ + pass + + @property + def word_ids(self): + """ + The generated word indices. + + They represent the index of the word associated to each token. + When the input is pre-tokenized, they correspond to the ID of the given input label, + otherwise they correspond to the words indices as defined by the + :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. + + For special tokens and such (any token that was generated from something that was + not part of the input), the output is :obj:`None` + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. + """ + pass + + def word_to_chars(self, word_index, sequence_index=0): + """ + Get the offsets of the word at the given index in one of the input sequences. + + Args: + word_index (:obj:`int`): + The index of a word in one of the input sequences. + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target word + + Returns: + :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` + """ + pass + + def word_to_tokens(self, word_index, sequence_index=0): + """ + Get the encoded tokens corresponding to the word at the given index + in one of the input sequences. + + Args: + word_index (:obj:`int`): + The index of a word in one of the input sequences. 
+ sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target word + + Returns: + :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` + """ + pass + + @property + def words(self): + """ + The generated word indices. + + .. warning:: + This is deprecated and will be removed in a future version. + Please use :obj:`~tokenizers.Encoding.word_ids` instead. + + They represent the index of the word associated to each token. + When the input is pre-tokenized, they correspond to the ID of the given input label, + otherwise they correspond to the words indices as defined by the + :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. + + For special tokens and such (any token that was generated from something that was + not part of the input), the output is :obj:`None` + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. + """ + pass + +class NormalizedString: + """ + NormalizedString + + A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. + While making all the requested modifications, it keeps track of the alignment information + between the two versions of the string. + + Args: + sequence: str: + The string sequence used to initialize this NormalizedString + """ + def append(self, s): + """ + Append the given sequence to the string + """ + pass + + def clear(self): + """ + Clears the string + """ + pass + + def filter(self, func): + """ + Filter each character of the string using the given func + """ + pass + + def for_each(self, func): + """ + Calls the given function for each character of the string + """ + pass + + def lowercase(self): + """ + Lowercase the string + """ + pass + + def lstrip(self): + """ + Strip the left of the string + """ + pass + + def map(self, func): + """ + Calls the given function for each character of the string + + Replaces each character of the string using the returned value. 
Each + returned value **must** be a str of length 1 (ie a character). + """ + pass + + def nfc(self): + """ + Runs the NFC normalization + """ + pass + + def nfd(self): + """ + Runs the NFD normalization + """ + pass + + def nfkc(self): + """ + Runs the NFKC normalization + """ + pass + + def nfkd(self): + """ + Runs the NFKD normalization + """ + pass + + @property + def normalized(self): + """ + The normalized part of the string + """ + pass + + def prepend(self, s): + """ + Prepend the given sequence to the string + """ + pass + + def replace(self, pattern, content): + """ + Replace the content of the given pattern with the provided content + + Args: + pattern: Pattern: + A pattern used to match the string. Usually a string or a Regex + + content: str: + The content to be used as replacement + """ + pass + + def rstrip(self): + """ + Strip the right of the string + """ + pass + + def slice(self, range): + """ + Slice the string using the given range + """ + pass + + def split(self, pattern, behavior): + """ + Split the NormalizedString using the given pattern and the specified behavior + + Args: + pattern: Pattern: + A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` + + behavior: SplitDelimiterBehavior: + The behavior to use when splitting. + Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", + "contiguous" + + Returns: + A list of NormalizedString, representing each split + """ + pass + + def strip(self): + """ + Strip both ends of the string + """ + pass + + def uppercase(self): + """ + Uppercase the string + """ + pass + +class PreTokenizedString: + """ + PreTokenizedString + + Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the + underlying string, while keeping track of the alignment information (offsets). + + The PreTokenizedString manages what we call `splits`. 
Each split represents a substring + which is a subpart of the original string, with the relevant offsets and tokens. + + When calling one of the methods used to modify the PreTokenizedString (namely one of + `split`, `normalize` or `tokenize), only the `splits` that don't have any associated + tokens will get modified. + + Args: + sequence: str: + The string sequence used to initialize this PreTokenizedString + """ + def __init__(self, sequence): + pass + + def get_splits(self, offset_referential="original", offset_type="char"): + """ + Get the splits currently managed by the PreTokenizedString + + Args: + offset_referential: :obj:`str` + Whether the returned splits should have offsets expressed relative + to the original string, or the normalized one. choices: "original", "normalized". + + offset_type: :obj:`str` + Whether the returned splits should have offsets expressed in bytes or chars. + When slicing an str, we usually want to use chars, which is the default value. + Now in some cases it might be interesting to get these offsets expressed in bytes, + so it is possible to change this here. + choices: "char", "bytes" + + Returns + A list of splits + """ + pass + + def normalize(self, func): + """ + Normalize each split of the `PreTokenizedString` using the given `func` + + Args: + func: Callable[[NormalizedString], None]: + The function used to normalize each underlying split. This function + does not need to return anything, just calling the methods on the provided + NormalizedString allow its modification. + """ + pass + + def split(self, func): + """ + Split the PreTokenizedString using the given `func` + + Args: + func: Callable[[index, NormalizedString], List[NormalizedString]]: + The function used to split each underlying split. + It is expected to return a list of `NormalizedString`, that represent the new + splits. If the given `NormalizedString` does not need any splitting, we can + just return it directly. 
+ In order for the offsets to be tracked accurately, any returned `NormalizedString` + should come from calling either `.split` or `.slice` on the received one. + """ + pass + + def to_encoding(self, type_id=0, word_idx=None): + """ + Return an Encoding generated from this PreTokenizedString + + Args: + type_id: int = 0: + The type_id to be used on the generated Encoding. + + word_idx: Optional[int] = None: + An optional word index to be used for each token of this Encoding. If provided, + all the word indices in the generated Encoding will use this value, instead + of the one automatically tracked during pre-tokenization. + + Returns: + An Encoding + """ + pass + + def tokenize(self, func): + """ + Tokenize each split of the `PreTokenizedString` using the given `func` + + Args: + func: Callable[[str], List[Token]]: + The function used to tokenize each underlying split. This function must return + a list of Token generated from the input str. + """ + pass + +class Regex: + """ + Instantiate a new Regex with the given pattern + """ + def __init__(self, pattern): + pass + +class Token: + pass + +class Tokenizer: + """ + A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input + and outputs an :class:`~tokenizers.Encoding`. + + Args: + model (:class:`~tokenizers.models.Model`): + The core algorithm that this :obj:`Tokenizer` should be using. + + """ + def __init__(self, model): + pass + + def add_special_tokens(self, tokens): + """ + Add the given special tokens to the Tokenizer. + + If these tokens are already part of the vocabulary, it just let the Tokenizer know about + them. If they don't exist, the Tokenizer creates them, giving them a new id. + + These special tokens will never be processed by the model (ie won't be split into + multiple tokens), and they can be removed from the output when decoding. 
+ + Args: + tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): + The list of special tokens we want to add to the vocabulary. Each token can either + be a string or an instance of :class:`~tokenizers.AddedToken` for more + customization. + + Returns: + :obj:`int`: The number of tokens that were created in the vocabulary + """ + pass + + def add_tokens(self, tokens): + """ + Add the given tokens to the vocabulary + + The given tokens are added only if they don't already exist in the vocabulary. + Each token then gets a new attributed id. + + Args: + tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): + The list of tokens we want to add to the vocabulary. Each token can be either a + string or an instance of :class:`~tokenizers.AddedToken` for more customization. + + Returns: + :obj:`int`: The number of tokens that were created in the vocabulary + """ + pass + + def decode(self, ids, skip_special_tokens=True): + """ + Decode the given list of ids back to a string + + This is used to decode anything coming back from a Language Model + + Args: + ids (A :obj:`List/Tuple` of :obj:`int`): + The list of ids that we want to decode + + skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether the special tokens should be removed from the decoded string + + Returns: + :obj:`str`: The decoded string + """ + pass + + def decode_batch(self, sequences, skip_special_tokens=True): + """ + Decode a batch of ids back to their corresponding string + + Args: + sequences (:obj:`List` of :obj:`List[int]`): + The batch of sequences we want to decode + + skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether the special tokens should be removed from the decoded strings + + Returns: + :obj:`List[str]`: A list of decoded strings + """ + pass + + @property + def decoder(self): + """ + The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer + """ + pass + + def enable_padding( + self, direction="right", 
pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None + ): + """ + Enable the padding + + Args: + direction (:obj:`str`, `optional`, defaults to :obj:`right`): + The direction in which to pad. Can be either ``right`` or ``left`` + + pad_to_multiple_of (:obj:`int`, `optional`): + If specified, the padding length should always snap to the next multiple of the + given value. For example if we were going to pad witha length of 250 but + ``pad_to_multiple_of=8`` then we will pad to 256. + + pad_id (:obj:`int`, defaults to 0): + The id to be used when padding + + pad_type_id (:obj:`int`, defaults to 0): + The type id to be used when padding + + pad_token (:obj:`str`, defaults to :obj:`[PAD]`): + The pad token to be used when padding + + length (:obj:`int`, `optional`): + If specified, the length at which to pad. If not specified we pad using the size of + the longest sequence in a batch. + """ + pass + + def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"): + """ + Enable truncation + + Args: + max_length (:obj:`int`): + The max length at which to truncate + + stride (:obj:`int`, `optional`): + The length of the previous first sequence to be included in the overflowing + sequence + + strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): + The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or + ``only_second``. + + direction (:obj:`str`, defaults to :obj:`right`): + Truncate direction + """ + pass + + def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): + """ + Encode the given sequence and pair. This method can process raw text sequences + as well as already pre-tokenized sequences. 
+ + Example: + Here are some examples of the inputs that are accepted:: + + encode("A single sequence")` + encode("A sequence", "And its pair")` + encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` + encode( + [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], + is_pretokenized=True + ) + + Args: + sequence (:obj:`~tokenizers.InputSequence`): + The main input sequence we want to encode. This sequence can be either raw + text or pre-tokenized, according to the ``is_pretokenized`` argument: + + - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` + - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` + + pair (:obj:`~tokenizers.InputSequence`, `optional`): + An optional input sequence. The expected format is the same that for ``sequence``. + + is_pretokenized (:obj:`bool`, defaults to :obj:`False`): + Whether the input is already pre-tokenized + + add_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to add the special tokens + + Returns: + :class:`~tokenizers.Encoding`: The encoded result + + """ + pass + + def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): + """ + Encode the given batch of inputs. This method accept both raw text sequences + as well as already pre-tokenized sequences. + + Example: + Here are some examples of the inputs that are accepted:: + + encode_batch([ + "A single sequence", + ("A tuple with a sequence", "And its pair"), + [ "A", "pre", "tokenized", "sequence" ], + ([ "A", "pre", "tokenized", "sequence" ], "And its pair") + ]) + + Args: + input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): + A list of single sequences or pair sequences to encode. 
Each sequence + can be either raw text or pre-tokenized, according to the ``is_pretokenized`` + argument: + + - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` + - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` + + is_pretokenized (:obj:`bool`, defaults to :obj:`False`): + Whether the input is already pre-tokenized + + add_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to add the special tokens + + Returns: + A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch + + """ + pass + + @property + def encode_special_tokens(self): + """ + Modifies the tokenizer in order to use or not the special tokens + during encoding. + + Args: + value (:obj:`bool`): + Whether to use the special tokens or not + + """ + pass + + @staticmethod + def from_buffer(buffer): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. + + Args: + buffer (:obj:`bytes`): + A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_file(path): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. + + Args: + path (:obj:`str`): + A path to a local JSON file representing a previously serialized + :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_pretrained(identifier, revision="main", auth_token=None): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the + Hugging Face Hub. 
+ + Args: + identifier (:obj:`str`): + The identifier of a Model on the Hugging Face Hub, that contains + a tokenizer.json file + revision (:obj:`str`, defaults to `main`): + A branch or commit id + auth_token (:obj:`str`, `optional`, defaults to `None`): + An optional auth token used to access private repositories on the + Hugging Face Hub + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_str(json): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. + + Args: + json (:obj:`str`): + A valid JSON string representing a previously serialized + :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + def get_added_tokens_decoder(self): + """ + Get the underlying vocabulary + + Returns: + :obj:`Dict[int, AddedToken]`: The vocabulary + """ + pass + + def get_vocab(self, with_added_tokens=True): + """ + Get the underlying vocabulary + + Args: + with_added_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to include the added tokens + + Returns: + :obj:`Dict[str, int]`: The vocabulary + """ + pass + + def get_vocab_size(self, with_added_tokens=True): + """ + Get the size of the underlying vocabulary + + Args: + with_added_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to include the added tokens + + Returns: + :obj:`int`: The size of the vocabulary + """ + pass + + def id_to_token(self, id): + """ + Convert the given id to its corresponding token if it exists + + Args: + id (:obj:`int`): + The id to convert + + Returns: + :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary + """ + pass + + @property + def model(self): + """ + The :class:`~tokenizers.models.Model` in use by the Tokenizer + """ + pass + + def no_padding(self): + """ + Disable padding + """ + pass + + def no_truncation(self): + """ + Disable truncation + """ + pass + + @property + def normalizer(self): + """ + The `optional` 
:class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer + """ + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + :param is_pair: Boolean indicating if the input would be a single sentence or a pair + :return: + """ + pass + + @property + def padding(self): + """ + Get the current padding parameters + + `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` + + Returns: + (:obj:`dict`, `optional`): + A dict with the current padding parameters if padding is enabled + """ + pass + + def post_process(self, encoding, pair=None, add_special_tokens=True): + """ + Apply all the post-processing steps to the given encodings. + + The various steps are: + + 1. Truncate according to the set truncation params (provided with + :meth:`~tokenizers.Tokenizer.enable_truncation`) + 2. Apply the :class:`~tokenizers.processors.PostProcessor` + 3. Pad according to the set padding params (provided with + :meth:`~tokenizers.Tokenizer.enable_padding`) + + Args: + encoding (:class:`~tokenizers.Encoding`): + The :class:`~tokenizers.Encoding` corresponding to the main sequence. + + pair (:class:`~tokenizers.Encoding`, `optional`): + An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Returns: + :class:`~tokenizers.Encoding`: The final post-processed encoding + """ + pass + + @property + def post_processor(self): + """ + The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer + """ + pass + + @property + def pre_tokenizer(self): + """ + The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer + """ + pass + + def save(self, path, pretty=True): + """ + Save the :class:`~tokenizers.Tokenizer` to the file at the given path. 
+ + Args: + path (:obj:`str`): + A path to a file in which to save the serialized tokenizer. + + pretty (:obj:`bool`, defaults to :obj:`True`): + Whether the JSON file should be pretty formatted. + """ + pass + + def to_str(self, pretty=False): + """ + Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. + + Args: + pretty (:obj:`bool`, defaults to :obj:`False`): + Whether the JSON string should be pretty formatted. + + Returns: + :obj:`str`: A string representing the serialized Tokenizer + """ + pass + + def token_to_id(self, token): + """ + Convert the given token to its corresponding id if it exists + + Args: + token (:obj:`str`): + The token to convert + + Returns: + :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary + """ + pass + + def train(self, files, trainer=None): + """ + Train the Tokenizer using the given files. + + Reads the files line by line, while keeping all the whitespace, even new lines. + If you want to train from data store in-memory, you can check + :meth:`~tokenizers.Tokenizer.train_from_iterator` + + Args: + files (:obj:`List[str]`): + A list of path to the files that we should use for training + + trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): + An optional trainer that should be used to train our Model + """ + pass + + def train_from_iterator(self, iterator, trainer=None, length=None): + """ + Train the Tokenizer using the provided iterator. + + You can provide anything that is a Python Iterator + + * A list of sequences :obj:`List[str]` + * A generator that yields :obj:`str` or :obj:`List[str]` + * A Numpy array of strings + * ... + + Args: + iterator (:obj:`Iterator`): + Any iterator over strings or list of strings + + trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): + An optional trainer that should be used to train our Model + + length (:obj:`int`, `optional`): + The total number of sequences in the iterator. 
This is used to + provide meaningful progress tracking + """ + pass + + @property + def truncation(self): + """ + Get the currently set truncation parameters + + `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` + + Returns: + (:obj:`dict`, `optional`): + A dict with the current truncation parameters if truncation is enabled + """ + pass diff --git a/parrot/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6258493ab68f20d137c3bda9a33979b94882c35 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/implementations/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e775892d04a91d645653ea9015954b7985d3147 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__init__.py @@ -0,0 +1,6 @@ +from .base_tokenizer import BaseTokenizer +from .bert_wordpiece import BertWordPieceTokenizer +from .byte_level_bpe import ByteLevelBPETokenizer +from .char_level_bpe import CharBPETokenizer +from .sentencepiece_bpe import SentencePieceBPETokenizer +from .sentencepiece_unigram import SentencePieceUnigramTokenizer diff --git a/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..156a9f113628590601e226a71347b14aadd8c6c7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f3abf1fe6731427a66d8ee4be03f59132263885 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8311c558d7ce96da9ba4ce8b93a9c6a640c5ec2d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py b/parrot/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py new file mode 100644 index 0000000000000000000000000000000000000000..c7e3dbc466259795ed9d168f57d8fcabe947e96e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py @@ -0,0 +1,122 @@ +from typing import Dict, Iterator, List, Optional, Tuple, Union + +from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers +from tokenizers.models import BPE +from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str + +from .base_tokenizer import BaseTokenizer + + +class ByteLevelBPETokenizer(BaseTokenizer): + """ByteLevelBPETokenizer + + Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model + """ + + def __init__( + self, + vocab: Optional[Union[str, Dict[str, int]]] = None, + merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] 
= None, + add_prefix_space: bool = False, + lowercase: bool = False, + dropout: Optional[float] = None, + unicode_normalizer: Optional[str] = None, + continuing_subword_prefix: Optional[str] = None, + end_of_word_suffix: Optional[str] = None, + trim_offsets: bool = False, + ): + if vocab is not None and merges is not None: + tokenizer = Tokenizer( + BPE( + vocab, + merges, + dropout=dropout, + continuing_subword_prefix=continuing_subword_prefix or "", + end_of_word_suffix=end_of_word_suffix or "", + ) + ) + else: + tokenizer = Tokenizer(BPE()) + + # Check for Unicode normalization first (before everything else) + normalizers = [] + + if unicode_normalizer: + normalizers += [unicode_normalizer_from_str(unicode_normalizer)] + + if lowercase: + normalizers += [Lowercase()] + + # Create the normalizer structure + if len(normalizers) > 0: + if len(normalizers) > 1: + tokenizer.normalizer = Sequence(normalizers) + else: + tokenizer.normalizer = normalizers[0] + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) + + parameters = { + "model": "ByteLevelBPE", + "add_prefix_space": add_prefix_space, + "lowercase": lowercase, + "dropout": dropout, + "unicode_normalizer": unicode_normalizer, + "continuing_subword_prefix": continuing_subword_prefix, + "end_of_word_suffix": end_of_word_suffix, + "trim_offsets": trim_offsets, + } + + super().__init__(tokenizer, parameters) + + @staticmethod + def from_file(vocab_filename: str, merges_filename: str, **kwargs): + vocab, merges = BPE.read_file(vocab_filename, merges_filename) + return ByteLevelBPETokenizer(vocab, merges, **kwargs) + + def train( + self, + files: Union[str, List[str]], + vocab_size: int = 30000, + min_frequency: int = 2, + show_progress: bool = True, + special_tokens: List[Union[str, AddedToken]] = [], + ): + """Train the model using the given files""" + + 
trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + show_progress=show_progress, + special_tokens=special_tokens, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), + ) + if isinstance(files, str): + files = [files] + self._tokenizer.train(files, trainer=trainer) + + def train_from_iterator( + self, + iterator: Union[Iterator[str], Iterator[Iterator[str]]], + vocab_size: int = 30000, + min_frequency: int = 2, + show_progress: bool = True, + special_tokens: List[Union[str, AddedToken]] = [], + length: Optional[int] = None, + ): + """Train the model using the given iterator""" + + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + show_progress=show_progress, + special_tokens=special_tokens, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), + ) + self._tokenizer.train_from_iterator( + iterator, + trainer=trainer, + length=length, + ) diff --git a/parrot/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py b/parrot/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py new file mode 100644 index 0000000000000000000000000000000000000000..1237e85eb688c02f480e9aa968f476a7401f6067 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py @@ -0,0 +1,196 @@ +import json +import os +from typing import Iterator, List, Optional, Union, Tuple + +from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers +from tokenizers.models import Unigram + +from .base_tokenizer import BaseTokenizer + + +class SentencePieceUnigramTokenizer(BaseTokenizer): + """SentencePiece Unigram Tokenizer + + Represents the Unigram algorithm, with the pretokenization used by SentencePiece + """ + + def __init__( + self, + vocab: Optional[List[Tuple[str, float]]] = None, + replacement: str = "▁", + add_prefix_space: bool = True, + ): + if vocab is not None: + # Let Unigram(..) 
fail if only one of them is None + tokenizer = Tokenizer(Unigram(vocab)) + else: + tokenizer = Tokenizer(Unigram()) + + tokenizer.normalizer = normalizers.Sequence( + [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] + ) + prepend_scheme = "always" if add_prefix_space else "never" + tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + parameters = { + "model": "SentencePieceUnigram", + "replacement": replacement, + "add_prefix_space": add_prefix_space, + } + + super().__init__(tokenizer, parameters) + + def train( + self, + files: Union[str, List[str]], + vocab_size: int = 8000, + show_progress: bool = True, + special_tokens: Optional[List[Union[str, AddedToken]]] = None, + initial_alphabet: Optional[List[str]] = None, + unk_token: Optional[str] = None, + ): + """ + Train the model using the given files + + Args: + files (:obj:`List[str]`): + A list of path to the files that we should use for training + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. + show_progress (:obj:`bool`): + Whether to show progress bars while training. + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. 
+ """ + + if special_tokens is None: + special_tokens = [] + + if initial_alphabet is None: + initial_alphabet = [] + + trainer = trainers.UnigramTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + show_progress=show_progress, + initial_alphabet=initial_alphabet, + unk_token=unk_token, + ) + + if isinstance(files, str): + files = [files] + self._tokenizer.train(files, trainer=trainer) + + def train_from_iterator( + self, + iterator: Union[Iterator[str], Iterator[Iterator[str]]], + vocab_size: int = 8000, + show_progress: bool = True, + special_tokens: Optional[List[Union[str, AddedToken]]] = None, + initial_alphabet: Optional[List[str]] = None, + unk_token: Optional[str] = None, + length: Optional[int] = None, + ): + """ + Train the model using the given iterator + + Args: + iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): + Any iterator over strings or list of strings + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. + show_progress (:obj:`bool`): + Whether to show progress bars while training. + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + length (:obj:`int`, `optional`): + The total number of sequences in the iterator. 
This is used to + provide meaningful progress tracking + """ + + if special_tokens is None: + special_tokens = [] + + if initial_alphabet is None: + initial_alphabet = [] + + trainer = trainers.UnigramTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + show_progress=show_progress, + initial_alphabet=initial_alphabet, + unk_token=unk_token, + ) + + self._tokenizer.train_from_iterator( + iterator, + trainer=trainer, + length=length, + ) + + @staticmethod + def from_spm(filename: str): + try: + import sys + + sys.path.append(".") + + import sentencepiece_model_pb2 as model + except Exception: + raise Exception( + "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." + ) + + m = model.ModelProto() + m.ParseFromString(open(filename, "rb").read()) + + precompiled_charsmap = m.normalizer_spec.precompiled_charsmap + vocab = [(piece.piece, piece.score) for piece in m.pieces] + unk_id = m.trainer_spec.unk_id + model_type = m.trainer_spec.model_type + byte_fallback = m.trainer_spec.byte_fallback + if model_type != 1: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + + replacement = "▁" + add_prefix_space = True + + tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) + + if precompiled_charsmap: + tokenizer.normalizer = normalizers.Sequence( + [ + normalizers.Precompiled(precompiled_charsmap), + normalizers.Replace(Regex(" {2,}"), " "), + ] + ) + else: + tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")]) + prepend_scheme = "always" if add_prefix_space else "never" + tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, 
prepend_scheme=prepend_scheme) + tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + parameters = { + "model": "SentencePieceUnigram", + } + + obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) + BaseTokenizer.__init__(obj, tokenizer, parameters) + return obj diff --git a/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..68ac211aa8032249db6b929ca64f9130c358d40b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.py @@ -0,0 +1,8 @@ +# Generated content DO NOT EDIT +from .. import models + +Model = models.Model +BPE = models.BPE +Unigram = models.Unigram +WordLevel = models.WordLevel +WordPiece = models.WordPiece diff --git a/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.pyi b/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..955b9a163770bd7c8ded18da904fe8fcd545a301 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/models/__init__.pyi @@ -0,0 +1,591 @@ +# Generated content DO NOT EDIT +class Model: + """ + Base class for all models + + The model represents the actual tokenization algorithm. This is the part that + will contain and manage the learned vocabulary. + + This class cannot be constructed directly. Please use one of the concrete models. + """ + def get_trainer(self): + """ + Get the associated :class:`~tokenizers.trainers.Trainer` + + Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this + :class:`~tokenizers.models.Model`. 
+ + Returns: + :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model + """ + pass + + def id_to_token(self, id): + """ + Get the token associated to an ID + + Args: + id (:obj:`int`): + An ID to convert to a token + + Returns: + :obj:`str`: The token associated to the ID + """ + pass + + def save(self, folder, prefix): + """ + Save the current model + + Save the current model in the given folder, using the given prefix for the various + files that will get created. + Any file with the same name that already exists in this folder will be overwritten. + + Args: + folder (:obj:`str`): + The path to the target folder in which to save the various files + + prefix (:obj:`str`, `optional`): + An optional prefix, used to prefix each file name + + Returns: + :obj:`List[str]`: The list of saved files + """ + pass + + def token_to_id(self, tokens): + """ + Get the ID associated to a token + + Args: + token (:obj:`str`): + A token to convert to an ID + + Returns: + :obj:`int`: The ID associated to the token + """ + pass + + def tokenize(self, sequence): + """ + Tokenize a sequence + + Args: + sequence (:obj:`str`): + A sequence to tokenize + + Returns: + A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens + """ + pass + +class BPE(Model): + """ + An implementation of the BPE (Byte-Pair Encoding) algorithm + + Args: + vocab (:obj:`Dict[str, int]`, `optional`): + A dictionnary of string keys and their ids :obj:`{"am": 0,...}` + + merges (:obj:`List[Tuple[str, str]]`, `optional`): + A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` + + cache_capacity (:obj:`int`, `optional`): + The number of words that the BPE cache can contain. The cache allows + to speed-up the process by keeping the result of the merge operations + for a number of words. + + dropout (:obj:`float`, `optional`): + A float between 0 and 1 that represents the BPE dropout to use. 
+ + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + + continuing_subword_prefix (:obj:`str`, `optional`): + The prefix to attach to subword units that don't represent a beginning of word. + + end_of_word_suffix (:obj:`str`, `optional`): + The suffix to attach to subword units that represent an end of word. + + fuse_unk (:obj:`bool`, `optional`): + Whether to fuse any subsequent unknown tokens into a single one + + byte_fallback (:obj:`bool`, `optional`): + Whether to use spm byte-fallback trick (defaults to False) + + ignore_merges (:obj:`bool`, `optional`): + Whether or not to match tokens with the vocab before using merges. + """ + def __init__( + self, + vocab=None, + merges=None, + cache_capacity=None, + dropout=None, + unk_token=None, + continuing_subword_prefix=None, + end_of_word_suffix=None, + fuse_unk=None, + byte_fallback=False, + ignore_merges=False, + ): + pass + + @staticmethod + def from_file(cls, vocab, merge, **kwargs): + """ + Instantiate a BPE model from the given files. + + This method is roughly equivalent to doing:: + + vocab, merges = BPE.read_file(vocab_filename, merges_filename) + bpe = BPE(vocab, merges) + + If you don't need to keep the :obj:`vocab, merges` values lying around, + this method is more optimized than manually calling + :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.json` file + + merges (:obj:`str`): + The path to a :obj:`merges.txt` file + + Returns: + :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files + """ + pass + + def get_trainer(self): + """ + Get the associated :class:`~tokenizers.trainers.Trainer` + + Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this + :class:`~tokenizers.models.Model`. 
+ + Returns: + :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model + """ + pass + + def id_to_token(self, id): + """ + Get the token associated to an ID + + Args: + id (:obj:`int`): + An ID to convert to a token + + Returns: + :obj:`str`: The token associated to the ID + """ + pass + + @staticmethod + def read_file(self, vocab, merges): + """ + Read a :obj:`vocab.json` and a :obj:`merges.txt` files + + This method provides a way to read and parse the content of these files, + returning the relevant data structures. If you want to instantiate some BPE models + from memory, this method gives you the expected input from the standard files. + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.json` file + + merges (:obj:`str`): + The path to a :obj:`merges.txt` file + + Returns: + A :obj:`Tuple` with the vocab and the merges: + The vocabulary and merges loaded into memory + """ + pass + + def save(self, folder, prefix): + """ + Save the current model + + Save the current model in the given folder, using the given prefix for the various + files that will get created. + Any file with the same name that already exists in this folder will be overwritten. 
+ + Args: + folder (:obj:`str`): + The path to the target folder in which to save the various files + + prefix (:obj:`str`, `optional`): + An optional prefix, used to prefix each file name + + Returns: + :obj:`List[str]`: The list of saved files + """ + pass + + def token_to_id(self, tokens): + """ + Get the ID associated to a token + + Args: + token (:obj:`str`): + A token to convert to an ID + + Returns: + :obj:`int`: The ID associated to the token + """ + pass + + def tokenize(self, sequence): + """ + Tokenize a sequence + + Args: + sequence (:obj:`str`): + A sequence to tokenize + + Returns: + A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens + """ + pass + +class Unigram(Model): + """ + An implementation of the Unigram algorithm + + Args: + vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): + A list of vocabulary items and their relative score [("am", -0.2442),...] + """ + def __init__(self, vocab, unk_id, byte_fallback): + pass + + def get_trainer(self): + """ + Get the associated :class:`~tokenizers.trainers.Trainer` + + Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this + :class:`~tokenizers.models.Model`. + + Returns: + :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model + """ + pass + + def id_to_token(self, id): + """ + Get the token associated to an ID + + Args: + id (:obj:`int`): + An ID to convert to a token + + Returns: + :obj:`str`: The token associated to the ID + """ + pass + + def save(self, folder, prefix): + """ + Save the current model + + Save the current model in the given folder, using the given prefix for the various + files that will get created. + Any file with the same name that already exists in this folder will be overwritten. 
+ + Args: + folder (:obj:`str`): + The path to the target folder in which to save the various files + + prefix (:obj:`str`, `optional`): + An optional prefix, used to prefix each file name + + Returns: + :obj:`List[str]`: The list of saved files + """ + pass + + def token_to_id(self, tokens): + """ + Get the ID associated to a token + + Args: + token (:obj:`str`): + A token to convert to an ID + + Returns: + :obj:`int`: The ID associated to the token + """ + pass + + def tokenize(self, sequence): + """ + Tokenize a sequence + + Args: + sequence (:obj:`str`): + A sequence to tokenize + + Returns: + A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens + """ + pass + +class WordLevel(Model): + """ + An implementation of the WordLevel algorithm + + Most simple tokenizer model based on mapping tokens to their corresponding id. + + Args: + vocab (:obj:`str`, `optional`): + A dictionnary of string keys and their ids :obj:`{"am": 0,...}` + + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + """ + def __init__(self, vocab, unk_token): + pass + + @staticmethod + def from_file(vocab, unk_token): + """ + Instantiate a WordLevel model from the given file + + This method is roughly equivalent to doing:: + + vocab = WordLevel.read_file(vocab_filename) + wordlevel = WordLevel(vocab) + + If you don't need to keep the :obj:`vocab` values lying around, this method is + more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to + initialize a :class:`~tokenizers.models.WordLevel` + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.json` file + + Returns: + :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file + """ + pass + + def get_trainer(self): + """ + Get the associated :class:`~tokenizers.trainers.Trainer` + + Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this + :class:`~tokenizers.models.Model`. 
+ + Returns: + :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model + """ + pass + + def id_to_token(self, id): + """ + Get the token associated to an ID + + Args: + id (:obj:`int`): + An ID to convert to a token + + Returns: + :obj:`str`: The token associated to the ID + """ + pass + + @staticmethod + def read_file(vocab): + """ + Read a :obj:`vocab.json` + + This method provides a way to read and parse the content of a vocabulary file, + returning the relevant data structures. If you want to instantiate some WordLevel models + from memory, this method gives you the expected input from the standard files. + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.json` file + + Returns: + :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` + """ + pass + + def save(self, folder, prefix): + """ + Save the current model + + Save the current model in the given folder, using the given prefix for the various + files that will get created. + Any file with the same name that already exists in this folder will be overwritten. 
+ + Args: + folder (:obj:`str`): + The path to the target folder in which to save the various files + + prefix (:obj:`str`, `optional`): + An optional prefix, used to prefix each file name + + Returns: + :obj:`List[str]`: The list of saved files + """ + pass + + def token_to_id(self, tokens): + """ + Get the ID associated to a token + + Args: + token (:obj:`str`): + A token to convert to an ID + + Returns: + :obj:`int`: The ID associated to the token + """ + pass + + def tokenize(self, sequence): + """ + Tokenize a sequence + + Args: + sequence (:obj:`str`): + A sequence to tokenize + + Returns: + A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens + """ + pass + +class WordPiece(Model): + """ + An implementation of the WordPiece algorithm + + Args: + vocab (:obj:`Dict[str, int]`, `optional`): + A dictionnary of string keys and their ids :obj:`{"am": 0,...}` + + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + + max_input_chars_per_word (:obj:`int`, `optional`): + The maximum number of characters to authorize in a single word. 
+ """ + def __init__(self, vocab, unk_token, max_input_chars_per_word): + pass + + @staticmethod + def from_file(vocab, **kwargs): + """ + Instantiate a WordPiece model from the given file + + This method is roughly equivalent to doing:: + + vocab = WordPiece.read_file(vocab_filename) + wordpiece = WordPiece(vocab) + + If you don't need to keep the :obj:`vocab` values lying around, this method is + more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to + initialize a :class:`~tokenizers.models.WordPiece` + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.txt` file + + Returns: + :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file + """ + pass + + def get_trainer(self): + """ + Get the associated :class:`~tokenizers.trainers.Trainer` + + Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this + :class:`~tokenizers.models.Model`. + + Returns: + :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model + """ + pass + + def id_to_token(self, id): + """ + Get the token associated to an ID + + Args: + id (:obj:`int`): + An ID to convert to a token + + Returns: + :obj:`str`: The token associated to the ID + """ + pass + + @staticmethod + def read_file(vocab): + """ + Read a :obj:`vocab.txt` file + + This method provides a way to read and parse the content of a standard `vocab.txt` + file as used by the WordPiece Model, returning the relevant data structures. If you + want to instantiate some WordPiece models from memory, this method gives you the + expected input from the standard files. + + Args: + vocab (:obj:`str`): + The path to a :obj:`vocab.txt` file + + Returns: + :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` + """ + pass + + def save(self, folder, prefix): + """ + Save the current model + + Save the current model in the given folder, using the given prefix for the various + files that will get created. 
+ Any file with the same name that already exists in this folder will be overwritten. + + Args: + folder (:obj:`str`): + The path to the target folder in which to save the various files + + prefix (:obj:`str`, `optional`): + An optional prefix, used to prefix each file name + + Returns: + :obj:`List[str]`: The list of saved files + """ + pass + + def token_to_id(self, tokens): + """ + Get the ID associated to a token + + Args: + token (:obj:`str`): + A token to convert to an ID + + Returns: + :obj:`int`: The ID associated to the token + """ + pass + + def tokenize(self, sequence): + """ + Tokenize a sequence + + Args: + sequence (:obj:`str`): + A sequence to tokenize + + Returns: + A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens + """ + pass diff --git a/parrot/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..787fd2571b33c7706bd80cad64a9a015fd3b9b73 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15a16f1e268daac5f70292bebe9cfac5243612d9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py @@ -0,0 +1,29 @@ +from .. 
import normalizers + + +Normalizer = normalizers.Normalizer +BertNormalizer = normalizers.BertNormalizer +NFD = normalizers.NFD +NFKD = normalizers.NFKD +NFC = normalizers.NFC +NFKC = normalizers.NFKC +Sequence = normalizers.Sequence +Lowercase = normalizers.Lowercase +Prepend = normalizers.Prepend +Strip = normalizers.Strip +StripAccents = normalizers.StripAccents +Nmt = normalizers.Nmt +Precompiled = normalizers.Precompiled +Replace = normalizers.Replace + + +NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD} + + +def unicode_normalizer_from_str(normalizer: str) -> Normalizer: + if normalizer not in NORMALIZERS: + raise ValueError( + "{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys()) + ) + + return NORMALIZERS[normalizer]() diff --git a/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..507d4473163f7b48af6665af8534b512cf456a7a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi @@ -0,0 +1,595 @@ +# Generated content DO NOT EDIT +class Normalizer: + """ + Base class for all normalizers + + This class is not supposed to be instantiated directly. Instead, any implementation of a + Normalizer will return an instance of this class when instantiated. + """ + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class BertNormalizer(Normalizer): + """ + BertNormalizer + + Takes care of normalizing raw text before giving it to a Bert model. + This includes cleaning the text, handling accents, chinese chars and lowercasing + + Args: + clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to clean the text, by removing any control characters + and replacing all whitespaces by the classic one. + + handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to handle chinese chars by putting spaces around them. + + strip_accents (:obj:`bool`, `optional`): + Whether to strip all accents. If this option is not specified (ie == None), + then it will be determined by the value for `lowercase` (as in the original Bert). + + lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to lowercase. + """ + def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Lowercase(Normalizer): + """ + Lowercase Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFC(Normalizer): + """ + NFC Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFD(Normalizer): + """ + NFD Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFKC(Normalizer): + """ + NFKC Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFKD(Normalizer): + """ + NFKD Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Nmt(Normalizer): + """ + Nmt normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Precompiled(Normalizer): + """ + Precompiled normalizer + Don't use manually it is used for compatiblity for SentencePiece. + """ + def __init__(self, precompiled_charsmap): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Prepend(Normalizer): + """ + Prepend normalizer + """ + def __init__(self, prepend): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Replace(Normalizer): + """ + Replace normalizer + """ + def __init__(self, pattern, content): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Sequence(Normalizer): + """ + Allows concatenating multiple other Normalizer as a Sequence. + All the normalizers run in sequence in the given order + + Args: + normalizers (:obj:`List[Normalizer]`): + A list of Normalizer to be run as a sequence + """ + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Strip(Normalizer): + """ + Strip normalizer + """ + def __init__(self, left=True, right=True): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class StripAccents(Normalizer): + """ + StripAccents normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass diff --git a/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2f55d41d1a90f8a75f6467fde4471a66534f4f3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..06d124037b6d932615fa0d31b02f8ac82ac0b5fc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.py @@ -0,0 +1,9 @@ +# Generated content DO NOT EDIT +from .. 
import processors + +PostProcessor = processors.PostProcessor +BertProcessing = processors.BertProcessing +ByteLevel = processors.ByteLevel +RobertaProcessing = processors.RobertaProcessing +Sequence = processors.Sequence +TemplateProcessing = processors.TemplateProcessing diff --git a/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi b/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5136d02bbc4d391eba1b2feb4882c1f563db92f3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi @@ -0,0 +1,342 @@ +# Generated content DO NOT EDIT +class PostProcessor: + """ + Base class for all post-processors + + This class is not supposed to be instantiated directly. Instead, any implementation of + a PostProcessor will return an instance of this class when instantiated. + """ + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class BertProcessing(PostProcessor): + """ + This post-processor takes care of adding the special tokens needed by + a Bert model: + + - a SEP token + - a CLS token + + Args: + sep (:obj:`Tuple[str, int]`): + A tuple with the string representation of the SEP token, and its id + + cls (:obj:`Tuple[str, int]`): + A tuple with the string representation of the CLS token, and its id + """ + def __init__(self, sep, cls): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class ByteLevel(PostProcessor): + """ + This post-processor takes care of trimming the offsets. + + By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't + want the offsets to include these whitespaces, then this PostProcessor must be used. + + Args: + trim_offsets (:obj:`bool`): + Whether to trim the whitespaces from the produced offsets. + """ + def __init__(self, trim_offsets=True): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class RobertaProcessing(PostProcessor): + """ + This post-processor takes care of adding the special tokens needed by + a Roberta model: + + - a SEP token + - a CLS token + + It also takes care of trimming the offsets. + By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't + want the offsets to include these whitespaces, then this PostProcessor should be initialized + with :obj:`trim_offsets=True` + + Args: + sep (:obj:`Tuple[str, int]`): + A tuple with the string representation of the SEP token, and its id + + cls (:obj:`Tuple[str, int]`): + A tuple with the string representation of the CLS token, and its id + + trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to trim the whitespaces from the produced offsets. + + add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether the add_prefix_space option was enabled during pre-tokenization. 
This + is relevant because it defines the way the offsets are trimmed out. + """ + def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class Sequence(PostProcessor): + """ + Sequence Processor + + Args: + processors (:obj:`List[PostProcessor]`) + The processors that need to be chained + """ + def __init__(self, processors): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class TemplateProcessing(PostProcessor): + """ + Provides a way to specify templates in order to add the special tokens to each + input sequence as relevant. + + Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to + delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first + sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair + sequences. The final result looks like this: + + - Single sequence: :obj:`[CLS] Hello there [SEP]` + - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` + + With the type ids as following:: + + [CLS] ... [SEP] ... [SEP] + 0 0 0 1 1 + + You can achieve such behavior using a TemplateProcessing:: + + TemplateProcessing( + single="[CLS] $0 [SEP]", + pair="[CLS] $A [SEP] $B:1 [SEP]:1", + special_tokens=[("[CLS]", 1), ("[SEP]", 0)], + ) + + In this example, each input sequence is identified using a ``$`` construct. This identifier + lets us specify each input sequence, and the type_id to use. When nothing is specified, + it uses the default values. Here are the different ways to specify it: + + - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` + - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... + - Specifying both: ``$A:0``, ``$B:1``, ... 
+ + The same construct is used for special tokens: ``(:)?``. + + **Warning**: You must ensure that you are giving the correct tokens/ids as these + will be added to the Encoding without any further check. If the given ids correspond + to something totally different in a `Tokenizer` using this `PostProcessor`, it + might lead to unexpected results. + + Args: + single (:obj:`Template`): + The template used for single sequences + + pair (:obj:`Template`): + The template used when both sequences are specified + + special_tokens (:obj:`Tokens`): + The list of special tokens used in each sequences + + Types: + + Template (:obj:`str` or :obj:`List`): + - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens + - If a :obj:`List[str]` is provided, a list of tokens + + Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): + - A :obj:`Tuple` with both a token and its associated ID, in any order + - A :obj:`dict` with the following keys: + - "id": :obj:`str` => The special token id, as specified in the Template + - "ids": :obj:`List[int]` => The associated IDs + - "tokens": :obj:`List[str]` => The associated tokens + + The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have + the same length. + """ + def __init__(self, single, pair, special_tokens): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass diff --git a/parrot/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1eed6117a81f2ea359d51490acc0d9323a2c7c6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/tools/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f941e2ed39c7d69fa14abff7dcf973d93843ea06 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/tools/__init__.py @@ -0,0 +1 @@ +from .visualizer import Annotation, EncodingVisualizer diff --git a/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2ff247f0ea1fc4201546715eb67b36e41524435 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55dac308544c333431a3d533394a19d001fa6851 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css b/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css new file mode 100644 index 0000000000000000000000000000000000000000..f54fde45ada66c902c0b41969d0f40d51c9717da --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css @@ -0,0 +1,170 @@ +.tokenized-text { + width:100%; + padding:2rem; + max-height: 400px; + overflow-y: auto; + box-sizing:border-box; + line-height:4rem; /* Lots of space between lines */ + font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; + box-shadow: 2px 2px 2px rgba(0,0,0,0.2); + background-color: rgba(0,0,0,0.01); + letter-spacing:2px; /* Give some extra separation between chars */ +} +.non-token{ + /* White space and other things the tokenizer ignores*/ + white-space: pre; + letter-spacing:4px; + border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ + border-bottom:1px solid #A0A0A0; + line-height: 1rem; + height: calc(100% - 2px); +} + +.token { + white-space: pre; + position:relative; + color:black; + letter-spacing:2px; +} + +.annotation{ + white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ + border-radius:4px; + position:relative; + width:fit-content; +} +.annotation:before { + /*The before holds the text and the after holds the background*/ + z-index:1000; /* Make sure this is above the background */ + content:attr(data-label); /* The annotations label is on a 
data attribute */ + color:white; + position:absolute; + font-size:1rem; + text-align:center; + font-weight:bold; + + top:1.75rem; + line-height:0; + left:0; + width:100%; + padding:0.5rem 0; + /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ + overflow: hidden; + white-space: nowrap; + text-overflow:ellipsis; +} + +.annotation:after { + content:attr(data-label); /* The content defines the width of the annotation*/ + position:absolute; + font-size:0.75rem; + text-align:center; + font-weight:bold; + text-overflow:ellipsis; + top:1.75rem; + line-height:0; + overflow: hidden; + white-space: nowrap; + + left:0; + width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ + + padding:0.5rem 0; + /* Nast hack below: + We set the annotations color in code because we don't know the colors at css time. + But you can't pass a color as a data attribute to get it into the pseudo element (this thing) + So to get around that, annotations have the color set on them with a style attribute and then we + can get the color with currentColor. 
+ Annotations wrap tokens and tokens set the color back to black + */ + background-color: currentColor; +} +.annotation:hover::after, .annotation:hover::before{ + /* When the user hovers over an annotation expand the label to display in full + */ + min-width: fit-content; +} + +.annotation:hover{ + /* Emphasize the annotation start end with a border on hover*/ + border-color: currentColor; + border: 2px solid; +} +.special-token:not(:empty){ + /* + A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) + */ + position:relative; +} +.special-token:empty::before{ + /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ + content:attr(data-stok); + background:#202020; + font-size:0.75rem; + color:white; + margin: 0 0.25rem; + padding: 0.25rem; + border-radius:4px +} + +.special-token:not(:empty):before { + /* Special tokens that have text (UNK) are displayed above the actual text*/ + content:attr(data-stok); + position:absolute; + bottom:1.75rem; + min-width:100%; + width:100%; + height:1rem; + line-height:1rem; + font-size:1rem; + text-align:center; + color:white; + font-weight:bold; + background:#202020; + border-radius:10%; +} +/* +We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations +instead we apply even and odd class at generation time and color them that way + */ +.even-token{ + background:#DCDCDC ; + border: 1px solid #DCDCDC; +} +.odd-token{ + background:#A0A0A0; + border: 1px solid #A0A0A0; +} +.even-token.multi-token,.odd-token.multi-token{ + background: repeating-linear-gradient( + 45deg, + transparent, + transparent 1px, + #ccc 1px, + #ccc 1px + ), + /* on "bottom" */ + linear-gradient( + to bottom, + #FFB6C1, + #999 + ); +} + +.multi-token:hover::after { + content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ + color:white; + background-color: 
black; + position:absolute; + font-size:0.75rem; + text-align:center; + font-weight:bold; + text-overflow:ellipsis; + top:1.75rem; + line-height:0; + overflow: hidden; + white-space: nowrap; + left:0; + width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ + padding:0.5rem 0; +} diff --git a/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer.py b/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c988a6481fd167e0013aa18cb4ff16067b704245 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/tools/visualizer.py @@ -0,0 +1,403 @@ +import itertools +import os +import re +from string import Template +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple + +from tokenizers import Encoding, Tokenizer + + +dirname = os.path.dirname(__file__) +css_filename = os.path.join(dirname, "visualizer-styles.css") +with open(css_filename) as f: + css = f.read() + + +class Annotation: + start: int + end: int + label: int + + def __init__(self, start: int, end: int, label: str): + self.start = start + self.end = end + self.label = label + + +AnnotationList = List[Annotation] +PartialIntList = List[Optional[int]] + + +class CharStateKey(NamedTuple): + token_ix: Optional[int] + anno_ix: Optional[int] + + +class CharState: + char_ix: Optional[int] + + def __init__(self, char_ix): + self.char_ix = char_ix + + self.anno_ix: Optional[int] = None + self.tokens: List[int] = [] + + @property + def token_ix(self): + return self.tokens[0] if len(self.tokens) > 0 else None + + @property + def is_multitoken(self): + """ + BPE tokenizers can output more than one token for a char + """ + return len(self.tokens) > 1 + + def partition_key(self) -> CharStateKey: + return CharStateKey( + token_ix=self.token_ix, + anno_ix=self.anno_ix, + ) + + +class Aligned: + pass + + +class EncodingVisualizer: + """ + Build an 
EncodingVisualizer + + Args: + + tokenizer (:class:`~tokenizers.Tokenizer`): + A tokenizer instance + + default_to_notebook (:obj:`bool`): + Whether to render html output in a notebook by default + + annotation_converter (:obj:`Callable`, `optional`): + An optional (lambda) function that takes an annotation in any format and returns + an Annotation object + """ + + unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE) + + def __init__( + self, + tokenizer: Tokenizer, + default_to_notebook: bool = True, + annotation_converter: Optional[Callable[[Any], Annotation]] = None, + ): + if default_to_notebook: + try: + from IPython.core.display import HTML, display + except ImportError: + raise Exception( + """We couldn't import IPython utils for html display. + Are you running in a notebook? + You can also pass `default_to_notebook=False` to get back raw HTML + """ + ) + + self.tokenizer = tokenizer + self.default_to_notebook = default_to_notebook + self.annotation_coverter = annotation_converter + pass + + def __call__( + self, + text: str, + annotations: AnnotationList = [], + default_to_notebook: Optional[bool] = None, + ) -> Optional[str]: + """ + Build a visualization of the given text + + Args: + text (:obj:`str`): + The text to tokenize + + annotations (:obj:`List[Annotation]`, `optional`): + An optional list of annotations of the text. The can either be an annotation class + or anything else if you instantiated the visualizer with a converter function + + default_to_notebook (:obj:`bool`, `optional`, defaults to `False`): + If True, will render the html in a notebook. Otherwise returns an html string. 
+ + Returns: + The HTML string if default_to_notebook is False, otherwise (default) returns None and + renders the HTML in the notebook + + """ + final_default_to_notebook = self.default_to_notebook + if default_to_notebook is not None: + final_default_to_notebook = default_to_notebook + if final_default_to_notebook: + try: + from IPython.core.display import HTML, display + except ImportError: + raise Exception( + """We couldn't import IPython utils for html display. + Are you running in a notebook?""" + ) + if self.annotation_coverter is not None: + annotations = list(map(self.annotation_coverter, annotations)) + encoding = self.tokenizer.encode(text) + html = EncodingVisualizer.__make_html(text, encoding, annotations) + if final_default_to_notebook: + display(HTML(html)) + else: + return html + + @staticmethod + def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: + """ + Generates a color palette for all the labels in a given set of annotations + + Args: + annotations (:obj:`Annotation`): + A list of annotations + + Returns: + :obj:`dict`: A dictionary mapping labels to colors in HSL format + """ + if len(annotations) == 0: + return {} + labels = set(map(lambda x: x.label, annotations)) + num_labels = len(labels) + h_step = int(255 / num_labels) + if h_step < 20: + h_step = 20 + s = 32 + l = 64 # noqa: E741 + h = 10 + colors = {} + + for label in sorted(labels): # sort so we always get the same colors for a given set of labels + colors[label] = f"hsl({h},{s}%,{l}%" + h += h_step + return colors + + @staticmethod + def consecutive_chars_to_html( + consecutive_chars_list: List[CharState], + text: str, + encoding: Encoding, + ): + """ + Converts a list of "consecutive chars" into a single HTML element. + Chars are consecutive if they fall under the same word, token and annotation. + The CharState class is a named tuple with a "partition_key" method that makes it easy to + compare if two chars are consecutive. 
+ + Args: + consecutive_chars_list (:obj:`List[CharState]`): + A list of CharStates that have been grouped together + + text (:obj:`str`): + The original text being processed + + encoding (:class:`~tokenizers.Encoding`): + The encoding returned from the tokenizer + + Returns: + :obj:`str`: The HTML span for a set of consecutive chars + """ + first = consecutive_chars_list[0] + if first.char_ix is None: + # its a special token + stoken = encoding.tokens[first.token_ix] + # special tokens are represented as empty spans. We use the data attribute and css + # magic to display it + return f'' + # We're not in a special token so this group has a start and end. + last = consecutive_chars_list[-1] + start = first.char_ix + end = last.char_ix + 1 + span_text = text[start:end] + css_classes = [] # What css classes will we apply on the resulting span + data_items = {} # What data attributes will we apply on the result span + if first.token_ix is not None: + # We can either be in a token or not (e.g. in white space) + css_classes.append("token") + if first.is_multitoken: + css_classes.append("multi-token") + if first.token_ix % 2: + # We use this to color alternating tokens. + # A token might be split by an annotation that ends in the middle of it, so this + # lets us visually indicate a consecutive token despite its possible splitting in + # the html markup + css_classes.append("odd-token") + else: + # Like above, but a different color so we can see the tokens alternate + css_classes.append("even-token") + if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: + # This is a special token that is in the text. probably UNK + css_classes.append("special-token") + # TODO is this the right name for the data attribute ? + data_items["stok"] = encoding.tokens[first.token_ix] + else: + # In this case we are looking at a group/single char that is not tokenized. + # e.g. 
white space + css_classes.append("non-token") + css = f'''class="{' '.join(css_classes)}"''' + data = "" + for key, val in data_items.items(): + data += f' data-{key}="{val}"' + return f"{span_text}" + + @staticmethod + def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: + char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) + current_consecutive_chars = [char_states[0]] + prev_anno_ix = char_states[0].anno_ix + spans = [] + label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) + cur_anno_ix = char_states[0].anno_ix + if cur_anno_ix is not None: + # If we started in an annotation make a span for it + anno = annotations[cur_anno_ix] + label = anno.label + color = label_colors_dict[label] + spans.append(f'') + + for cs in char_states[1:]: + cur_anno_ix = cs.anno_ix + if cur_anno_ix != prev_anno_ix: + # If we've transitioned in or out of an annotation + spans.append( + # Create a span from the current consecutive characters + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + current_consecutive_chars = [cs] + + if prev_anno_ix is not None: + # if we transitioned out of an annotation close it's span + spans.append("") + if cur_anno_ix is not None: + # If we entered a new annotation make a span for it + anno = annotations[cur_anno_ix] + label = anno.label + color = label_colors_dict[label] + spans.append(f'') + prev_anno_ix = cur_anno_ix + + if cs.partition_key() == current_consecutive_chars[0].partition_key(): + # If the current charchter is in the same "group" as the previous one + current_consecutive_chars.append(cs) + else: + # Otherwise we make a span for the previous group + spans.append( + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + # An reset the consecutive_char_list to form a new group + current_consecutive_chars = [cs] + # All that's left 
is to fill out the final span + # TODO I think there is an edge case here where an annotation's span might not close + spans.append( + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + res = HTMLBody(spans) # Send the list of spans to the body of our html + return res + + @staticmethod + def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: + """ + Args: + text (:obj:`str`): + The raw text we want to align to + + annotations (:obj:`AnnotationList`): + A (possibly empty) list of annotations + + Returns: + A list of length len(text) whose entry at index i is None if there is no annotation on + charachter i or k, the index of the annotation that covers index i where k is with + respect to the list of annotations + """ + annotation_map = [None] * len(text) + for anno_ix, a in enumerate(annotations): + for i in range(a.start, a.end): + annotation_map[i] = anno_ix + return annotation_map + + @staticmethod + def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: + """ + For each character in the original text, we emit a tuple representing it's "state": + + * which token_ix it corresponds to + * which word_ix it corresponds to + * which annotation_ix it corresponds to + + Args: + text (:obj:`str`): + The raw text we want to align to + + annotations (:obj:`List[Annotation]`): + A (possibly empty) list of annotations + + encoding: (:class:`~tokenizers.Encoding`): + The encoding returned from the tokenizer + + Returns: + :obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what + it's state is + """ + annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) + # Todo make this a dataclass or named tuple + char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] + for token_ix, token in enumerate(encoding.tokens): + offsets = encoding.token_to_chars(token_ix) + if 
offsets is not None: + start, end = offsets + for i in range(start, end): + char_states[i].tokens.append(token_ix) + for char_ix, anno_ix in enumerate(annotation_map): + char_states[char_ix].anno_ix = anno_ix + + return char_states + + +def HTMLBody(children: List[str], css_styles=css) -> str: + """ + Generates the full html with css from a list of html spans + + Args: + children (:obj:`List[str]`): + A list of strings, assumed to be html elements + + css_styles (:obj:`str`, `optional`): + Optional alternative implementation of the css + + Returns: + :obj:`str`: An HTML string with style markup + """ + children_text = "".join(children) + return f""" + + + + + +
+ {children_text} +
+ + + """ diff --git a/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.py b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22f94c50b7cf63f0b38231ab1ecec88141a678fd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.py @@ -0,0 +1,8 @@ +# Generated content DO NOT EDIT +from .. import trainers + +Trainer = trainers.Trainer +BpeTrainer = trainers.BpeTrainer +UnigramTrainer = trainers.UnigramTrainer +WordLevelTrainer = trainers.WordLevelTrainer +WordPieceTrainer = trainers.WordPieceTrainer diff --git a/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d6c5257188b57df682dd34be5f58237c36363c64 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi @@ -0,0 +1,156 @@ +# Generated content DO NOT EDIT +class Trainer: + """ + Base class for all trainers + + This class is not supposed to be instantiated directly. Instead, any implementation of a + Trainer will return an instance of this class when instantiated. + """ + +class BpeTrainer(Trainer): + """ + Trainer capable of training a BPE model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + + limit_alphabet (:obj:`int`, `optional`): + The maximum different characters to keep in the alphabet. 
+ + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + continuing_subword_prefix (:obj:`str`, `optional`): + A prefix to be used for every subword that is not a beginning-of-word. + + end_of_word_suffix (:obj:`str`, `optional`): + A suffix to be used for every subword that is a end-of-word. + + max_token_length (:obj:`int`, `optional`): + Prevents creating tokens longer than the specified size. + This can help with reducing polluting your vocabulary with + highly repetitive tokens like `======` for wikipedia + + """ + +class UnigramTrainer(Trainer): + """ + Trainer capable of training a Unigram model + + Args: + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. + + show_progress (:obj:`bool`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`): + A list of special tokens the model should know of. + + initial_alphabet (:obj:`List[str]`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + shrinking_factor (:obj:`float`): + The shrinking factor used at each step of the training to prune the + vocabulary. + + unk_token (:obj:`str`): + The token used for out-of-vocabulary tokens. + + max_piece_length (:obj:`int`): + The maximum length of a given token. + + n_sub_iterations (:obj:`int`): + The number of iterations of the EM algorithm to perform before + pruning the vocabulary. 
+ """ + def __init__( + self, + vocab_size=8000, + show_progress=True, + special_tokens=[], + shrinking_factor=0.75, + unk_token=None, + max_piece_length=16, + n_sub_iterations=2, + ): + pass + +class WordLevelTrainer(Trainer): + """ + Trainer capable of training a WorldLevel model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`): + A list of special tokens the model should know of. + """ + +class WordPieceTrainer(Trainer): + """ + Trainer capable of training a WordPiece model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + + limit_alphabet (:obj:`int`, `optional`): + The maximum different characters to keep in the alphabet. + + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + continuing_subword_prefix (:obj:`str`, `optional`): + A prefix to be used for every subword that is not a beginning-of-word. + + end_of_word_suffix (:obj:`str`, `optional`): + A suffix to be used for every subword that is a end-of-word. 
+ """ + def __init__( + self, + vocab_size=30000, + min_frequency=0, + show_progress=True, + special_tokens=[], + limit_alphabet=None, + initial_alphabet=[], + continuing_subword_prefix="##", + end_of_word_suffix=None, + ): + pass diff --git a/parrot/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac25493ab8d9527c185d1248559c64c3219f74d7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/__init__.py b/parrot/lib/python3.10/site-packages/torchvision/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..857625a783c4c59dd0fe5f5fc64f1014a431aa6e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/__init__.py @@ -0,0 +1,105 @@ +import os +import warnings +from modulefinder import Module + +import torch + +# Don't re-order these, we need to load the _C extension (done when importing +# .extensions) before entering _meta_registrations. +from .extension import _HAS_OPS # usort:skip +from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils # usort:skip + +try: + from .version import __version__ # noqa: F401 +except ImportError: + pass + + +# Check if torchvision is being imported within the root folder +if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join( + os.path.realpath(os.getcwd()), "torchvision" +): + message = ( + "You are importing torchvision within its own root folder ({}). " + "This is not expected to work and may give errors. Please exit the " + "torchvision project source and relaunch your python interpreter." 
+ ) + warnings.warn(message.format(os.getcwd())) + +_image_backend = "PIL" + +_video_backend = "pyav" + + +def set_image_backend(backend): + """ + Specifies the package used to load images. + + Args: + backend (string): Name of the image backend. one of {'PIL', 'accimage'}. + The :mod:`accimage` package uses the Intel IPP library. It is + generally faster than PIL, but does not support as many operations. + """ + global _image_backend + if backend not in ["PIL", "accimage"]: + raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'") + _image_backend = backend + + +def get_image_backend(): + """ + Gets the name of the package used to load images + """ + return _image_backend + + +def set_video_backend(backend): + """ + Specifies the package used to decode videos. + + Args: + backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. + The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic + binding for the FFmpeg libraries. + The :mod:`video_reader` package includes a native C++ implementation on + top of FFMPEG libraries, and a python API of TorchScript custom operator. + It generally decodes faster than :mod:`pyav`, but is perhaps less robust. + + .. note:: + Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader' + backend, please compile torchvision from source. + """ + global _video_backend + if backend not in ["pyav", "video_reader", "cuda"]: + raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend) + if backend == "video_reader" and not io._HAS_VIDEO_OPT: + # TODO: better messages + message = "video_reader video backend is not available. Please compile torchvision from source and try again" + raise RuntimeError(message) + elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER: + # TODO: better messages + message = "cuda video backend is not available." 
+ raise RuntimeError(message) + else: + _video_backend = backend + + +def get_video_backend(): + """ + Returns the currently active video backend used to decode videos. + + Returns: + str: Name of the video backend. one of {'pyav', 'video_reader'}. + """ + + return _video_backend + + +def _is_tracing(): + return torch._C._get_tracing_state() + + +def disable_beta_transforms_warning(): + # Noop, only exists to avoid breaking existing code. + # See https://github.com/pytorch/vision/issues/7896 + pass diff --git a/parrot/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py b/parrot/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a6e261ea277989f4362037352cb24da6564460 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py @@ -0,0 +1,50 @@ +import importlib.machinery +import os + +from torch.hub import _get_torch_home + + +_HOME = os.path.join(_get_torch_home(), "datasets", "vision") +_USE_SHARDED_DATASETS = False + + +def _download_file_from_remote_location(fpath: str, url: str) -> None: + pass + + +def _is_remote_location_available() -> bool: + return False + + +try: + from torch.hub import load_state_dict_from_url # noqa: 401 +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401 + + +def _get_extension_path(lib_name): + + lib_dir = os.path.dirname(__file__) + if os.name == "nt": + # Register the main torchvision library location on the default DLL path + import ctypes + + kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True) + with_load_library_flags = hasattr(kernel32, "AddDllDirectory") + prev_error_mode = kernel32.SetErrorMode(0x0001) + + if with_load_library_flags: + kernel32.AddDllDirectory.restype = ctypes.c_void_p + + os.add_dll_directory(lib_dir) + + kernel32.SetErrorMode(prev_error_mode) + + loader_details = 
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES) + + extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) + ext_specs = extfinder.find_spec(lib_name) + if ext_specs is None: + raise ImportError + + return ext_specs.origin diff --git a/parrot/lib/python3.10/site-packages/torchvision/_meta_registrations.py b/parrot/lib/python3.10/site-packages/torchvision/_meta_registrations.py new file mode 100644 index 0000000000000000000000000000000000000000..f75bfb77a7f25a1842509de595f109f232994574 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/_meta_registrations.py @@ -0,0 +1,225 @@ +import functools + +import torch +import torch._custom_ops +import torch.library + +# Ensure that torch.ops.torchvision is visible +import torchvision.extension # noqa: F401 + + +@functools.lru_cache(None) +def get_meta_lib(): + return torch.library.Library("torchvision", "IMPL", "Meta") + + +def register_meta(op_name, overload_name="default"): + def wrapper(fn): + if torchvision.extension._has_ops(): + get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn) + return fn + + return wrapper + + +@register_meta("roi_align") +def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned): + torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]") + torch._check( + input.dtype == rois.dtype, + lambda: ( + "Expected tensor for input to have the same type as tensor for rois; " + f"but type {input.dtype} does not equal {rois.dtype}" + ), + ) + num_rois = rois.size(0) + channels = input.size(1) + return input.new_empty((num_rois, channels, pooled_height, pooled_width)) + + +@register_meta("_roi_align_backward") +def meta_roi_align_backward( + grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned +): + torch._check( + grad.dtype == rois.dtype, + lambda: ( + "Expected tensor for grad to 
have the same type as tensor for rois; " + f"but type {grad.dtype} does not equal {rois.dtype}" + ), + ) + return grad.new_empty((batch_size, channels, height, width)) + + +@register_meta("ps_roi_align") +def meta_ps_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio): + torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]") + torch._check( + input.dtype == rois.dtype, + lambda: ( + "Expected tensor for input to have the same type as tensor for rois; " + f"but type {input.dtype} does not equal {rois.dtype}" + ), + ) + channels = input.size(1) + torch._check( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width", + ) + + num_rois = rois.size(0) + out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width) + return input.new_empty(out_size), torch.empty(out_size, dtype=torch.int32, device="meta") + + +@register_meta("_ps_roi_align_backward") +def meta_ps_roi_align_backward( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + batch_size, + channels, + height, + width, +): + torch._check( + grad.dtype == rois.dtype, + lambda: ( + "Expected tensor for grad to have the same type as tensor for rois; " + f"but type {grad.dtype} does not equal {rois.dtype}" + ), + ) + return grad.new_empty((batch_size, channels, height, width)) + + +@register_meta("roi_pool") +def meta_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width): + torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]") + torch._check( + input.dtype == rois.dtype, + lambda: ( + "Expected tensor for input to have the same type as tensor for rois; " + f"but type {input.dtype} does not equal {rois.dtype}" + ), + ) + num_rois = rois.size(0) + channels = input.size(1) + out_size = (num_rois, channels, pooled_height, pooled_width) + return input.new_empty(out_size), 
torch.empty(out_size, device="meta", dtype=torch.int32) + + +@register_meta("_roi_pool_backward") +def meta_roi_pool_backward( + grad, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width +): + torch._check( + grad.dtype == rois.dtype, + lambda: ( + "Expected tensor for grad to have the same type as tensor for rois; " + f"but type {grad.dtype} does not equal {rois.dtype}" + ), + ) + return grad.new_empty((batch_size, channels, height, width)) + + +@register_meta("ps_roi_pool") +def meta_ps_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width): + torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]") + torch._check( + input.dtype == rois.dtype, + lambda: ( + "Expected tensor for input to have the same type as tensor for rois; " + f"but type {input.dtype} does not equal {rois.dtype}" + ), + ) + channels = input.size(1) + torch._check( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width", + ) + num_rois = rois.size(0) + out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width) + return input.new_empty(out_size), torch.empty(out_size, device="meta", dtype=torch.int32) + + +@register_meta("_ps_roi_pool_backward") +def meta_ps_roi_pool_backward( + grad, rois, channel_mapping, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width +): + torch._check( + grad.dtype == rois.dtype, + lambda: ( + "Expected tensor for grad to have the same type as tensor for rois; " + f"but type {grad.dtype} does not equal {rois.dtype}" + ), + ) + return grad.new_empty((batch_size, channels, height, width)) + + +@torch.library.register_fake("torchvision::nms") +def meta_nms(dets, scores, iou_threshold): + torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D") + torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got 
@register_meta("deform_conv2d")
def meta_deform_conv2d(
    input,
    weight,
    offset,
    mask,
    bias,
    stride_h,
    stride_w,
    pad_h,
    pad_w,
    dil_h,
    dil_w,
    n_weight_grps,
    n_offset_grps,
    use_mask,
):
    """Meta (shape-only) kernel for ``deform_conv2d``: batch size comes from
    ``input``, output channels from ``weight``, and the spatial extent from the
    last two dims of the ``offset`` tensor."""
    n = input.shape[0]
    c_out = weight.shape[0]
    h_out, w_out = offset.shape[-2], offset.shape[-1]
    return input.new_empty((n, c_out, h_out, w_out))
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
    """Render *seq* as a human-readable, quoted, comma-separated list.

    ``separate_last`` (e.g. ``"and "`` or ``"or "``) is inserted before the
    final item; with exactly two items and a separator the comma is dropped
    ("'a' and 'b'"), otherwise an Oxford comma is kept.
    """
    if not seq:
        return ""
    if len(seq) == 1:
        return f"'{seq[0]}'"

    quoted = [f"'{item}'" for item in seq]
    head = ", ".join(quoted[:-1])
    # Two items joined by a separator read naturally without a comma.
    comma = "" if separate_last and len(seq) == 2 else ","
    return f"{head}{comma} {separate_last}{quoted[-1]}"
+ # To find cuda related dlls we need to make sure the + # conda environment/bin path is configured Please take a look: + # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python + # Please note: if some path can't be added using add_dll_directory we simply ignore this path + if os.name == "nt" and sys.version_info < (3, 9): + env_path = os.environ["PATH"] + path_arr = env_path.split(";") + for path in path_arr: + if os.path.exists(path): + try: + os.add_dll_directory(path) # type: ignore[attr-defined] + except Exception: + pass + + lib_path = _get_extension_path("_C") + torch.ops.load_library(lib_path) + _HAS_OPS = True + + def _has_ops(): # noqa: F811 + return True + +except (ImportError, OSError): + pass + + +def _assert_has_ops(): + if not _has_ops(): + raise RuntimeError( + "Couldn't load custom C++ ops. This can happen if your PyTorch and " + "torchvision versions are incompatible, or if you had errors while compiling " + "torchvision from source. For further information on the compatible versions, check " + "https://github.com/pytorch/vision#installation for the compatibility matrix. " + "Please check your PyTorch version with torch.__version__ and your torchvision " + "version with torchvision.__version__ and verify if they are compatible, and if not " + "please reinstall torchvision so that it matches your PyTorch install." 
def _check_cuda_version():
    """
    Make sure that CUDA versions match between the pytorch install and torchvision install
    """
    if not _HAS_OPS:
        # Custom C++ ops failed to load; there is nothing to compare against.
        return -1
    from torch.version import cuda as torch_version_cuda

    # Integer encoding produced by the torchvision C++ extension; -1 means the
    # extension was built without CUDA.
    _version = torch.ops.torchvision._cuda_version()
    if _version != -1 and torch_version_cuda is not None:
        tv_version = str(_version)
        if int(tv_version) < 10000:
            # 4-digit form, e.g. "9020" -> major 9, minor 2
            tv_major = int(tv_version[0])
            tv_minor = int(tv_version[2])
        else:
            # 5-digit form, e.g. "11080" -> major 11, minor 8
            tv_major = int(tv_version[0:2])
            tv_minor = int(tv_version[3])
        t_version = torch_version_cuda.split(".")
        t_major = int(t_version[0])
        t_minor = int(t_version[1])
        # Only a *major* CUDA version mismatch is treated as fatal; minor
        # differences are tolerated.
        if t_major != tv_major:
            raise RuntimeError(
                "Detected that PyTorch and torchvision were compiled with different CUDA major versions. "
                f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
                f"CUDA Version={tv_major}.{tv_minor}. "
                "Please reinstall the torchvision that matches your PyTorch install."
            )
    return _version
diff --git a/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_video_opt.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_video_opt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d8f01e7f1d016f6a87d9025e5dea433c9329243 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/_video_opt.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/image.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc24665041647e39193c6c6554d799af69247dad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/image.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87e61daccf4b27816ffd51be8a775f6834f74423 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8b653d47974edb0472d3d0ae7af4a858d7364ec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/io/_video_opt.py b/parrot/lib/python3.10/site-packages/torchvision/io/_video_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd7d11929e15c519f4f5227ed40b937e1460bb7 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torchvision/io/_video_opt.py @@ -0,0 +1,512 @@ +import math +import warnings +from fractions import Fraction +from typing import Dict, List, Optional, Tuple, Union + +import torch + +from ..extension import _load_library + + +try: + _load_library("video_reader") + _HAS_VIDEO_OPT = True +except (ImportError, OSError): + _HAS_VIDEO_OPT = False + +default_timebase = Fraction(0, 1) + + +# simple class for torch scripting +# the complex Fraction class from fractions module is not scriptable +class Timebase: + __annotations__ = {"numerator": int, "denominator": int} + __slots__ = ["numerator", "denominator"] + + def __init__( + self, + numerator: int, + denominator: int, + ) -> None: + self.numerator = numerator + self.denominator = denominator + + +class VideoMetaData: + __annotations__ = { + "has_video": bool, + "video_timebase": Timebase, + "video_duration": float, + "video_fps": float, + "has_audio": bool, + "audio_timebase": Timebase, + "audio_duration": float, + "audio_sample_rate": float, + } + __slots__ = [ + "has_video", + "video_timebase", + "video_duration", + "video_fps", + "has_audio", + "audio_timebase", + "audio_duration", + "audio_sample_rate", + ] + + def __init__(self) -> None: + self.has_video = False + self.video_timebase = Timebase(0, 1) + self.video_duration = 0.0 + self.video_fps = 0.0 + self.has_audio = False + self.audio_timebase = Timebase(0, 1) + self.audio_duration = 0.0 + self.audio_sample_rate = 0.0 + + +def _validate_pts(pts_range: Tuple[int, int]) -> None: + + if pts_range[0] > pts_range[1] > 0: + raise ValueError( + f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]} and end pts: {pts_range[1]}" + ) + + +def _fill_info( + vtimebase: torch.Tensor, + vfps: torch.Tensor, + vduration: torch.Tensor, + atimebase: torch.Tensor, + asample_rate: torch.Tensor, + aduration: torch.Tensor, +) -> VideoMetaData: + """ + Build update VideoMetaData struct with info about the video + """ + 
meta = VideoMetaData() + if vtimebase.numel() > 0: + meta.video_timebase = Timebase(int(vtimebase[0].item()), int(vtimebase[1].item())) + timebase = vtimebase[0].item() / float(vtimebase[1].item()) + if vduration.numel() > 0: + meta.has_video = True + meta.video_duration = float(vduration.item()) * timebase + if vfps.numel() > 0: + meta.video_fps = float(vfps.item()) + if atimebase.numel() > 0: + meta.audio_timebase = Timebase(int(atimebase[0].item()), int(atimebase[1].item())) + timebase = atimebase[0].item() / float(atimebase[1].item()) + if aduration.numel() > 0: + meta.has_audio = True + meta.audio_duration = float(aduration.item()) * timebase + if asample_rate.numel() > 0: + meta.audio_sample_rate = float(asample_rate.item()) + + return meta + + +def _align_audio_frames( + aframes: torch.Tensor, aframe_pts: torch.Tensor, audio_pts_range: Tuple[int, int] +) -> torch.Tensor: + start, end = aframe_pts[0], aframe_pts[-1] + num_samples = aframes.size(0) + step_per_aframe = float(end - start + 1) / float(num_samples) + s_idx = 0 + e_idx = num_samples + if start < audio_pts_range[0]: + s_idx = int((audio_pts_range[0] - start) / step_per_aframe) + if audio_pts_range[1] != -1 and end > audio_pts_range[1]: + e_idx = int((audio_pts_range[1] - end) / step_per_aframe) + return aframes[s_idx:e_idx, :] + + +def _read_video_from_file( + filename: str, + seek_frame_margin: float = 0.25, + read_video_stream: bool = True, + video_width: int = 0, + video_height: int = 0, + video_min_dimension: int = 0, + video_max_dimension: int = 0, + video_pts_range: Tuple[int, int] = (0, -1), + video_timebase: Fraction = default_timebase, + read_audio_stream: bool = True, + audio_samples: int = 0, + audio_channels: int = 0, + audio_pts_range: Tuple[int, int] = (0, -1), + audio_timebase: Fraction = default_timebase, +) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]: + """ + Reads a video from a file, returning both the video frames and the audio frames + + Args: + filename (str): path to 
def _read_video_from_file(
    filename: str,
    seek_frame_margin: float = 0.25,
    read_video_stream: bool = True,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase: Fraction = default_timebase,
    read_audio_stream: bool = True,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase: Fraction = default_timebase,
) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]:
    """Decode a video file, returning its video frames, audio frames and metadata.

    Args:
        filename: path to the video file.
        seek_frame_margin: seeking in a stream is imprecise, so when a start
            pts is given the decoder seeks this many seconds earlier.
        read_video_stream: whether to decode the video stream.
        video_width / video_height / video_min_dimension / video_max_dimension:
            together select the decoded frame size. All zeros keeps the
            original resolution; a non-zero min/max dimension rescales the
            shorter/longer edge keeping aspect ratio; a non-zero width and/or
            height forces those exact sizes (aspect ratio is preserved unless
            both are given).
        video_pts_range: (start, end) presentation timestamps of the video
            stream; end == -1 means "to the end of the stream".
        video_timebase: rational timebase of the video stream.
        read_audio_stream / audio_samples / audio_channels / audio_pts_range /
            audio_timebase: audio-side equivalents of the above (sampling rate,
            channel count, pts window, timebase).

    Returns:
        vframes (Tensor[T, H, W, C]), aframes (Tensor[L, K]) where K is the
        number of audio channels, and a ``VideoMetaData`` record describing
        both streams.
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    decoded = torch.ops.video_reader.read_video_from_file(
        filename,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase.numerator,
        video_timebase.denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase.numerator,
        audio_timebase.denominator,
    )
    (
        vframes,
        _vframe_pts,
        vtimebase,
        vfps,
        vduration,
        aframes,
        aframe_pts,
        atimebase,
        asample_rate,
        aduration,
    ) = decoded
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    if aframes.numel() > 0:
        # An audio stream was found; clip it to the requested pts window.
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes, info
def _probe_video_from_file(filename: str) -> VideoMetaData:
    """Probe a video file and return a ``VideoMetaData`` describing its streams."""
    probed = torch.ops.video_reader.probe_video_from_file(filename)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = probed
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
def _read_video_from_memory(
    video_data: torch.Tensor,
    seek_frame_margin: float = 0.25,
    read_video_stream: int = 1,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase_numerator: int = 0,
    video_timebase_denominator: int = 1,
    read_audio_stream: int = 1,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase_numerator: int = 0,
    audio_timebase_denominator: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Decode a video held in memory into video and audio frames.

    Mirrors ``_read_video_from_file`` (same size-selection and pts-window
    semantics) but takes the compressed bitstream as a ``torch.Tensor`` of
    ``uint8`` or as ``bytes`` instead of a path, passes the timebases as
    separate numerator/denominator ints so the function stays
    torchscriptable, and returns only ``(vframes, aframes)``:
    ``vframes`` is ``Tensor[T, H, W, C]`` and ``aframes`` is ``Tensor[L, K]``
    with ``K`` audio channels.
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    if not isinstance(video_data, torch.Tensor):
        with warnings.catch_warnings():
            # Ignore the warning because we actually don't modify the buffer in this function
            warnings.filterwarnings("ignore", message="The given buffer is not writable")
            video_data = torch.frombuffer(video_data, dtype=torch.uint8)

    decoded = torch.ops.video_reader.read_video_from_memory(
        video_data,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase_numerator,
        video_timebase_denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase_numerator,
        audio_timebase_denominator,
    )

    (
        vframes,
        _vframe_pts,
        _vtimebase,
        _vfps,
        _vduration,
        aframes,
        aframe_pts,
        _atimebase,
        _asample_rate,
        _aduration,
    ) = decoded

    if aframes.numel() > 0:
        # An audio stream was found; clip it to the requested pts window.
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)

    return vframes, aframes
def _probe_video_from_memory(
    video_data: torch.Tensor,
) -> VideoMetaData:
    """Probe an in-memory video bitstream and return its ``VideoMetaData``.

    Accepts either a ``uint8`` tensor or a ``bytes``-like buffer; stays
    torchscriptable.
    """
    if not isinstance(video_data, torch.Tensor):
        with warnings.catch_warnings():
            # Ignore the warning because we actually don't modify the buffer in this function
            warnings.filterwarnings("ignore", message="The given buffer is not writable")
            video_data = torch.frombuffer(video_data, dtype=torch.uint8)
    probed = torch.ops.video_reader.probe_video_from_memory(video_data)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = probed
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
def _read_video(
    filename: str,
    start_pts: Union[float, Fraction] = 0,
    end_pts: Optional[Union[float, Fraction]] = None,
    pts_unit: str = "pts",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, float]]:
    """High-level decode: probe *filename*, convert the requested [start, end]
    window into per-stream pts, and decode both streams.

    Returns ``(vframes, aframes, info)`` where ``info`` holds ``video_fps``
    and/or ``audio_fps`` for the streams that exist.
    """
    if end_pts is None:
        end_pts = float("inf")

    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            "follow-up version. Please use pts_unit 'sec'."
        )

    meta = _probe_video_from_file(filename)

    def to_pts_window(time_base):
        # Convert seconds to integer pts in the given timebase; an infinite
        # end maps to -1 ("until the end of the stream").
        lo = start_pts
        hi = end_pts
        if pts_unit == "sec":
            lo = int(math.floor(start_pts * (1 / time_base)))
            if hi != float("inf"):
                hi = int(math.ceil(end_pts * (1 / time_base)))
        if hi == float("inf"):
            hi = -1
        return lo, hi

    video_pts_range = (0, -1)
    video_timebase = default_timebase
    if meta.has_video:
        video_timebase = Fraction(meta.video_timebase.numerator, meta.video_timebase.denominator)
        video_pts_range = to_pts_window(video_timebase)

    audio_pts_range = (0, -1)
    audio_timebase = default_timebase
    if meta.has_audio:
        audio_timebase = Fraction(meta.audio_timebase.numerator, meta.audio_timebase.denominator)
        audio_pts_range = to_pts_window(audio_timebase)

    vframes, aframes, decode_info = _read_video_from_file(
        filename,
        read_video_stream=True,
        video_pts_range=video_pts_range,
        video_timebase=video_timebase,
        read_audio_stream=True,
        audio_pts_range=audio_pts_range,
        audio_timebase=audio_timebase,
    )
    fps_info: Dict[str, float] = {}
    if meta.has_video:
        fps_info["video_fps"] = decode_info.video_fps
    if meta.has_audio:
        fps_info["audio_fps"] = decode_info.audio_sample_rate

    return vframes, aframes, fps_info
+ ) + + pts: Union[List[int], List[Fraction]] + pts, _, info = _read_video_timestamps_from_file(filename) + + if pts_unit == "sec": + video_time_base = Fraction(info.video_timebase.numerator, info.video_timebase.denominator) + pts = [x * video_time_base for x in pts] + + video_fps = info.video_fps if info.has_video else None + + return pts, video_fps diff --git a/parrot/lib/python3.10/site-packages/torchvision/ops/_utils.py b/parrot/lib/python3.10/site-packages/torchvision/ops/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a6ca557a98b899b7c2a11ba0dca3d64730af4268 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/ops/_utils.py @@ -0,0 +1,106 @@ +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn, Tensor + + +def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor: + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + # TODO add back the assert + # assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor: + concat_boxes = _cat([b for b in boxes], dim=0) + temp = [] + for i, b in enumerate(boxes): + temp.append(torch.full_like(b[:, :1], i)) + ids = _cat(temp, dim=0) + rois = torch.cat([ids, concat_boxes], dim=1) + return rois + + +def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]): + if isinstance(boxes, (list, tuple)): + for _tensor in boxes: + torch._assert( + _tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]" + ) + elif isinstance(boxes, torch.Tensor): + torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]") + else: + torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]") + return + + +def split_normalization_params( + model: nn.Module, norm_classes: Optional[List[type]] 
= None +) -> Tuple[List[Tensor], List[Tensor]]: + # Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501 + if not norm_classes: + norm_classes = [ + nn.modules.batchnorm._BatchNorm, + nn.LayerNorm, + nn.GroupNorm, + nn.modules.instancenorm._InstanceNorm, + nn.LocalResponseNorm, + ] + + for t in norm_classes: + if not issubclass(t, nn.Module): + raise ValueError(f"Class {t} is not a subclass of nn.Module.") + + classes = tuple(norm_classes) + + norm_params = [] + other_params = [] + for module in model.modules(): + if next(module.children(), None): + other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad) + elif isinstance(module, classes): + norm_params.extend(p for p in module.parameters() if p.requires_grad) + else: + other_params.extend(p for p in module.parameters() if p.requires_grad) + return norm_params, other_params + + +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +def _upcast_non_float(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.dtype not in (torch.float32, torch.float64): + return t.float() + return t + + +def _loss_inter_union( + boxes1: torch.Tensor, + boxes2: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsctk = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + unionk = (x2 - x1) * (y2 - 
y1) + (x2g - x1g) * (y2g - y1g) - intsctk + + return intsctk, unionk diff --git a/parrot/lib/python3.10/site-packages/torchvision/ops/giou_loss.py b/parrot/lib/python3.10/site-packages/torchvision/ops/giou_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ec8bc8852fe71258cb5a46fdf5428581a23b4c3e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/ops/giou_loss.py @@ -0,0 +1,76 @@ +import torch + +from ..utils import _log_api_usage_once +from ._utils import _loss_inter_union, _upcast_non_float + + +def generalized_box_iou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + + """ + Gradient-friendly IoU loss with an additional penalty that is non-zero when the + boxes do not overlap and scales with the size of their smallest enclosing box. + This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable. + + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``, and The two boxes should have the + same dimensions. + + Args: + boxes1 (Tensor[N, 4] or Tensor[4]): first set of boxes + boxes2 (Tensor[N, 4] or Tensor[4]): second set of boxes + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be + applied to the output. ``'mean'``: The output will be averaged. + ``'sum'``: The output will be summed. Default: ``'none'`` + eps (float): small number to prevent division by zero. Default: 1e-7 + + Returns: + Tensor: Loss tensor with the reduction option applied. 
+ + Reference: + Hamid Rezatofighi et al.: Generalized Intersection over Union: + A Metric and A Loss for Bounding Box Regression: + https://arxiv.org/abs/1902.09630 + """ + + # Original implementation from https://github.com/facebookresearch/fvcore/blob/bfff2ef/fvcore/nn/giou_loss.py + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(generalized_box_iou_loss) + + boxes1 = _upcast_non_float(boxes1) + boxes2 = _upcast_non_float(boxes2) + intsctk, unionk = _loss_inter_union(boxes1, boxes2) + iouk = intsctk / (unionk + eps) + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + + area_c = (xc2 - xc1) * (yc2 - yc1) + miouk = iouk - ((area_c - unionk) / (area_c + eps)) + + loss = 1 - miouk + + # Check reduction option and return loss accordingly + if reduction == "none": + pass + elif reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + else: + raise ValueError( + f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'" + ) + return loss diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/__init__.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77680a14f0d0599f4004a2ce5c299c0f5e13a0d5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/__init__.py @@ -0,0 +1,2 @@ +from .transforms import * +from .autoaugment import * diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..777b97a56b99902bdd24321e28200a39eccd1c1d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70493f34df9cf171f10a9ee8b55fa51bdd02f74e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cdab8fc43187527d8864cd34275a55bc6360a70 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5725b4fe9ab88464bd6c4c6319e4e00a14e472c0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_pil.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_pil.py new file mode 100644 index 0000000000000000000000000000000000000000..527879bb6f1b249e2c6208032f30c42139a81b99 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_pil.py @@ -0,0 +1,393 @@ +import numbers +from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from PIL import Image, ImageEnhance, ImageOps + +try: + import accimage +except ImportError: + accimage = None + + +@torch.jit.unused +def _is_pil_image(img: Any) -> bool: + if accimage is not None: + return isinstance(img, (Image.Image, accimage.Image)) + else: + return isinstance(img, Image.Image) + + +@torch.jit.unused +def get_dimensions(img: Any) -> List[int]: + if _is_pil_image(img): + if hasattr(img, "getbands"): + channels = len(img.getbands()) + else: + channels = img.channels + width, height = img.size + return [channels, height, width] + raise TypeError(f"Unexpected type {type(img)}") + + +@torch.jit.unused +def get_image_size(img: Any) -> List[int]: + if _is_pil_image(img): + return list(img.size) + raise TypeError(f"Unexpected type {type(img)}") + + +@torch.jit.unused +def get_image_num_channels(img: Any) -> int: + if _is_pil_image(img): + if hasattr(img, "getbands"): + return len(img.getbands()) + else: + return img.channels + raise TypeError(f"Unexpected type {type(img)}") + + +@torch.jit.unused +def hflip(img: Image.Image) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + return img.transpose(Image.FLIP_LEFT_RIGHT) + + +@torch.jit.unused +def vflip(img: Image.Image) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + return img.transpose(Image.FLIP_TOP_BOTTOM) + + +@torch.jit.unused +def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. 
Got {type(img)}") + + enhancer = ImageEnhance.Brightness(img) + img = enhancer.enhance(brightness_factor) + return img + + +@torch.jit.unused +def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + enhancer = ImageEnhance.Contrast(img) + img = enhancer.enhance(contrast_factor) + return img + + +@torch.jit.unused +def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + enhancer = ImageEnhance.Color(img) + img = enhancer.enhance(saturation_factor) + return img + + +@torch.jit.unused +def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + input_mode = img.mode + if input_mode in {"L", "1", "I", "F"}: + return img + + h, s, v = img.convert("HSV").split() + + np_h = np.array(h, dtype=np.uint8) + # This will over/underflow, as desired + np_h += np.array(hue_factor * 255).astype(np.uint8) + + h = Image.fromarray(np_h, "L") + + img = Image.merge("HSV", (h, s, v)).convert(input_mode) + return img + + +@torch.jit.unused +def adjust_gamma( + img: Image.Image, + gamma: float, + gain: float = 1.0, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. 
Got {type(img)}") + + if gamma < 0: + raise ValueError("Gamma should be a non-negative real number") + + input_mode = img.mode + img = img.convert("RGB") + gamma_map = [int((255 + 1 - 1e-3) * gain * pow(ele / 255.0, gamma)) for ele in range(256)] * 3 + img = img.point(gamma_map) # use PIL's point-function to accelerate this part + + img = img.convert(input_mode) + return img + + +@torch.jit.unused +def pad( + img: Image.Image, + padding: Union[int, List[int], Tuple[int, ...]], + fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0, + padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant", +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + if not isinstance(padding, (numbers.Number, tuple, list)): + raise TypeError("Got inappropriate padding arg") + if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)): + raise TypeError("Got inappropriate fill arg") + if not isinstance(padding_mode, str): + raise TypeError("Got inappropriate padding_mode arg") + + if isinstance(padding, list): + padding = tuple(padding) + + if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]: + raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple") + + if isinstance(padding, tuple) and len(padding) == 1: + # Compatibility with `functional_tensor.pad` + padding = padding[0] + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + if padding_mode == "constant": + opts = _parse_fill(fill, img, name="fill") + if img.mode == "P": + palette = img.getpalette() + image = ImageOps.expand(img, border=padding, **opts) + image.putpalette(palette) + return image + + return ImageOps.expand(img, border=padding, **opts) + else: + if isinstance(padding, int): + pad_left = pad_right = pad_top = pad_bottom = padding + if 
isinstance(padding, tuple) and len(padding) == 2: + pad_left = pad_right = padding[0] + pad_top = pad_bottom = padding[1] + if isinstance(padding, tuple) and len(padding) == 4: + pad_left = padding[0] + pad_top = padding[1] + pad_right = padding[2] + pad_bottom = padding[3] + + p = [pad_left, pad_top, pad_right, pad_bottom] + cropping = -np.minimum(p, 0) + + if cropping.any(): + crop_left, crop_top, crop_right, crop_bottom = cropping + img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom)) + + pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0) + + if img.mode == "P": + palette = img.getpalette() + img = np.asarray(img) + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode) + img = Image.fromarray(img) + img.putpalette(palette) + return img + + img = np.asarray(img) + # RGB image + if len(img.shape) == 3: + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode) + # Grayscale image + if len(img.shape) == 2: + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) + + return Image.fromarray(img) + + +@torch.jit.unused +def crop( + img: Image.Image, + top: int, + left: int, + height: int, + width: int, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + return img.crop((left, top, left + width, top + height)) + + +@torch.jit.unused +def resize( + img: Image.Image, + size: Union[List[int], int], + interpolation: int = Image.BILINEAR, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. 
Got {type(img)}") + if not (isinstance(size, list) and len(size) == 2): + raise TypeError(f"Got inappropriate size arg: {size}") + + return img.resize(tuple(size[::-1]), interpolation) + + +@torch.jit.unused +def _parse_fill( + fill: Optional[Union[float, List[float], Tuple[float, ...]]], + img: Image.Image, + name: str = "fillcolor", +) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]: + + # Process fill color for affine transforms + num_channels = get_image_num_channels(img) + if fill is None: + fill = 0 + if isinstance(fill, (int, float)) and num_channels > 1: + fill = tuple([fill] * num_channels) + if isinstance(fill, (list, tuple)): + if len(fill) == 1: + fill = fill * num_channels + elif len(fill) != num_channels: + msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})" + raise ValueError(msg.format(len(fill), num_channels)) + + fill = tuple(fill) # type: ignore[arg-type] + + if img.mode != "F": + if isinstance(fill, (list, tuple)): + fill = tuple(int(x) for x in fill) + else: + fill = int(fill) + + return {name: fill} + + +@torch.jit.unused +def affine( + img: Image.Image, + matrix: List[float], + interpolation: int = Image.NEAREST, + fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + output_size = img.size + opts = _parse_fill(fill, img) + return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts) + + +@torch.jit.unused +def rotate( + img: Image.Image, + angle: float, + interpolation: int = Image.NEAREST, + expand: bool = False, + center: Optional[Tuple[int, int]] = None, + fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. 
Got {type(img)}") + + opts = _parse_fill(fill, img) + return img.rotate(angle, interpolation, expand, center, **opts) + + +@torch.jit.unused +def perspective( + img: Image.Image, + perspective_coeffs: List[float], + interpolation: int = Image.BICUBIC, + fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None, +) -> Image.Image: + + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + opts = _parse_fill(fill, img) + + return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts) + + +@torch.jit.unused +def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + + if num_output_channels == 1: + img = img.convert("L") + elif num_output_channels == 3: + img = img.convert("L") + np_img = np.array(img, dtype=np.uint8) + np_img = np.dstack([np_img, np_img, np_img]) + img = Image.fromarray(np_img, "RGB") + else: + raise ValueError("num_output_channels should be either 1 or 3") + + return img + + +@torch.jit.unused +def invert(img: Image.Image) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + return ImageOps.invert(img) + + +@torch.jit.unused +def posterize(img: Image.Image, bits: int) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + return ImageOps.posterize(img, bits) + + +@torch.jit.unused +def solarize(img: Image.Image, threshold: int) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + return ImageOps.solarize(img, threshold) + + +@torch.jit.unused +def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. 
Got {type(img)}") + + enhancer = ImageEnhance.Sharpness(img) + img = enhancer.enhance(sharpness_factor) + return img + + +@torch.jit.unused +def autocontrast(img: Image.Image) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + return ImageOps.autocontrast(img) + + +@torch.jit.unused +def equalize(img: Image.Image) -> Image.Image: + if not _is_pil_image(img): + raise TypeError(f"img should be PIL Image. Got {type(img)}") + return ImageOps.equalize(img) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..348f01bb1e6bf93ba60da7469294631e75cc0193 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py @@ -0,0 +1,960 @@ +import warnings +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.nn.functional import conv2d, grid_sample, interpolate, pad as torch_pad + + +def _is_tensor_a_torch_image(x: Tensor) -> bool: + return x.ndim >= 2 + + +def _assert_image_tensor(img: Tensor) -> None: + if not _is_tensor_a_torch_image(img): + raise TypeError("Tensor is not a torch image.") + + +def get_dimensions(img: Tensor) -> List[int]: + _assert_image_tensor(img) + channels = 1 if img.ndim == 2 else img.shape[-3] + height, width = img.shape[-2:] + return [channels, height, width] + + +def get_image_size(img: Tensor) -> List[int]: + # Returns (w, h) of tensor image + _assert_image_tensor(img) + return [img.shape[-1], img.shape[-2]] + + +def get_image_num_channels(img: Tensor) -> int: + _assert_image_tensor(img) + if img.ndim == 2: + return 1 + elif img.ndim > 2: + return img.shape[-3] + + raise TypeError(f"Input ndim should be 2 or more. 
Got {img.ndim}") + + +def _max_value(dtype: torch.dtype) -> int: + if dtype == torch.uint8: + return 255 + elif dtype == torch.int8: + return 127 + elif dtype == torch.int16: + return 32767 + elif dtype == torch.int32: + return 2147483647 + elif dtype == torch.int64: + return 9223372036854775807 + else: + # This is only here for completeness. This value is implicitly assumed in a lot of places so changing it is not + # easy. + return 1 + + +def _assert_channels(img: Tensor, permitted: List[int]) -> None: + c = get_dimensions(img)[0] + if c not in permitted: + raise TypeError(f"Input image tensor permitted channel values are {permitted}, but found {c}") + + +def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + if image.dtype == dtype: + return image + + if image.is_floating_point(): + + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + return image.to(dtype) + + # float to int + if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or ( + image.dtype == torch.float64 and dtype == torch.int64 + ): + msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely." + raise RuntimeError(msg) + + # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321 + # For data in the range 0-1, (float * 255).to(uint) is only 255 + # when float is exactly 1.0. + # `max + 1 - epsilon` provides more evenly distributed mapping of + # ranges of floats to ints. 
+ eps = 1e-3 + max_val = float(_max_value(dtype)) + result = image.mul(max_val + 1.0 - eps) + return result.to(dtype) + else: + input_max = float(_max_value(image.dtype)) + + # int to float + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + image = image.to(dtype) + return image / input_max + + output_max = float(_max_value(dtype)) + + # int to int + if input_max > output_max: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image // factor can produce different results + factor = int((input_max + 1) // (output_max + 1)) + image = torch.div(image, factor, rounding_mode="floor") + return image.to(dtype) + else: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image * factor can produce different results + factor = int((output_max + 1) // (input_max + 1)) + image = image.to(dtype) + return image * factor + + +def vflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-2) + + +def hflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-1) + + +def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + _assert_image_tensor(img) + + _, h, w = get_dimensions(img) + right = left + width + bottom = top + height + + if left < 0 or top < 0 or right > w or bottom > h: + padding_ltrb = [ + max(-left + min(0, right), 0), + max(-top + min(0, bottom), 0), + max(right - max(w, left), 0), + max(bottom - max(h, top), 0), + ] + return pad(img[..., max(top, 0) : bottom, max(left, 0) : right], padding_ltrb, fill=0) + return img[..., top:bottom, left:right] + + +def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + _assert_channels(img, [1, 3]) + + if num_output_channels not in (1, 3): + raise 
ValueError("num_output_channels should be either 1 or 3") + + if img.shape[-3] == 3: + r, g, b = img.unbind(dim=-3) + # This implementation closely follows the TF one: + # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138 + l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype) + l_img = l_img.unsqueeze(dim=-3) + else: + l_img = img.clone() + + if num_output_channels == 3: + return l_img.expand(img.shape) + + return l_img + + +def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + if brightness_factor < 0: + raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + return _blend(img, torch.zeros_like(img), brightness_factor) + + +def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + if contrast_factor < 0: + raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [3, 1]) + c = get_dimensions(img)[0] + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + if c == 3: + mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True) + else: + mean = torch.mean(img.to(dtype), dim=(-3, -2, -1), keepdim=True) + + return _blend(img, mean, contrast_factor) + + +def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") + + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor image") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + if get_dimensions(img)[0] == 1: # Match PIL behaviour + return img + + orig_dtype = img.dtype + img = convert_image_dtype(img, torch.float32) + + img = _rgb2hsv(img) + h, s, v = img.unbind(dim=-3) + h = (h + hue_factor) % 1.0 + img = torch.stack((h, s, v), dim=-3) + img_hue_adj = _hsv2rgb(img) + 
+ return convert_image_dtype(img_hue_adj, orig_dtype) + + +def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + if saturation_factor < 0: + raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + if get_dimensions(img)[0] == 1: # Match PIL behaviour + return img + + return _blend(img, rgb_to_grayscale(img), saturation_factor) + + +def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + if not isinstance(img, torch.Tensor): + raise TypeError("Input img should be a Tensor.") + + _assert_channels(img, [1, 3]) + + if gamma < 0: + raise ValueError("Gamma should be a non-negative real number") + + result = img + dtype = img.dtype + if not torch.is_floating_point(img): + result = convert_image_dtype(result, torch.float32) + + result = (gain * result**gamma).clamp(0, 1) + + result = convert_image_dtype(result, dtype) + return result + + +def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor: + ratio = float(ratio) + bound = _max_value(img1.dtype) + return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype) + + +def _rgb2hsv(img: Tensor) -> Tensor: + r, g, b = img.unbind(dim=-3) + + # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/ + # src/libImaging/Convert.c#L330 + maxc = torch.max(img, dim=-3).values + minc = torch.min(img, dim=-3).values + + # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN + # from happening in the results, because + # + S channel has division by `maxc`, which is zero only if `maxc = minc` + # + H channel has division by `(maxc - minc)`. + # + # Instead of overwriting NaN afterwards, we just prevent it from occurring, so + # we don't need to deal with it in case we save the NaN in a buffer in + # backprop, if it is ever supported, but it doesn't hurt to do so. 
+ eqc = maxc == minc + + cr = maxc - minc + # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine. + ones = torch.ones_like(maxc) + s = cr / torch.where(eqc, ones, maxc) + # Note that `eqc => maxc = minc = r = g = b`. So the following calculation + # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it + # would not matter what values `rc`, `gc`, and `bc` have here, and thus + # replacing denominator with 1 when `eqc` is fine. + cr_divisor = torch.where(eqc, ones, cr) + rc = (maxc - r) / cr_divisor + gc = (maxc - g) / cr_divisor + bc = (maxc - b) / cr_divisor + + hr = (maxc == r) * (bc - gc) + hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc) + hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc) + h = hr + hg + hb + h = torch.fmod((h / 6.0 + 1.0), 1.0) + return torch.stack((h, s, maxc), dim=-3) + + +def _hsv2rgb(img: Tensor) -> Tensor: + h, s, v = img.unbind(dim=-3) + i = torch.floor(h * 6.0) + f = (h * 6.0) - i + i = i.to(dtype=torch.int32) + + p = torch.clamp((v * (1.0 - s)), 0.0, 1.0) + q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0) + t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0) + i = i % 6 + + mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1) + + a1 = torch.stack((v, q, p, p, t, v), dim=-3) + a2 = torch.stack((t, v, v, q, p, p), dim=-3) + a3 = torch.stack((p, p, t, v, v, q), dim=-3) + a4 = torch.stack((a1, a2, a3), dim=-4) + + return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4) + + +def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor: + # padding is left, right, top, bottom + + # crop if needed + if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0: + neg_min_padding = [-min(x, 0) for x in padding] + crop_left, crop_right, crop_top, crop_bottom = neg_min_padding + img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right] + padding = [max(x, 0) for x in padding] + + in_sizes = img.size() + + 
_x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...] + left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0] + right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3] + x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device) + + _y_indices = [i for i in range(in_sizes[-2])] + top_indices = [i for i in range(padding[2] - 1, -1, -1)] + bottom_indices = [-(i + 1) for i in range(padding[3])] + y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device) + + ndim = img.ndim + if ndim == 3: + return img[:, y_indices[:, None], x_indices[None, :]] + elif ndim == 4: + return img[:, :, y_indices[:, None], x_indices[None, :]] + else: + raise RuntimeError("Symmetric padding of N-D tensors are not supported yet") + + +def _parse_pad_padding(padding: Union[int, List[int]]) -> List[int]: + if isinstance(padding, int): + if torch.jit.is_scripting(): + # This maybe unreachable + raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]") + pad_left = pad_right = pad_top = pad_bottom = padding + elif len(padding) == 1: + pad_left = pad_right = pad_top = pad_bottom = padding[0] + elif len(padding) == 2: + pad_left = pad_right = padding[0] + pad_top = pad_bottom = padding[1] + else: + pad_left = padding[0] + pad_top = padding[1] + pad_right = padding[2] + pad_bottom = padding[3] + + return [pad_left, pad_right, pad_top, pad_bottom] + + +def pad( + img: Tensor, padding: Union[int, List[int]], fill: Optional[Union[int, float]] = 0, padding_mode: str = "constant" +) -> Tensor: + _assert_image_tensor(img) + + if fill is None: + fill = 0 + + if not isinstance(padding, (int, tuple, list)): + raise TypeError("Got inappropriate padding arg") + if not isinstance(fill, (int, float)): + raise TypeError("Got inappropriate fill arg") + if not isinstance(padding_mode, str): + raise TypeError("Got inappropriate padding_mode arg") + + if isinstance(padding, 
tuple): + padding = list(padding) + + if isinstance(padding, list): + # TODO: Jit is failing on loading this op when scripted and saved + # https://github.com/pytorch/pytorch/issues/81100 + if len(padding) not in [1, 2, 4]: + raise ValueError( + f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple" + ) + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + p = _parse_pad_padding(padding) + + if padding_mode == "edge": + # remap padding_mode str + padding_mode = "replicate" + elif padding_mode == "symmetric": + # route to another implementation + return _pad_symmetric(img, p) + + need_squeeze = False + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64): + # Here we temporarily cast input tensor to float + # until pytorch issue is resolved : + # https://github.com/pytorch/pytorch/issues/40763 + need_cast = True + img = img.to(torch.float32) + + if padding_mode in ("reflect", "replicate"): + img = torch_pad(img, p, mode=padding_mode) + else: + img = torch_pad(img, p, mode=padding_mode, value=float(fill)) + + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + img = img.to(out_dtype) + + return img + + +def resize( + img: Tensor, + size: List[int], + interpolation: str = "bilinear", + antialias: Optional[bool] = True, +) -> Tensor: + _assert_image_tensor(img) + + if isinstance(size, tuple): + size = list(size) + + if antialias is None: + antialias = False + + if antialias and interpolation not in ["bilinear", "bicubic"]: + # We manually set it to False to avoid an error downstream in interpolate() + # This behaviour is documented: the parameter is irrelevant for modes + # that are not bilinear or bicubic. 
We used to raise an error here, but + # now we don't as True is the default. + antialias = False + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64]) + + # Define align_corners to avoid warnings + align_corners = False if interpolation in ["bilinear", "bicubic"] else None + + img = interpolate(img, size=size, mode=interpolation, align_corners=align_corners, antialias=antialias) + + if interpolation == "bicubic" and out_dtype == torch.uint8: + img = img.clamp(min=0, max=255) + + img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype) + + return img + + +def _assert_grid_transform_inputs( + img: Tensor, + matrix: Optional[List[float]], + interpolation: str, + fill: Optional[Union[int, float, List[float]]], + supported_interpolation_modes: List[str], + coeffs: Optional[List[float]] = None, +) -> None: + + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor") + + _assert_image_tensor(img) + + if matrix is not None and not isinstance(matrix, list): + raise TypeError("Argument matrix should be a list") + + if matrix is not None and len(matrix) != 6: + raise ValueError("Argument matrix should have 6 float values") + + if coeffs is not None and len(coeffs) != 8: + raise ValueError("Argument coeffs should have 8 float values") + + if fill is not None and not isinstance(fill, (int, float, tuple, list)): + warnings.warn("Argument fill should be either int, float, tuple or list") + + # Check fill + num_channels = get_dimensions(img)[0] + if fill is not None and isinstance(fill, (tuple, list)) and len(fill) > 1 and len(fill) != num_channels: + msg = ( + "The number of elements in 'fill' cannot broadcast to match the number of " + "channels of the image ({} != {})" + ) + raise ValueError(msg.format(len(fill), num_channels)) + + if interpolation not in supported_interpolation_modes: + raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with 
Tensor input") + + +def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]: + need_squeeze = False + # make image NCHW + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if out_dtype not in req_dtypes: + need_cast = True + req_dtype = req_dtypes[0] + img = img.to(req_dtype) + return img, need_cast, need_squeeze, out_dtype + + +def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor: + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64): + # it is better to round before cast + img = torch.round(img) + img = img.to(out_dtype) + + return img + + +def _apply_grid_transform( + img: Tensor, grid: Tensor, mode: str, fill: Optional[Union[int, float, List[float]]] +) -> Tensor: + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype]) + + if img.shape[0] > 1: + # Apply same grid to a batch of images + grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]) + + # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice + if fill is not None: + mask = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device) + img = torch.cat((img, mask), dim=1) + + img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False) + + # Fill with required color + if fill is not None: + mask = img[:, -1:, :, :] # N * 1 * H * W + img = img[:, :-1, :, :] # N * C * H * W + mask = mask.expand_as(img) + fill_list, len_fill = (fill, len(fill)) if isinstance(fill, (tuple, list)) else ([float(fill)], 1) + fill_img = torch.tensor(fill_list, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img) + if mode == "nearest": + mask = mask < 0.5 + img[mask] = fill_img[mask] + else: # 'bilinear' + img = 
img * mask + (1.0 - mask) * fill_img + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def _gen_affine_grid( + theta: Tensor, + w: int, + h: int, + ow: int, + oh: int, +) -> Tensor: + # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/ + # AffineGridGenerator.cpp#L18 + # Difference with AffineGridGenerator is that: + # 1) we normalize grid values after applying theta + # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device) + x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device) + output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta) + return output_grid.view(1, oh, ow, 2) + + +def affine( + img: Tensor, + matrix: List[float], + interpolation: str = "nearest", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3) + shape = img.shape + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2]) + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]: + + # Inspired of PIL implementation: + # 
https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054 + + # pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points. + # Points are shifted due to affine matrix torch convention about + # the center point. Center is (0, 0) for image center pivot point (w * 0.5, h * 0.5) + pts = torch.tensor( + [ + [-0.5 * w, -0.5 * h, 1.0], + [-0.5 * w, 0.5 * h, 1.0], + [0.5 * w, 0.5 * h, 1.0], + [0.5 * w, -0.5 * h, 1.0], + ] + ) + theta = torch.tensor(matrix, dtype=torch.float).view(2, 3) + new_pts = torch.matmul(pts, theta.T) + min_vals, _ = new_pts.min(dim=0) + max_vals, _ = new_pts.max(dim=0) + + # shift points to [0, w] and [0, h] interval to match PIL results + min_vals += torch.tensor((w * 0.5, h * 0.5)) + max_vals += torch.tensor((w * 0.5, h * 0.5)) + + # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0 + tol = 1e-4 + cmax = torch.ceil((max_vals / tol).trunc_() * tol) + cmin = torch.floor((min_vals / tol).trunc_() * tol) + size = cmax - cmin + return int(size[0]), int(size[1]) # w, h + + +def rotate( + img: Tensor, + matrix: List[float], + interpolation: str = "nearest", + expand: bool = False, + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + w, h = img.shape[-1], img.shape[-2] + ow, oh = _compute_affine_output_size(matrix, w, h) if expand else (w, h) + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3) + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh) + + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor: + # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/ + 
# src/libImaging/Geometry.c#L394 + + # + # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1) + # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1) + # + theta1 = torch.tensor( + [[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device + ) + theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device) + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device) + x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device) + output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1) + output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2)) + + output_grid = output_grid1 / output_grid2 - 1.0 + return output_grid.view(1, oh, ow, 2) + + +def perspective( + img: Tensor, + perspective_coeffs: List[float], + interpolation: str = "bilinear", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor.") + + _assert_image_tensor(img) + + _assert_grid_transform_inputs( + img, + matrix=None, + interpolation=interpolation, + fill=fill, + supported_interpolation_modes=["nearest", "bilinear"], + coeffs=perspective_coeffs, + ) + + ow, oh = img.shape[-1], img.shape[-2] + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device) + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _get_gaussian_kernel1d(kernel_size: int, sigma: float, dtype: 
torch.dtype, device: torch.device) -> Tensor: + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, dtype=dtype, device=device) + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + kernel1d = pdf / pdf.sum() + + return kernel1d + + +def _get_gaussian_kernel2d( + kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device +) -> Tensor: + kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0], dtype, device) + kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1], dtype, device) + kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :]) + return kernel2d + + +def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError(f"img should be Tensor. Got {type(img)}") + + _assert_image_tensor(img) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device) + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype]) + + # padding = (left, right, top, bottom) + padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2] + img = torch_pad(img, padding, mode="reflect") + img = conv2d(img, kernel, groups=img.shape[-3]) + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def invert(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + return _max_value(img.dtype) - img + + +def posterize(img: Tensor, bits: int) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + if img.dtype != 
torch.uint8: + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") + + _assert_channels(img, [1, 3]) + mask = -int(2 ** (8 - bits)) # JIT-friendly for: ~(2 ** (8 - bits) - 1) + return img & mask + + +def solarize(img: Tensor, threshold: float) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + if threshold > _max_value(img.dtype): + raise TypeError("Threshold should be less than bound of img.") + + inverted_img = invert(img) + return torch.where(img >= threshold, inverted_img, img) + + +def _blurred_degenerate_image(img: Tensor) -> Tensor: + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + + kernel = torch.ones((3, 3), dtype=dtype, device=img.device) + kernel[1, 1] = 5.0 + kernel /= kernel.sum() + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype]) + result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3]) + result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype) + + result = img.clone() + result[..., 1:-1, 1:-1] = result_tmp + + return result + + +def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: + if sharpness_factor < 0: + raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + if img.size(-1) <= 2 or img.size(-2) <= 2: + return img + + return _blend(img, _blurred_degenerate_image(img), sharpness_factor) + + +def autocontrast(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + bound = _max_value(img.dtype) + dtype = img.dtype if 
torch.is_floating_point(img) else torch.float32 + + minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype) + maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype) + scale = bound / (maximum - minimum) + eq_idxs = torch.isfinite(scale).logical_not() + minimum[eq_idxs] = 0 + scale[eq_idxs] = 1 + + return ((img - minimum) * scale).clamp(0, bound).to(img.dtype) + + +def _scale_channel(img_chan: Tensor) -> Tensor: + # TODO: we should expect bincount to always be faster than histc, but this + # isn't always the case. Once + # https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if + # block and only use bincount. + if img_chan.is_cuda: + hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255) + else: + hist = torch.bincount(img_chan.reshape(-1), minlength=256) + + nonzero_hist = hist[hist != 0] + step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode="floor") + if step == 0: + return img_chan + + lut = torch.div(torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode="floor"), step, rounding_mode="floor") + lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255) + + return lut[img_chan.to(torch.int64)].to(torch.uint8) + + +def _equalize_single_image(img: Tensor) -> Tensor: + return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))]) + + +def equalize(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if not (3 <= img.ndim <= 4): + raise TypeError(f"Input image tensor should have 3 or 4 dimensions, but found {img.ndim}") + if img.dtype != torch.uint8: + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") + + _assert_channels(img, [1, 3]) + + if img.ndim == 3: + return _equalize_single_image(img) + + return torch.stack([_equalize_single_image(x) for x in img]) + + +def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor: + _assert_image_tensor(tensor) + + if not tensor.is_floating_point(): + raise TypeError(f"Input tensor 
should be a float tensor. Got {tensor.dtype}.") + + if tensor.ndim < 3: + raise ValueError( + f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {tensor.size()}" + ) + + if not inplace: + tensor = tensor.clone() + + dtype = tensor.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device) + std = torch.as_tensor(std, dtype=dtype, device=tensor.device) + if (std == 0).any(): + raise ValueError(f"std evaluated to zero after conversion to {dtype}, leading to division by zero.") + if mean.ndim == 1: + mean = mean.view(-1, 1, 1) + if std.ndim == 1: + std = std.view(-1, 1, 1) + return tensor.sub_(mean).div_(std) + + +def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor: + _assert_image_tensor(img) + + if not inplace: + img = img.clone() + + img[..., i : i + h, j : j + w] = v + return img + + +def _create_identity_grid(size: List[int]) -> Tensor: + hw_space = [torch.linspace((-s + 1) / s, (s - 1) / s, s) for s in size] + grid_y, grid_x = torch.meshgrid(hw_space, indexing="ij") + return torch.stack([grid_x, grid_y], -1).unsqueeze(0) # 1 x H x W x 2 + + +def elastic_transform( + img: Tensor, + displacement: Tensor, + interpolation: str = "bilinear", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + + if not (isinstance(img, torch.Tensor)): + raise TypeError(f"img should be Tensor. 
Got {type(img)}") + + size = list(img.shape[-2:]) + displacement = displacement.to(img.device) + + identity_grid = _create_identity_grid(size) + grid = identity_grid.to(img.device) + displacement + return _apply_grid_transform(img, grid, interpolation, fill) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py new file mode 100644 index 0000000000000000000000000000000000000000..91df7d42cd71fc554aba51fcf5e90db30e3c3851 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py @@ -0,0 +1,114 @@ +import warnings + +import torch + + +warnings.warn( + "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the future. " + "Please use the 'torchvision.transforms.functional' module instead." +) + + +def _is_tensor_video_clip(clip): + if not torch.is_tensor(clip): + raise TypeError("clip should be Tensor. Got %s" % type(clip)) + + if not clip.ndimension() == 4: + raise ValueError("clip should be 4D. Got %dD" % clip.dim()) + + return True + + +def crop(clip, i, j, h, w): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + """ + if len(clip.size()) != 4: + raise ValueError("clip should be a 4D tensor") + return clip[..., i : i + h, j : j + w] + + +def resize(clip, target_size, interpolation_mode): + if len(target_size) != 2: + raise ValueError(f"target size should be tuple (height, width), instead got {target_size}") + return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False) + + +def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): + """ + Do spatial cropping and resizing to the video clip + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + i (int): i in (i,j) i.e coordinates of the upper left corner. 
+ j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the cropped region. + w (int): Width of the cropped region. + size (tuple(int, int)): height and width of resized clip + Returns: + clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + clip = crop(clip, i, j, h, w) + clip = resize(clip, size, interpolation_mode) + return clip + + +def center_crop(clip, crop_size): + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + h, w = clip.size(-2), clip.size(-1) + th, tw = crop_size + if h < th or w < tw: + raise ValueError("height and width must be no smaller than crop_size") + + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) + return crop(clip, i, j, th, tw) + + +def to_tensor(clip): + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) + Return: + clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) + """ + _is_tensor_video_clip(clip) + if not clip.dtype == torch.uint8: + raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) + return clip.float().permute(3, 0, 1, 2) / 255.0 + + +def normalize(clip, mean, std, inplace=False): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + mean (tuple): pixel RGB mean. Size is (3) + std (tuple): pixel standard deviation. 
Size is (3) + Returns: + normalized clip (torch.tensor): Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + if not inplace: + clip = clip.clone() + mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) + std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + return clip + + +def hflip(clip): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + Returns: + flipped clip (torch.tensor): Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + return clip.flip(-1) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/_presets.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/_presets.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f88bdb992c8df2a826a04cec29a66deb4feab1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/_presets.py @@ -0,0 +1,216 @@ +""" +This file is part of the private API. Please do not use directly these classes as they will be modified on +future versions without warning. The classes should be accessed only via the transforms argument of Weights. +""" +from typing import Optional, Tuple + +import torch +from torch import nn, Tensor + +from . 
import functional as F, InterpolationMode + + +__all__ = [ + "ObjectDetection", + "ImageClassification", + "VideoClassification", + "SemanticSegmentation", + "OpticalFlow", +] + + +class ObjectDetection(nn.Module): + def forward(self, img: Tensor) -> Tensor: + if not isinstance(img, Tensor): + img = F.pil_to_tensor(img) + return F.convert_image_dtype(img, torch.float) + + def __repr__(self) -> str: + return self.__class__.__name__ + "()" + + def describe(self) -> str: + return ( + "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. " + "The images are rescaled to ``[0.0, 1.0]``." + ) + + +class ImageClassification(nn.Module): + def __init__( + self, + *, + crop_size: int, + resize_size: int = 256, + mean: Tuple[float, ...] = (0.485, 0.456, 0.406), + std: Tuple[float, ...] = (0.229, 0.224, 0.225), + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + antialias: Optional[bool] = True, + ) -> None: + super().__init__() + self.crop_size = [crop_size] + self.resize_size = [resize_size] + self.mean = list(mean) + self.std = list(std) + self.interpolation = interpolation + self.antialias = antialias + + def forward(self, img: Tensor) -> Tensor: + img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias) + img = F.center_crop(img, self.crop_size) + if not isinstance(img, Tensor): + img = F.pil_to_tensor(img) + img = F.convert_image_dtype(img, torch.float) + img = F.normalize(img, mean=self.mean, std=self.std) + return img + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + "(" + format_string += f"\n crop_size={self.crop_size}" + format_string += f"\n resize_size={self.resize_size}" + format_string += f"\n mean={self.mean}" + format_string += f"\n std={self.std}" + format_string += f"\n interpolation={self.interpolation}" + format_string += "\n)" + return format_string + + def describe(self) -> str: + return ( + "Accepts ``PIL.Image``, batched 
``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. " + f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, " + f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to " + f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``." + ) + + +class VideoClassification(nn.Module): + def __init__( + self, + *, + crop_size: Tuple[int, int], + resize_size: Tuple[int, int], + mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645), + std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989), + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + ) -> None: + super().__init__() + self.crop_size = list(crop_size) + self.resize_size = list(resize_size) + self.mean = list(mean) + self.std = list(std) + self.interpolation = interpolation + + def forward(self, vid: Tensor) -> Tensor: + need_squeeze = False + if vid.ndim < 5: + vid = vid.unsqueeze(dim=0) + need_squeeze = True + + N, T, C, H, W = vid.shape + vid = vid.view(-1, C, H, W) + # We hard-code antialias=False to preserve results after we changed + # its default from None to True (see + # https://github.com/pytorch/vision/pull/7160) + # TODO: we could re-train the video models with antialias=True? 
+ vid = F.resize(vid, self.resize_size, interpolation=self.interpolation, antialias=False) + vid = F.center_crop(vid, self.crop_size) + vid = F.convert_image_dtype(vid, torch.float) + vid = F.normalize(vid, mean=self.mean, std=self.std) + H, W = self.crop_size + vid = vid.view(N, T, C, H, W) + vid = vid.permute(0, 2, 1, 3, 4) # (N, T, C, H, W) => (N, C, T, H, W) + + if need_squeeze: + vid = vid.squeeze(dim=0) + return vid + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + "(" + format_string += f"\n crop_size={self.crop_size}" + format_string += f"\n resize_size={self.resize_size}" + format_string += f"\n mean={self.mean}" + format_string += f"\n std={self.std}" + format_string += f"\n interpolation={self.interpolation}" + format_string += "\n)" + return format_string + + def describe(self) -> str: + return ( + "Accepts batched ``(B, T, C, H, W)`` and single ``(T, C, H, W)`` video frame ``torch.Tensor`` objects. " + f"The frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, " + f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to " + f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``. Finally the output " + "dimensions are permuted to ``(..., C, T, H, W)`` tensors." + ) + + +class SemanticSegmentation(nn.Module): + def __init__( + self, + *, + resize_size: Optional[int], + mean: Tuple[float, ...] = (0.485, 0.456, 0.406), + std: Tuple[float, ...] 
= (0.229, 0.224, 0.225), + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + antialias: Optional[bool] = True, + ) -> None: + super().__init__() + self.resize_size = [resize_size] if resize_size is not None else None + self.mean = list(mean) + self.std = list(std) + self.interpolation = interpolation + self.antialias = antialias + + def forward(self, img: Tensor) -> Tensor: + if isinstance(self.resize_size, list): + img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias) + if not isinstance(img, Tensor): + img = F.pil_to_tensor(img) + img = F.convert_image_dtype(img, torch.float) + img = F.normalize(img, mean=self.mean, std=self.std) + return img + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + "(" + format_string += f"\n resize_size={self.resize_size}" + format_string += f"\n mean={self.mean}" + format_string += f"\n std={self.std}" + format_string += f"\n interpolation={self.interpolation}" + format_string += "\n)" + return format_string + + def describe(self) -> str: + return ( + "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. " + f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. " + f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and " + f"``std={self.std}``." 
+ ) + + +class OpticalFlow(nn.Module): + def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]: + if not isinstance(img1, Tensor): + img1 = F.pil_to_tensor(img1) + if not isinstance(img2, Tensor): + img2 = F.pil_to_tensor(img2) + + img1 = F.convert_image_dtype(img1, torch.float) + img2 = F.convert_image_dtype(img2, torch.float) + + # map [0, 1] into [-1, 1] + img1 = F.normalize(img1, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + img2 = F.normalize(img2, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + img1 = img1.contiguous() + img2 = img2.contiguous() + + return img1, img2 + + def __repr__(self) -> str: + return self.__class__.__name__ + "()" + + def describe(self) -> str: + return ( + "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. " + "The images are rescaled to ``[-1.0, 1.0]``." + ) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py new file mode 100644 index 0000000000000000000000000000000000000000..a04da4f74849805641e4c470f6b6b8d5f7000e3a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 + +import numbers +import random +import warnings + +from torchvision.transforms import RandomCrop, RandomResizedCrop + +from . import _functional_video as F + + +__all__ = [ + "RandomCropVideo", + "RandomResizedCropVideo", + "CenterCropVideo", + "NormalizeVideo", + "ToTensorVideo", + "RandomHorizontalFlipVideo", +] + + +warnings.warn( + "The 'torchvision.transforms._transforms_video' module is deprecated since 0.12 and will be removed in the future. " + "Please use the 'torchvision.transforms' module instead." 
+) + + +class RandomCropVideo(RandomCrop): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: randomly cropped/resized video clip. + size is (C, T, OH, OW) + """ + i, j, h, w = self.get_params(clip, self.size) + return F.crop(clip, i, j, h, w) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(size={self.size})" + + +class RandomResizedCropVideo(RandomResizedCrop): + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation_mode="bilinear", + ): + if isinstance(size, tuple): + if len(size) != 2: + raise ValueError(f"size should be tuple (height, width), instead got {size}") + self.size = size + else: + self.size = (size, size) + + self.interpolation_mode = interpolation_mode + self.scale = scale + self.ratio = ratio + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: randomly cropped/resized video clip. + size is (C, T, H, W) + """ + i, j, h, w = self.get_params(clip, self.scale, self.ratio) + return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})" + + +class CenterCropVideo: + def __init__(self, crop_size): + if isinstance(crop_size, numbers.Number): + self.crop_size = (int(crop_size), int(crop_size)) + else: + self.crop_size = crop_size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: central cropping of video clip. 
Size is + (C, T, crop_size, crop_size) + """ + return F.center_crop(clip, self.crop_size) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(crop_size={self.crop_size})" + + +class NormalizeVideo: + """ + Normalize the video clip by mean subtraction and division by standard deviation + Args: + mean (3-tuple): pixel RGB mean + std (3-tuple): pixel RGB standard deviation + inplace (boolean): whether do in-place normalization + """ + + def __init__(self, mean, std, inplace=False): + self.mean = mean + self.std = std + self.inplace = inplace + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W) + """ + return F.normalize(clip, self.mean, self.std, self.inplace) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})" + + +class ToTensorVideo: + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + """ + + def __init__(self): + pass + + def __call__(self, clip): + """ + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) + Return: + clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) + """ + return F.to_tensor(clip) + + def __repr__(self) -> str: + return self.__class__.__name__ + + +class RandomHorizontalFlipVideo: + """ + Flip the video clip along the horizontal direction with a given probability + Args: + p (float): probability of the clip being flipped. 
Default value is 0.5 + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Size is (C, T, H, W) + Return: + clip (torch.tensor): Size is (C, T, H, W) + """ + if random.random() < self.p: + clip = F.hflip(clip) + return clip + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(p={self.p})" diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/autoaugment.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/autoaugment.py new file mode 100644 index 0000000000000000000000000000000000000000..9dbbe91e741093b01ff8491ba8b39d9b6f578103 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/autoaugment.py @@ -0,0 +1,615 @@ +import math +from enum import Enum +from typing import Dict, List, Optional, Tuple + +import torch +from torch import Tensor + +from . import functional as F, InterpolationMode + +__all__ = ["AutoAugmentPolicy", "AutoAugment", "RandAugment", "TrivialAugmentWide", "AugMix"] + + +def _apply_op( + img: Tensor, op_name: str, magnitude: float, interpolation: InterpolationMode, fill: Optional[List[float]] +): + if op_name == "ShearX": + # magnitude should be arctan(magnitude) + # official autoaug: (1, level, 0, 0, 1, 0) + # https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290 + # compared to + # torchvision: (1, tan(level), 0, 0, 1, 0) + # https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976 + img = F.affine( + img, + angle=0.0, + translate=[0, 0], + scale=1.0, + shear=[math.degrees(math.atan(magnitude)), 0.0], + interpolation=interpolation, + fill=fill, + center=[0, 0], + ) + elif op_name == "ShearY": + # magnitude should be arctan(magnitude) + # See above + img = F.affine( + img, + angle=0.0, + translate=[0, 0], + scale=1.0, + shear=[0.0, math.degrees(math.atan(magnitude))], + 
interpolation=interpolation, + fill=fill, + center=[0, 0], + ) + elif op_name == "TranslateX": + img = F.affine( + img, + angle=0.0, + translate=[int(magnitude), 0], + scale=1.0, + interpolation=interpolation, + shear=[0.0, 0.0], + fill=fill, + ) + elif op_name == "TranslateY": + img = F.affine( + img, + angle=0.0, + translate=[0, int(magnitude)], + scale=1.0, + interpolation=interpolation, + shear=[0.0, 0.0], + fill=fill, + ) + elif op_name == "Rotate": + img = F.rotate(img, magnitude, interpolation=interpolation, fill=fill) + elif op_name == "Brightness": + img = F.adjust_brightness(img, 1.0 + magnitude) + elif op_name == "Color": + img = F.adjust_saturation(img, 1.0 + magnitude) + elif op_name == "Contrast": + img = F.adjust_contrast(img, 1.0 + magnitude) + elif op_name == "Sharpness": + img = F.adjust_sharpness(img, 1.0 + magnitude) + elif op_name == "Posterize": + img = F.posterize(img, int(magnitude)) + elif op_name == "Solarize": + img = F.solarize(img, magnitude) + elif op_name == "AutoContrast": + img = F.autocontrast(img) + elif op_name == "Equalize": + img = F.equalize(img) + elif op_name == "Invert": + img = F.invert(img) + elif op_name == "Identity": + pass + else: + raise ValueError(f"The provided operator {op_name} is not recognized.") + return img + + +class AutoAugmentPolicy(Enum): + """AutoAugment policies learned on different datasets. + Available policies are IMAGENET, CIFAR10 and SVHN. + """ + + IMAGENET = "imagenet" + CIFAR10 = "cifar10" + SVHN = "svhn" + + +# FIXME: Eliminate copy-pasted code for fill standardization and _augmentation_space() by moving stuff on a base class +class AutoAugment(torch.nn.Module): + r"""AutoAugment data augmentation method based on + `"AutoAugment: Learning Augmentation Strategies from Data" `_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. 
+ If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + policy (AutoAugmentPolicy): Desired policy enum defined by + :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + def __init__( + self, + policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET, + interpolation: InterpolationMode = InterpolationMode.NEAREST, + fill: Optional[List[float]] = None, + ) -> None: + super().__init__() + self.policy = policy + self.interpolation = interpolation + self.fill = fill + self.policies = self._get_policies(policy) + + def _get_policies( + self, policy: AutoAugmentPolicy + ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]: + if policy == AutoAugmentPolicy.IMAGENET: + return [ + (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Equalize", 0.4, None), ("Rotate", 0.8, 8)), + (("Solarize", 0.6, 3), ("Equalize", 0.6, None)), + (("Posterize", 0.8, 5), ("Equalize", 1.0, None)), + (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)), + (("Equalize", 0.6, None), ("Posterize", 0.4, 6)), + (("Rotate", 0.8, 8), ("Color", 0.4, 0)), + (("Rotate", 0.4, 9), ("Equalize", 0.6, None)), + (("Equalize", 0.0, None), ("Equalize", 0.8, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Rotate", 
0.8, 8), ("Color", 1.0, 2)), + (("Color", 0.8, 8), ("Solarize", 0.8, 7)), + (("Sharpness", 0.4, 7), ("Invert", 0.6, None)), + (("ShearX", 0.6, 5), ("Equalize", 1.0, None)), + (("Color", 0.4, 0), ("Equalize", 0.6, None)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + ] + elif policy == AutoAugmentPolicy.CIFAR10: + return [ + (("Invert", 0.1, None), ("Contrast", 0.2, 6)), + (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)), + (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)), + (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)), + (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)), + (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)), + (("Color", 0.4, 3), ("Brightness", 0.6, 7)), + (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)), + (("Equalize", 0.6, None), ("Equalize", 0.5, None)), + (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)), + (("Color", 0.7, 7), ("TranslateX", 0.5, 8)), + (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)), + (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)), + (("Brightness", 0.9, 6), ("Color", 0.2, 8)), + (("Solarize", 0.5, 2), ("Invert", 0.0, None)), + (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)), + (("Equalize", 0.2, None), ("Equalize", 0.6, None)), + (("Color", 0.9, 9), ("Equalize", 0.6, None)), + (("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)), + (("Brightness", 0.1, 3), ("Color", 0.7, 0)), + (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)), + (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)), + (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)), + (("Equalize", 0.8, None), ("Invert", 0.1, None)), + (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)), + ] + elif policy == AutoAugmentPolicy.SVHN: + return [ + (("ShearX", 0.9, 4), ("Invert", 0.2, None)), + (("ShearY", 0.9, 8), ("Invert", 0.7, None)), + (("Equalize", 0.6, 
None), ("Solarize", 0.6, 6)), + (("Invert", 0.9, None), ("Equalize", 0.6, None)), + (("Equalize", 0.6, None), ("Rotate", 0.9, 3)), + (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)), + (("ShearY", 0.9, 8), ("Invert", 0.4, None)), + (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)), + (("Invert", 0.9, None), ("AutoContrast", 0.8, None)), + (("Equalize", 0.6, None), ("Rotate", 0.9, 3)), + (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)), + (("ShearY", 0.8, 8), ("Invert", 0.7, None)), + (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)), + (("Invert", 0.9, None), ("Equalize", 0.6, None)), + (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)), + (("Invert", 0.8, None), ("TranslateY", 0.0, 2)), + (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)), + (("Invert", 0.6, None), ("Rotate", 0.8, 4)), + (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)), + (("ShearX", 0.1, 6), ("Invert", 0.6, None)), + (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)), + (("ShearY", 0.8, 4), ("Invert", 0.8, None)), + (("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)), + (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)), + (("ShearX", 0.7, 2), ("Invert", 0.1, None)), + ] + else: + raise ValueError(f"The provided policy {policy} is not recognized.") + + def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]: + return { + # op_name: (magnitudes, signed) + "ShearX": (torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True), + "TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True), + "Rotate": (torch.linspace(0.0, 30.0, num_bins), True), + "Brightness": (torch.linspace(0.0, 0.9, num_bins), True), + "Color": (torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": (torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True), + "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 
4)).round().int(), False), + "Solarize": (torch.linspace(255.0, 0.0, num_bins), False), + "AutoContrast": (torch.tensor(0.0), False), + "Equalize": (torch.tensor(0.0), False), + "Invert": (torch.tensor(0.0), False), + } + + @staticmethod + def get_params(transform_num: int) -> Tuple[int, Tensor, Tensor]: + """Get parameters for autoaugment transformation + + Returns: + params required by the autoaugment transformation + """ + policy_id = int(torch.randint(transform_num, (1,)).item()) + probs = torch.rand((2,)) + signs = torch.randint(2, (2,)) + + return policy_id, probs, signs + + def forward(self, img: Tensor) -> Tensor: + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: AutoAugmented image. + """ + fill = self.fill + channels, height, width = F.get_dimensions(img) + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + + transform_id, probs, signs = self.get_params(len(self.policies)) + + op_meta = self._augmentation_space(10, (height, width)) + for i, (op_name, p, magnitude_id) in enumerate(self.policies[transform_id]): + if probs[i] <= p: + magnitudes, signed = op_meta[op_name] + magnitude = float(magnitudes[magnitude_id].item()) if magnitude_id is not None else 0.0 + if signed and signs[i] == 0: + magnitude *= -1.0 + img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill) + + return img + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(policy={self.policy}, fill={self.fill})" + + +class RandAugment(torch.nn.Module): + r"""RandAugment data augmentation method based on + `"RandAugment: Practical automated data augmentation with a reduced search space" + `_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. 
+ If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + num_ops (int): Number of augmentation transformations to apply sequentially. + magnitude (int): Magnitude for all the transformations. + num_magnitude_bins (int): The number of different magnitude values. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + def __init__( + self, + num_ops: int = 2, + magnitude: int = 9, + num_magnitude_bins: int = 31, + interpolation: InterpolationMode = InterpolationMode.NEAREST, + fill: Optional[List[float]] = None, + ) -> None: + super().__init__() + self.num_ops = num_ops + self.magnitude = magnitude + self.num_magnitude_bins = num_magnitude_bins + self.interpolation = interpolation + self.fill = fill + + def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]: + return { + # op_name: (magnitudes, signed) + "Identity": (torch.tensor(0.0), False), + "ShearX": (torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True), + "TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True), + "Rotate": (torch.linspace(0.0, 30.0, num_bins), True), + "Brightness": (torch.linspace(0.0, 0.9, num_bins), True), + "Color": (torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": (torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True), + "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False), + 
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False), + "AutoContrast": (torch.tensor(0.0), False), + "Equalize": (torch.tensor(0.0), False), + } + + def forward(self, img: Tensor) -> Tensor: + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Transformed image. + """ + fill = self.fill + channels, height, width = F.get_dimensions(img) + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + + op_meta = self._augmentation_space(self.num_magnitude_bins, (height, width)) + for _ in range(self.num_ops): + op_index = int(torch.randint(len(op_meta), (1,)).item()) + op_name = list(op_meta.keys())[op_index] + magnitudes, signed = op_meta[op_name] + magnitude = float(magnitudes[self.magnitude].item()) if magnitudes.ndim > 0 else 0.0 + if signed and torch.randint(2, (1,)): + magnitude *= -1.0 + img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill) + + return img + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"num_ops={self.num_ops}" + f", magnitude={self.magnitude}" + f", num_magnitude_bins={self.num_magnitude_bins}" + f", interpolation={self.interpolation}" + f", fill={self.fill}" + f")" + ) + return s + + +class TrivialAugmentWide(torch.nn.Module): + r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in + `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" `_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + num_magnitude_bins (int): The number of different magnitude values. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. 
Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + def __init__( + self, + num_magnitude_bins: int = 31, + interpolation: InterpolationMode = InterpolationMode.NEAREST, + fill: Optional[List[float]] = None, + ) -> None: + super().__init__() + self.num_magnitude_bins = num_magnitude_bins + self.interpolation = interpolation + self.fill = fill + + def _augmentation_space(self, num_bins: int) -> Dict[str, Tuple[Tensor, bool]]: + return { + # op_name: (magnitudes, signed) + "Identity": (torch.tensor(0.0), False), + "ShearX": (torch.linspace(0.0, 0.99, num_bins), True), + "ShearY": (torch.linspace(0.0, 0.99, num_bins), True), + "TranslateX": (torch.linspace(0.0, 32.0, num_bins), True), + "TranslateY": (torch.linspace(0.0, 32.0, num_bins), True), + "Rotate": (torch.linspace(0.0, 135.0, num_bins), True), + "Brightness": (torch.linspace(0.0, 0.99, num_bins), True), + "Color": (torch.linspace(0.0, 0.99, num_bins), True), + "Contrast": (torch.linspace(0.0, 0.99, num_bins), True), + "Sharpness": (torch.linspace(0.0, 0.99, num_bins), True), + "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False), + "Solarize": (torch.linspace(255.0, 0.0, num_bins), False), + "AutoContrast": (torch.tensor(0.0), False), + "Equalize": (torch.tensor(0.0), False), + } + + def forward(self, img: Tensor) -> Tensor: + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Transformed image. 
+ """ + fill = self.fill + channels, height, width = F.get_dimensions(img) + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + + op_meta = self._augmentation_space(self.num_magnitude_bins) + op_index = int(torch.randint(len(op_meta), (1,)).item()) + op_name = list(op_meta.keys())[op_index] + magnitudes, signed = op_meta[op_name] + magnitude = ( + float(magnitudes[torch.randint(len(magnitudes), (1,), dtype=torch.long)].item()) + if magnitudes.ndim > 0 + else 0.0 + ) + if signed and torch.randint(2, (1,)): + magnitude *= -1.0 + + return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill) + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"num_magnitude_bins={self.num_magnitude_bins}" + f", interpolation={self.interpolation}" + f", fill={self.fill}" + f")" + ) + return s + + +class AugMix(torch.nn.Module): + r"""AugMix data augmentation method based on + `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" `_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + severity (int): The severity of base augmentation operators. Default is ``3``. + mixture_width (int): The number of augmentation chains. Default is ``3``. + chain_depth (int): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3]. + Default is ``-1``. + alpha (float): The hyperparameter for the probability distributions. Default is ``1.0``. + all_ops (bool): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``. 
+ interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + def __init__( + self, + severity: int = 3, + mixture_width: int = 3, + chain_depth: int = -1, + alpha: float = 1.0, + all_ops: bool = True, + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + fill: Optional[List[float]] = None, + ) -> None: + super().__init__() + self._PARAMETER_MAX = 10 + if not (1 <= severity <= self._PARAMETER_MAX): + raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.") + self.severity = severity + self.mixture_width = mixture_width + self.chain_depth = chain_depth + self.alpha = alpha + self.all_ops = all_ops + self.interpolation = interpolation + self.fill = fill + + def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]: + s = { + # op_name: (magnitudes, signed) + "ShearX": (torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": (torch.linspace(0.0, image_size[1] / 3.0, num_bins), True), + "TranslateY": (torch.linspace(0.0, image_size[0] / 3.0, num_bins), True), + "Rotate": (torch.linspace(0.0, 30.0, num_bins), True), + "Posterize": (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False), + "Solarize": (torch.linspace(255.0, 0.0, num_bins), False), + "AutoContrast": (torch.tensor(0.0), False), + "Equalize": (torch.tensor(0.0), False), + } + if self.all_ops: + s.update( + { + "Brightness": (torch.linspace(0.0, 0.9, num_bins), True), + "Color": (torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": 
(torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True), + } + ) + return s + + @torch.jit.unused + def _pil_to_tensor(self, img) -> Tensor: + return F.pil_to_tensor(img) + + @torch.jit.unused + def _tensor_to_pil(self, img: Tensor): + return F.to_pil_image(img) + + def _sample_dirichlet(self, params: Tensor) -> Tensor: + # Must be on a separate method so that we can overwrite it in tests. + return torch._sample_dirichlet(params) + + def forward(self, orig_img: Tensor) -> Tensor: + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Transformed image. + """ + fill = self.fill + channels, height, width = F.get_dimensions(orig_img) + if isinstance(orig_img, Tensor): + img = orig_img + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + else: + img = self._pil_to_tensor(orig_img) + + op_meta = self._augmentation_space(self._PARAMETER_MAX, (height, width)) + + orig_dims = list(img.shape) + batch = img.view([1] * max(4 - img.ndim, 0) + orig_dims) + batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1) + + # Sample the beta weights for combining the original and augmented image. To get Beta, we use a Dirichlet + # with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of augmented image. + m = self._sample_dirichlet( + torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1) + ) + + # Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images. 
+ combined_weights = self._sample_dirichlet( + torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1) + ) * m[:, 1].view([batch_dims[0], -1]) + + mix = m[:, 0].view(batch_dims) * batch + for i in range(self.mixture_width): + aug = batch + depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item()) + for _ in range(depth): + op_index = int(torch.randint(len(op_meta), (1,)).item()) + op_name = list(op_meta.keys())[op_index] + magnitudes, signed = op_meta[op_name] + magnitude = ( + float(magnitudes[torch.randint(self.severity, (1,), dtype=torch.long)].item()) + if magnitudes.ndim > 0 + else 0.0 + ) + if signed and torch.randint(2, (1,)): + magnitude *= -1.0 + aug = _apply_op(aug, op_name, magnitude, interpolation=self.interpolation, fill=fill) + mix.add_(combined_weights[:, i].view(batch_dims) * aug) + mix = mix.view(orig_dims).to(dtype=img.dtype) + + if not isinstance(orig_img, Tensor): + return self._tensor_to_pil(mix) + return mix + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"severity={self.severity}" + f", mixture_width={self.mixture_width}" + f", chain_depth={self.chain_depth}" + f", alpha={self.alpha}" + f", all_ops={self.all_ops}" + f", interpolation={self.interpolation}" + f", fill={self.fill}" + f")" + ) + return s diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/functional.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..8efe2a8878a06b37d7ede9496b213076c1f59c01 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/functional.py @@ -0,0 +1,1586 @@ +import math +import numbers +import sys +import warnings +from enum import Enum +from typing import Any, List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL import Image +from PIL.Image import Image as PILImage +from torch import Tensor 
+ +try: + import accimage +except ImportError: + accimage = None + +from ..utils import _log_api_usage_once +from . import _functional_pil as F_pil, _functional_tensor as F_t + + +class InterpolationMode(Enum): + """Interpolation modes + Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``, + and ``lanczos``. + """ + + NEAREST = "nearest" + NEAREST_EXACT = "nearest-exact" + BILINEAR = "bilinear" + BICUBIC = "bicubic" + # For PIL compatibility + BOX = "box" + HAMMING = "hamming" + LANCZOS = "lanczos" + + +# TODO: Once torchscript supports Enums with staticmethod +# this can be put into InterpolationMode as staticmethod +def _interpolation_modes_from_int(i: int) -> InterpolationMode: + inverse_modes_mapping = { + 0: InterpolationMode.NEAREST, + 2: InterpolationMode.BILINEAR, + 3: InterpolationMode.BICUBIC, + 4: InterpolationMode.BOX, + 5: InterpolationMode.HAMMING, + 1: InterpolationMode.LANCZOS, + } + return inverse_modes_mapping[i] + + +pil_modes_mapping = { + InterpolationMode.NEAREST: 0, + InterpolationMode.BILINEAR: 2, + InterpolationMode.BICUBIC: 3, + InterpolationMode.NEAREST_EXACT: 0, + InterpolationMode.BOX: 4, + InterpolationMode.HAMMING: 5, + InterpolationMode.LANCZOS: 1, +} + +_is_pil_image = F_pil._is_pil_image + + +def get_dimensions(img: Tensor) -> List[int]: + """Returns the dimensions of an image as [channels, height, width]. + + Args: + img (PIL Image or Tensor): The image to be checked. + + Returns: + List[int]: The image dimensions. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(get_dimensions) + if isinstance(img, torch.Tensor): + return F_t.get_dimensions(img) + + return F_pil.get_dimensions(img) + + +def get_image_size(img: Tensor) -> List[int]: + """Returns the size of an image as [width, height]. + + Args: + img (PIL Image or Tensor): The image to be checked. + + Returns: + List[int]: The image size. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(get_image_size) + if isinstance(img, torch.Tensor): + return F_t.get_image_size(img) + + return F_pil.get_image_size(img) + + +def get_image_num_channels(img: Tensor) -> int: + """Returns the number of channels of an image. + + Args: + img (PIL Image or Tensor): The image to be checked. + + Returns: + int: The number of channels. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(get_image_num_channels) + if isinstance(img, torch.Tensor): + return F_t.get_image_num_channels(img) + + return F_pil.get_image_num_channels(img) + + +@torch.jit.unused +def _is_numpy(img: Any) -> bool: + return isinstance(img, np.ndarray) + + +@torch.jit.unused +def _is_numpy_image(img: Any) -> bool: + return img.ndim in {2, 3} + + +def to_tensor(pic: Union[PILImage, np.ndarray]) -> Tensor: + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + This function does not support torchscript. + + See :class:`~torchvision.transforms.ToTensor` for more details. + + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_tensor) + if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): + raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") + + if _is_numpy(pic) and not _is_numpy_image(pic): + raise ValueError(f"pic should be 2/3 dimensional. 
Got {pic.ndim} dimensions.") + + default_float_dtype = torch.get_default_dtype() + + if isinstance(pic, np.ndarray): + # handle numpy array + if pic.ndim == 2: + pic = pic[:, :, None] + + img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous() + # backward compatibility + if isinstance(img, torch.ByteTensor): + return img.to(dtype=default_float_dtype).div(255) + else: + return img + + if accimage is not None and isinstance(pic, accimage.Image): + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32) + pic.copyto(nppic) + return torch.from_numpy(nppic).to(dtype=default_float_dtype) + + # handle PIL Image + mode_to_nptype = {"I": np.int32, "I;16" if sys.byteorder == "little" else "I;16B": np.int16, "F": np.float32} + img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True)) + + if pic.mode == "1": + img = 255 * img + img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic)) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)).contiguous() + if isinstance(img, torch.ByteTensor): + return img.to(dtype=default_float_dtype).div(255) + else: + return img + + +def pil_to_tensor(pic: Any) -> Tensor: + """Convert a ``PIL Image`` to a tensor of the same type. + This function does not support torchscript. + + See :class:`~torchvision.transforms.PILToTensor` for more details. + + .. note:: + + A deep copy of the underlying array is performed. + + Args: + pic (PIL Image): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(pil_to_tensor) + if not F_pil._is_pil_image(pic): + raise TypeError(f"pic should be PIL Image. 
Got {type(pic)}") + + if accimage is not None and isinstance(pic, accimage.Image): + # accimage format is always uint8 internally, so always return uint8 here + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8) + pic.copyto(nppic) + return torch.as_tensor(nppic) + + # handle PIL Image + img = torch.as_tensor(np.array(pic, copy=True)) + img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic)) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)) + return img + + +def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + """Convert a tensor image to the given ``dtype`` and scale the values accordingly + This function does not support PIL Image. + + Args: + image (torch.Tensor): Image to be converted + dtype (torch.dtype): Desired data type of the output + + Returns: + Tensor: Converted image + + .. note:: + + When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly. + If converted back and forth, this mismatch has no effect. + + Raises: + RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as + well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to + overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range + of the integer ``dtype``. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(convert_image_dtype) + if not isinstance(image, torch.Tensor): + raise TypeError("Input img should be Tensor Image") + + return F_t.convert_image_dtype(image, dtype) + + +def to_pil_image(pic, mode=None): + """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript. + + See :class:`~torchvision.transforms.ToPILImage` for more details. + + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. 
+ mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + + .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + + Returns: + PIL Image: Image converted to PIL Image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_pil_image) + + if isinstance(pic, torch.Tensor): + if pic.ndim == 3: + pic = pic.permute((1, 2, 0)) + pic = pic.numpy(force=True) + elif not isinstance(pic, np.ndarray): + raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.") + + if pic.ndim == 2: + # if 2D image, add channel dimension (HWC) + pic = np.expand_dims(pic, 2) + if pic.ndim != 3: + raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.") + + if pic.shape[-1] > 4: + raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.") + + npimg = pic + + if np.issubdtype(npimg.dtype, np.floating) and mode != "F": + npimg = (npimg * 255).astype(np.uint8) + + if npimg.shape[2] == 1: + expected_mode = None + npimg = npimg[:, :, 0] + if npimg.dtype == np.uint8: + expected_mode = "L" + elif npimg.dtype == np.int16: + expected_mode = "I;16" if sys.byteorder == "little" else "I;16B" + elif npimg.dtype == np.int32: + expected_mode = "I" + elif npimg.dtype == np.float32: + expected_mode = "F" + if mode is not None and mode != expected_mode: + raise ValueError(f"Incorrect mode ({mode}) supplied for input type {np.dtype}. 
Should be {expected_mode}") + mode = expected_mode + + elif npimg.shape[2] == 2: + permitted_2_channel_modes = ["LA"] + if mode is not None and mode not in permitted_2_channel_modes: + raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs") + + if mode is None and npimg.dtype == np.uint8: + mode = "LA" + + elif npimg.shape[2] == 4: + permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"] + if mode is not None and mode not in permitted_4_channel_modes: + raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs") + + if mode is None and npimg.dtype == np.uint8: + mode = "RGBA" + else: + permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"] + if mode is not None and mode not in permitted_3_channel_modes: + raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs") + if mode is None and npimg.dtype == np.uint8: + mode = "RGB" + + if mode is None: + raise TypeError(f"Input type {npimg.dtype} is not supported") + + return Image.fromarray(npimg, mode=mode) + + +def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor: + """Normalize a float tensor image with mean and standard deviation. + This transform does not support PIL Image. + + .. note:: + This transform acts out of place by default, i.e., it does not mutates the input tensor. + + See :class:`~torchvision.transforms.Normalize` for more details. + + Args: + tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized. + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation inplace. + + Returns: + Tensor: Normalized Tensor image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(normalize) + if not isinstance(tensor, torch.Tensor): + raise TypeError(f"img should be Tensor Image. 
Got {type(tensor)}") + + return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace) + + +def _compute_resized_output_size( + image_size: Tuple[int, int], + size: Optional[List[int]], + max_size: Optional[int] = None, + allow_size_none: bool = False, # only True in v2 +) -> List[int]: + h, w = image_size + short, long = (w, h) if w <= h else (h, w) + if size is None: + if not allow_size_none: + raise ValueError("This should never happen!!") + if not isinstance(max_size, int): + raise ValueError(f"max_size must be an integer when size is None, but got {max_size} instead.") + new_short, new_long = int(max_size * short / long), max_size + new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short) + elif len(size) == 1: # specified size only for the smallest edge + requested_new_short = size if isinstance(size, int) else size[0] + new_short, new_long = requested_new_short, int(requested_new_short * long / short) + + if max_size is not None: + if max_size <= requested_new_short: + raise ValueError( + f"max_size = {max_size} must be strictly greater than the requested " + f"size for the smaller edge size = {size}" + ) + if new_long > max_size: + new_short, new_long = int(max_size * new_short / new_long), max_size + + new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short) + else: # specified both h and w + new_w, new_h = size[1], size[0] + return [new_h, new_w] + + +def resize( + img: Tensor, + size: List[int], + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[bool] = True, +) -> Tensor: + r"""Resize the input image to the given size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + img (PIL Image or Tensor): Image to be resized. + size (sequence or int): Desired output size. If size is a sequence like + (h, w), the output size will be matched to this. 
If size is an int, + the smaller edge of the image will be matched to this number maintaining + the aspect ratio. i.e, if height > width, then image will be rescaled to + :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`. + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. + Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``, + ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are + supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + max_size (int, optional): The maximum allowed for the longer edge of + the resized image. If the longer edge of the image is greater + than ``max_size`` after being resized according to ``size``, + ``size`` will be overruled so that the longer edge is equal to + ``max_size``. + As a result, the smaller edge may be shorter than ``size``. This + is only supported if ``size`` is an int (or a sequence of length + 1 in torchscript mode). + antialias (bool, optional): Whether to apply antialiasing. + It only affects **tensors** with bilinear or bicubic modes and it is + ignored otherwise: on PIL images, antialiasing is always applied on + bilinear or bicubic modes; on other modes (for PIL images and + tensors), antialiasing makes no sense and this parameter is ignored. + Possible values are: + + - ``True`` (default): will apply antialiasing for bilinear or bicubic modes. + Other mode aren't affected. This is probably what you want to use. + - ``False``: will not apply antialiasing for tensors on any mode. PIL + images are still antialiased on bilinear or bicubic modes, because + PIL doesn't support no antialias. 
+ - ``None``: equivalent to ``False`` for tensors and ``True`` for + PIL images. This value exists for legacy reasons and you probably + don't want to use it unless you really know what you are doing. + + The default value changed from ``None`` to ``True`` in + v0.17, for the PIL and Tensor backends to be consistent. + + Returns: + PIL Image or Tensor: Resized image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(resize) + + if isinstance(interpolation, int): + interpolation = _interpolation_modes_from_int(interpolation) + elif not isinstance(interpolation, InterpolationMode): + raise TypeError( + "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant" + ) + + if isinstance(size, (list, tuple)): + if len(size) not in [1, 2]: + raise ValueError( + f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list" + ) + if max_size is not None and len(size) != 1: + raise ValueError( + "max_size should only be passed if size specifies the length of the smaller edge, " + "i.e. size should be an int or a sequence of length 1 in torchscript mode." + ) + + _, image_height, image_width = get_dimensions(img) + if isinstance(size, int): + size = [size] + output_size = _compute_resized_output_size((image_height, image_width), size, max_size) + + if [image_height, image_width] == output_size: + return img + + if not isinstance(img, torch.Tensor): + if antialias is False: + warnings.warn("Anti-alias option is always applied for PIL Image input. 
Argument antialias is ignored.") + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.resize(img, size=output_size, interpolation=pil_interpolation) + + return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias) + + +def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor: + r"""Pad the given image on all sides with the given "pad" value. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric, + at most 3 leading dimensions for mode edge, + and an arbitrary number of leading dimensions for mode constant + + Args: + img (PIL Image or Tensor): Image to be padded. + padding (int or sequence): Padding on each border. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. note:: + In torchscript mode padding as single int is not supported, use a sequence of + length 1: ``[padding, ]``. + fill (number or tuple): Pixel fill value for constant fill. Default is 0. + If a tuple of length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. + Only number is supported for torch Tensor. + Only int or tuple value is supported for PIL Image. + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image. + If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2 + + - reflect: pads with reflection of image without repeating the last value on the edge. 
+ For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + + Returns: + PIL Image or Tensor: Padded image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(pad) + if not isinstance(img, torch.Tensor): + return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) + + return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) + + +def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + """Crop the given image at specified location and output size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + If image size is smaller than output size along any edge, image is padded with 0 and then cropped. + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. + width (int): Width of the crop box. + + Returns: + PIL Image or Tensor: Cropped image. + """ + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(crop) + if not isinstance(img, torch.Tensor): + return F_pil.crop(img, top, left, height, width) + + return F_t.crop(img, top, left, height, width) + + +def center_crop(img: Tensor, output_size: List[int]) -> Tensor: + """Crops the given image at the center. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. 
+ If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. + + Args: + img (PIL Image or Tensor): Image to be cropped. + output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int, + it is used for both directions. + + Returns: + PIL Image or Tensor: Cropped image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(center_crop) + if isinstance(output_size, numbers.Number): + output_size = (int(output_size), int(output_size)) + elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: + output_size = (output_size[0], output_size[0]) + + _, image_height, image_width = get_dimensions(img) + crop_height, crop_width = output_size + + if crop_width > image_width or crop_height > image_height: + padding_ltrb = [ + (crop_width - image_width) // 2 if crop_width > image_width else 0, + (crop_height - image_height) // 2 if crop_height > image_height else 0, + (crop_width - image_width + 1) // 2 if crop_width > image_width else 0, + (crop_height - image_height + 1) // 2 if crop_height > image_height else 0, + ] + img = pad(img, padding_ltrb, fill=0) # PIL uses fill value 0 + _, image_height, image_width = get_dimensions(img) + if crop_width == image_width and crop_height == image_height: + return img + + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) + return crop(img, crop_top, crop_left, crop_height, crop_width) + + +def resized_crop( + img: Tensor, + top: int, + left: int, + height: int, + width: int, + size: List[int], + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + antialias: Optional[bool] = True, +) -> Tensor: + """Crop the given image and resize it to desired size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... 
means an arbitrary number of leading dimensions + + Notably used in :class:`~torchvision.transforms.RandomResizedCrop`. + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. + width (int): Width of the crop box. + size (sequence or int): Desired output size. Same semantics as ``resize``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. + Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``, + ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are + supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + antialias (bool, optional): Whether to apply antialiasing. + It only affects **tensors** with bilinear or bicubic modes and it is + ignored otherwise: on PIL images, antialiasing is always applied on + bilinear or bicubic modes; on other modes (for PIL images and + tensors), antialiasing makes no sense and this parameter is ignored. + Possible values are: + + - ``True`` (default): will apply antialiasing for bilinear or bicubic modes. + Other mode aren't affected. This is probably what you want to use. + - ``False``: will not apply antialiasing for tensors on any mode. PIL + images are still antialiased on bilinear or bicubic modes, because + PIL doesn't support no antialias. + - ``None``: equivalent to ``False`` for tensors and ``True`` for + PIL images. This value exists for legacy reasons and you probably + don't want to use it unless you really know what you are doing. + + The default value changed from ``None`` to ``True`` in + v0.17, for the PIL and Tensor backends to be consistent. 
+ Returns: + PIL Image or Tensor: Cropped image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(resized_crop) + img = crop(img, top, left, height, width) + img = resize(img, size, interpolation, antialias=antialias) + return img + + +def hflip(img: Tensor) -> Tensor: + """Horizontally flip the given image. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of leading + dimensions. + + Returns: + PIL Image or Tensor: Horizontally flipped image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(hflip) + if not isinstance(img, torch.Tensor): + return F_pil.hflip(img) + + return F_t.hflip(img) + + +def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]: + """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. + + In Perspective Transform each pixel (x, y) in the original image gets transformed as, + (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) + + Args: + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + + Returns: + octuple (a, b, c, d, e, f, g, h) for transforming each pixel. + """ + if len(startpoints) != 4 or len(endpoints) != 4: + raise ValueError( + f"Please provide exactly four corners, got {len(startpoints)} startpoints and {len(endpoints)} endpoints." 
+ ) + a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float64) + + for i, (p1, p2) in enumerate(zip(endpoints, startpoints)): + a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) + a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) + + b_matrix = torch.tensor(startpoints, dtype=torch.float64).view(8) + # do least squares in double precision to prevent numerical issues + res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution.to(torch.float32) + + output: List[float] = res.tolist() + return output + + +def perspective( + img: Tensor, + startpoints: List[List[int]], + endpoints: List[List[int]], + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + fill: Optional[List[float]] = None, +) -> Tensor: + """Perform perspective transform of the given image. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): Image to be transformed. + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. 
If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + + Returns: + PIL Image or Tensor: transformed Image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(perspective) + + coeffs = _get_perspective_coeffs(startpoints, endpoints) + + if isinstance(interpolation, int): + interpolation = _interpolation_modes_from_int(interpolation) + elif not isinstance(interpolation, InterpolationMode): + raise TypeError( + "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant" + ) + + if not isinstance(img, torch.Tensor): + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill) + + return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill) + + +def vflip(img: Tensor) -> Tensor: + """Vertically flip the given image. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of leading + dimensions. + + Returns: + PIL Image or Tensor: Vertically flipped image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(vflip) + if not isinstance(img, torch.Tensor): + return F_pil.vflip(img) + + return F_t.vflip(img) + + +def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + """Crop the given image into four corners and the central crop. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. 
+ + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + + Returns: + tuple: tuple (tl, tr, bl, br, center) + Corresponding top left, top right, bottom left, bottom right and center crop. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(five_crop) + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + _, image_height, image_width = get_dimensions(img) + crop_height, crop_width = size + if crop_width > image_width or crop_height > image_height: + msg = "Requested crop size {} is bigger than input size {}" + raise ValueError(msg.format(size, (image_height, image_width))) + + tl = crop(img, 0, 0, crop_height, crop_width) + tr = crop(img, 0, image_width - crop_width, crop_height, crop_width) + bl = crop(img, image_height - crop_height, 0, crop_height, crop_width) + br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width) + + center = center_crop(img, [crop_height, crop_width]) + + return tl, tr, bl, br, center + + +def ten_crop( + img: Tensor, size: List[int], vertical_flip: bool = False +) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: + """Generate ten cropped images from the given image. + Crop the given image into four corners and the central crop plus the + flipped version of these (horizontal flipping is used by default). + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. 
Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool): Use vertical flipping instead of horizontal + + Returns: + tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) + Corresponding top left, top right, bottom left, bottom right and + center crop and same for the flipped image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(ten_crop) + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + first_five = five_crop(img, size) + + if vertical_flip: + img = vflip(img) + else: + img = hflip(img) + + second_five = five_crop(img, size) + return first_five + second_five + + +def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + """Adjust brightness of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + brightness_factor (float): How much to adjust the brightness. Can be + any non-negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL Image or Tensor: Brightness adjusted image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_brightness) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_brightness(img, brightness_factor) + + return F_t.adjust_brightness(img, brightness_factor) + + +def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + """Adjust contrast of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + contrast_factor (float): How much to adjust the contrast. Can be any + non-negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL Image or Tensor: Contrast adjusted image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_contrast) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_contrast(img, contrast_factor) + + return F_t.adjust_contrast(img, contrast_factor) + + +def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + """Adjust color saturation of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL Image or Tensor: Saturation adjusted image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_saturation) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_saturation(img, saturation_factor) + + return F_t.adjust_saturation(img, saturation_factor) + + +def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + """Adjust hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + See `Hue`_ for more details. + + .. _Hue: https://en.wikipedia.org/wiki/Hue + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported. + Note: the pixel values of the input image has to be non-negative for conversion to HSV space; + thus it does not work if you normalize your image to an interval with negative values, + or use an interpolation that generates negative values before using this function. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL Image or Tensor: Hue adjusted image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_hue) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_hue(img, hue_factor) + + return F_t.adjust_hue(img, hue_factor) + + +def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + r"""Perform gamma correction on an image. + + Also known as Power Law Transform. Intensities in RGB mode are adjusted + based on the following equation: + + .. math:: + I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma} + + See `Gamma Correction`_ for more details. + + .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction + + Args: + img (PIL Image or Tensor): PIL Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, modes with transparency (alpha channel) are not supported. + gamma (float): Non negative real number, same as :math:`\gamma` in the equation. + gamma larger than 1 make the shadows darker, + while gamma smaller than 1 make dark regions lighter. + gain (float): The constant multiplier. + Returns: + PIL Image or Tensor: Gamma correction adjusted image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_gamma) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_gamma(img, gamma, gain) + + return F_t.adjust_gamma(img, gamma, gain) + + +def _get_inverse_affine_matrix( + center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True +) -> List[float]: + # Helper method to compute inverse matrix for affine transformation + + # Pillow requires inverse affine transformation matrix: + # Affine matrix is : M = T * C * RotateScaleShear * C^-1 + # + # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1] + # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1] + # RotateScaleShear is rotation with scale and shear matrix + # + # RotateScaleShear(a, s, (sx, sy)) = + # = R(a) * S(s) * SHy(sy) * SHx(sx) + # = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ] + # [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ] + # [ 0 , 0 , 1 ] + # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears: + # SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0] + # [0, 1 ] [-tan(s), 1] + # + # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1 + + rot = math.radians(angle) + sx = math.radians(shear[0]) + sy = math.radians(shear[1]) + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + if inverted: + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += 
matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy + else: + matrix = [a, b, 0.0, c, d, 0.0] + matrix = [x * scale for x in matrix] + # Apply inverse of center translation: RSS * C^-1 + matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy) + matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy) + # Apply translation and center : T * C * RSS * C^-1 + matrix[2] += cx + tx + matrix[5] += cy + ty + + return matrix + + +def rotate( + img: Tensor, + angle: float, + interpolation: InterpolationMode = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[int]] = None, + fill: Optional[List[float]] = None, +) -> Tensor: + """Rotate the image by angle. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to be rotated. + angle (number): rotation angle value in degrees, counter-clockwise. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (sequence, optional): Optional center of rotation. Origin is the upper left corner. + Default is the center of the image. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. 
If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + Returns: + PIL Image or Tensor: Rotated image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(rotate) + + if isinstance(interpolation, int): + interpolation = _interpolation_modes_from_int(interpolation) + elif not isinstance(interpolation, InterpolationMode): + raise TypeError( + "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant" + ) + + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if center is not None and not isinstance(center, (list, tuple)): + raise TypeError("Argument center should be a sequence") + + if not isinstance(img, torch.Tensor): + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill) + + center_f = [0.0, 0.0] + if center is not None: + _, height, width = get_dimensions(img) + # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. + center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])] + + # due to current incoherence of rotation angle direction between affine and rotate implementations + # we need to set -angle. 
+ matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0]) + return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill) + + +def affine( + img: Tensor, + angle: float, + translate: List[int], + scale: float, + shear: List[float], + interpolation: InterpolationMode = InterpolationMode.NEAREST, + fill: Optional[List[float]] = None, + center: Optional[List[int]] = None, +) -> Tensor: + """Apply affine transformation on the image keeping image center invariant. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to transform. + angle (number): rotation angle in degrees between -180 and 180, clockwise direction. + translate (sequence of integers): horizontal and vertical translations (post-rotation translation) + scale (float): overall scale + shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction. + If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while + the second value corresponds to a shear parallel to the y-axis. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + center (sequence, optional): Optional center of rotation. Origin is the upper left corner. 
+ Default is the center of the image. + + Returns: + PIL Image or Tensor: Transformed image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(affine) + + if isinstance(interpolation, int): + interpolation = _interpolation_modes_from_int(interpolation) + elif not isinstance(interpolation, InterpolationMode): + raise TypeError( + "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant" + ) + + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if not isinstance(translate, (list, tuple)): + raise TypeError("Argument translate should be a sequence") + + if len(translate) != 2: + raise ValueError("Argument translate should be a sequence of length 2") + + if scale <= 0.0: + raise ValueError("Argument scale should be positive") + + if not isinstance(shear, (numbers.Number, (list, tuple))): + raise TypeError("Shear should be either a single value or a sequence of two values") + + if isinstance(angle, int): + angle = float(angle) + + if isinstance(translate, tuple): + translate = list(translate) + + if isinstance(shear, numbers.Number): + shear = [shear, 0.0] + + if isinstance(shear, tuple): + shear = list(shear) + + if len(shear) == 1: + shear = [shear[0], shear[0]] + + if len(shear) != 2: + raise ValueError(f"Shear should be a sequence containing two values. 
Got {shear}") + + if center is not None and not isinstance(center, (list, tuple)): + raise TypeError("Argument center should be a sequence") + + _, height, width = get_dimensions(img) + if not isinstance(img, torch.Tensor): + # center = (width * 0.5 + 0.5, height * 0.5 + 0.5) + # it is visually better to estimate the center without 0.5 offset + # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine + if center is None: + center = [width * 0.5, height * 0.5] + matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill) + + center_f = [0.0, 0.0] + if center is not None: + _, height, width = get_dimensions(img) + # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. + center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])] + + translate_f = [1.0 * t for t in translate] + matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear) + return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill) + + +# Looks like to_grayscale() is a stand-alone functional that is never called +# from the transform classes. Perhaps it's still here for BC? I can't be +# bothered to dig. +@torch.jit.unused +def to_grayscale(img, num_output_channels=1): + """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image. + This transform does not support torch Tensor. + + Args: + img (PIL Image): PIL Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1. + + Returns: + PIL Image: Grayscale version of the image. 
+ + - if num_output_channels = 1 : returned image is single channel + - if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_grayscale) + if isinstance(img, Image.Image): + return F_pil.to_grayscale(img, num_output_channels) + + raise TypeError("Input should be PIL Image") + + +def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + """Convert RGB image to grayscale version of image. + If the image is torch Tensor, it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions + + Note: + Please, note that this method supports only RGB images as input. For inputs in other color spaces, + please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image. + + Args: + img (PIL Image or Tensor): RGB Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1. + + Returns: + PIL Image or Tensor: Grayscale version of the image. + + - if num_output_channels = 1 : returned image is single channel + - if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(rgb_to_grayscale) + if not isinstance(img, torch.Tensor): + return F_pil.to_grayscale(img, num_output_channels) + + return F_t.rgb_to_grayscale(img, num_output_channels) + + +def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor: + """Erase the input Tensor Image with given value. + This transform does not support PIL Image. + + Args: + img (Tensor Image): Tensor image of size (C, H, W) to be erased + i (int): i in (i,j) i.e coordinates of the upper left corner. + j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the erased region. 
+ w (int): Width of the erased region. + v: Erasing value. + inplace(bool, optional): For in-place operations. By default, is set False. + + Returns: + Tensor Image: Erased image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(erase) + if not isinstance(img, torch.Tensor): + raise TypeError(f"img should be Tensor Image. Got {type(img)}") + + return F_t.erase(img, i, j, h, w, v, inplace=inplace) + + +def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor: + """Performs Gaussian blurring on the image by given kernel + + The convolution will be using reflection padding corresponding to the kernel size, to maintain the input shape. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means at most one leading dimension. + + Args: + img (PIL Image or Tensor): Image to be blurred + kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers + like ``(kx, ky)`` or a single integer for square kernels. + + .. note:: + In torchscript mode kernel_size as single int is not supported, use a sequence of + length 1: ``[ksize, ]``. + sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a + sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the + same sigma in both X/Y directions. If None, then it is computed using + ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``. + Default, None. + + .. note:: + In torchscript mode sigma as single float is + not supported, use a sequence of length 1: ``[sigma, ]``. + + Returns: + PIL Image or Tensor: Gaussian Blurred version of the image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(gaussian_blur) + if not isinstance(kernel_size, (int, list, tuple)): + raise TypeError(f"kernel_size should be int or a sequence of integers. 
Got {type(kernel_size)}") + if isinstance(kernel_size, int): + kernel_size = [kernel_size, kernel_size] + if len(kernel_size) != 2: + raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}") + for ksize in kernel_size: + if ksize % 2 == 0 or ksize < 0: + raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}") + + if sigma is None: + sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] + + if sigma is not None and not isinstance(sigma, (int, float, list, tuple)): + raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}") + if isinstance(sigma, (int, float)): + sigma = [float(sigma), float(sigma)] + if isinstance(sigma, (list, tuple)) and len(sigma) == 1: + sigma = [sigma[0], sigma[0]] + if len(sigma) != 2: + raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}") + for s in sigma: + if s <= 0.0: + raise ValueError(f"sigma should have positive values. Got {sigma}") + + t_img = img + if not isinstance(img, torch.Tensor): + if not F_pil._is_pil_image(img): + raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}") + + t_img = pil_to_tensor(img) + + output = F_t.gaussian_blur(t_img, kernel_size, sigma) + + if not isinstance(img, torch.Tensor): + output = to_pil_image(output, mode=img.mode) + return output + + +def invert(img: Tensor) -> Tensor: + """Invert the colors of an RGB/grayscale image. + + Args: + img (PIL Image or Tensor): Image to have its colors inverted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Returns: + PIL Image or Tensor: Color inverted image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(invert) + if not isinstance(img, torch.Tensor): + return F_pil.invert(img) + + return F_t.invert(img) + + +def posterize(img: Tensor, bits: int) -> Tensor: + """Posterize an image by reducing the number of bits for each color channel. + + Args: + img (PIL Image or Tensor): Image to have its colors posterized. + If img is torch Tensor, it should be of type torch.uint8, and + it is expected to be in [..., 1 or 3, H, W] format, where ... means + it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + bits (int): The number of bits to keep for each channel (0-8). + Returns: + PIL Image or Tensor: Posterized image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(posterize) + if not (0 <= bits <= 8): + raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") + + if not isinstance(img, torch.Tensor): + return F_pil.posterize(img, bits) + + return F_t.posterize(img, bits) + + +def solarize(img: Tensor, threshold: float) -> Tensor: + """Solarize an RGB/grayscale image by inverting all pixel values above a threshold. + + Args: + img (PIL Image or Tensor): Image to have its colors inverted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + threshold (float): All pixels equal or above this value are inverted. + Returns: + PIL Image or Tensor: Solarized image. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(solarize) + if not isinstance(img, torch.Tensor): + return F_pil.solarize(img, threshold) + + return F_t.solarize(img, threshold) + + +def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: + """Adjust the sharpness of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + sharpness_factor (float): How much to adjust the sharpness. Can be + any non-negative number. 0 gives a blurred image, 1 gives the + original image while 2 increases the sharpness by a factor of 2. + + Returns: + PIL Image or Tensor: Sharpness adjusted image. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_sharpness) + if not isinstance(img, torch.Tensor): + return F_pil.adjust_sharpness(img, sharpness_factor) + + return F_t.adjust_sharpness(img, sharpness_factor) + + +def autocontrast(img: Tensor) -> Tensor: + """Maximize contrast of an image by remapping its + pixels per channel so that the lowest becomes black and the lightest + becomes white. + + Args: + img (PIL Image or Tensor): Image on which autocontrast is applied. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Returns: + PIL Image or Tensor: An image that was autocontrasted. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(autocontrast) + if not isinstance(img, torch.Tensor): + return F_pil.autocontrast(img) + + return F_t.autocontrast(img) + + +def equalize(img: Tensor) -> Tensor: + """Equalize the histogram of an image by applying + a non-linear mapping to the input in order to create a uniform + distribution of grayscale values in the output. + + Args: + img (PIL Image or Tensor): Image on which equalize is applied. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``. + If img is PIL Image, it is expected to be in mode "P", "L" or "RGB". + + Returns: + PIL Image or Tensor: An image that was equalized. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(equalize) + if not isinstance(img, torch.Tensor): + return F_pil.equalize(img) + + return F_t.equalize(img) + + +def elastic_transform( + img: Tensor, + displacement: Tensor, + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + fill: Optional[List[float]] = None, +) -> Tensor: + """Transform a tensor image with elastic transformations. + Given alpha and sigma, it will generate displacement + vectors for all pixels based on random offsets. Alpha controls the strength + and sigma controls the smoothness of the displacements. + The displacements are added to an identity grid and the resulting grid is + used to grid_sample from the image. + + Applications: + Randomly transforms the morphology of objects in images and produces a + see-through-water-like effect. + + Args: + img (PIL Image or Tensor): Image on which elastic_transform is applied. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. 
+ If img is PIL Image, it is expected to be in mode "P", "L" or "RGB". + displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2]. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. + Default is ``InterpolationMode.BILINEAR``. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. + If a tuple of length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. + """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(elastic_transform) + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if not isinstance(displacement, torch.Tensor): + raise TypeError("Argument displacement should be a Tensor") + + t_img = img + if not isinstance(img, torch.Tensor): + if not F_pil._is_pil_image(img): + raise TypeError(f"img should be PIL Image or Tensor. 
Got {type(img)}") + t_img = pil_to_tensor(img) + + shape = t_img.shape + shape = (1,) + shape[-2:] + (2,) + if shape != displacement.shape: + raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}") + + # TODO: if image shape is [N1, N2, ..., C, H, W] and + # displacement is [1, H, W, 2] we need to reshape input image + # such grid_sampler takes internal code for 4D input + + output = F_t.elastic_transform( + t_img, + displacement, + interpolation=interpolation.value, + fill=fill, + ) + + if not isinstance(img, torch.Tensor): + output = to_pil_image(output, mode=img.mode) + return output diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_augment.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_augment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6b0628b895310d2bdffaa3ba49a9eb617ec0e6a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_augment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_auto_augment.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_auto_augment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea53a9a4cea4a620cffe66ab4d2efdeafe373c1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_auto_augment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_color.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5582c142098995df251a4e6dfeb147894d834b0b Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_color.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_deprecated.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_deprecated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86ecdaf9ab6b48a6ef935eae3c049eccb86ef1f4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_deprecated.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_temporal.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_temporal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a75651f881023737fd9eb3530aaddfd0264cc64b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_temporal.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_transform.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..701864b71ba6d4344a478da82d744e6b01bfbce3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_transform.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_type_conversion.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_type_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7163665731eee0a554925a73fb55bd9b0837c10 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_type_conversion.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..983c61e0a7d13bd642885cdd2bd3bf689c9d1b9c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/_auto_augment.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/_auto_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..4dd7ba343aa360c38192d2c8ac88481093ed8c93 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/_auto_augment.py @@ -0,0 +1,627 @@ +import math +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union + +import PIL.Image +import torch + +from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec +from torchvision import transforms as _transforms, tv_tensors +from torchvision.transforms import _functional_tensor as _FT +from torchvision.transforms.v2 import AutoAugmentPolicy, functional as F, InterpolationMode, Transform +from torchvision.transforms.v2.functional._geometry import _check_interpolation +from torchvision.transforms.v2.functional._meta import get_size +from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT + +from ._utils import _get_fill, _setup_fill_arg, check_type, is_pure_tensor + + +ImageOrVideo = Union[torch.Tensor, PIL.Image.Image, tv_tensors.Image, tv_tensors.Video] + + +class _AutoAugmentBase(Transform): + def __init__( + self, + *, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None, + ) -> None: + super().__init__() + self.interpolation = _check_interpolation(interpolation) + self.fill = fill + 
self._fill = _setup_fill_arg(fill) + + def _extract_params_for_v1_transform(self) -> Dict[str, Any]: + params = super()._extract_params_for_v1_transform() + + if isinstance(params["fill"], dict): + raise ValueError(f"{type(self).__name__}() can not be scripted for when `fill` is a dictionary.") + + return params + + def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, Tuple[Callable, bool]]: + keys = tuple(dct.keys()) + key = keys[int(torch.randint(len(keys), ()))] + return key, dct[key] + + def _flatten_and_extract_image_or_video( + self, + inputs: Any, + unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask), + ) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]: + flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0]) + needs_transform_list = self._needs_transform_list(flat_inputs) + + image_or_videos = [] + for idx, (inpt, needs_transform) in enumerate(zip(flat_inputs, needs_transform_list)): + if needs_transform and check_type( + inpt, + ( + tv_tensors.Image, + PIL.Image.Image, + is_pure_tensor, + tv_tensors.Video, + ), + ): + image_or_videos.append((idx, inpt)) + elif isinstance(inpt, unsupported_types): + raise TypeError(f"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()") + + if not image_or_videos: + raise TypeError("Found no image in the sample.") + if len(image_or_videos) > 1: + raise TypeError( + f"Auto augment transformations are only properly defined for a single image or video, " + f"but found {len(image_or_videos)}." 
+ ) + + idx, image_or_video = image_or_videos[0] + return (flat_inputs, spec, idx), image_or_video + + def _unflatten_and_insert_image_or_video( + self, + flat_inputs_with_spec: Tuple[List[Any], TreeSpec, int], + image_or_video: ImageOrVideo, + ) -> Any: + flat_inputs, spec, idx = flat_inputs_with_spec + flat_inputs[idx] = image_or_video + return tree_unflatten(flat_inputs, spec) + + def _apply_image_or_video_transform( + self, + image: ImageOrVideo, + transform_id: str, + magnitude: float, + interpolation: Union[InterpolationMode, int], + fill: Dict[Union[Type, str], _FillTypeJIT], + ) -> ImageOrVideo: + # Note: this cast is wrong and is only here to make mypy happy (it disagrees with torchscript) + image = cast(torch.Tensor, image) + fill_ = _get_fill(fill, type(image)) + + if transform_id == "Identity": + return image + elif transform_id == "ShearX": + # magnitude should be arctan(magnitude) + # official autoaug: (1, level, 0, 0, 1, 0) + # https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290 + # compared to + # torchvision: (1, tan(level), 0, 0, 1, 0) + # https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976 + return F.affine( + image, + angle=0.0, + translate=[0, 0], + scale=1.0, + shear=[math.degrees(math.atan(magnitude)), 0.0], + interpolation=interpolation, + fill=fill_, + center=[0, 0], + ) + elif transform_id == "ShearY": + # magnitude should be arctan(magnitude) + # See above + return F.affine( + image, + angle=0.0, + translate=[0, 0], + scale=1.0, + shear=[0.0, math.degrees(math.atan(magnitude))], + interpolation=interpolation, + fill=fill_, + center=[0, 0], + ) + elif transform_id == "TranslateX": + return F.affine( + image, + angle=0.0, + translate=[int(magnitude), 0], + scale=1.0, + interpolation=interpolation, + shear=[0.0, 0.0], + fill=fill_, + ) + elif transform_id == "TranslateY": + return F.affine( 
+ image, + angle=0.0, + translate=[0, int(magnitude)], + scale=1.0, + interpolation=interpolation, + shear=[0.0, 0.0], + fill=fill_, + ) + elif transform_id == "Rotate": + return F.rotate(image, angle=magnitude, interpolation=interpolation, fill=fill_) + elif transform_id == "Brightness": + return F.adjust_brightness(image, brightness_factor=1.0 + magnitude) + elif transform_id == "Color": + return F.adjust_saturation(image, saturation_factor=1.0 + magnitude) + elif transform_id == "Contrast": + return F.adjust_contrast(image, contrast_factor=1.0 + magnitude) + elif transform_id == "Sharpness": + return F.adjust_sharpness(image, sharpness_factor=1.0 + magnitude) + elif transform_id == "Posterize": + return F.posterize(image, bits=int(magnitude)) + elif transform_id == "Solarize": + bound = _FT._max_value(image.dtype) if isinstance(image, torch.Tensor) else 255.0 + return F.solarize(image, threshold=bound * magnitude) + elif transform_id == "AutoContrast": + return F.autocontrast(image) + elif transform_id == "Equalize": + return F.equalize(image) + elif transform_id == "Invert": + return F.invert(image) + else: + raise ValueError(f"No transform available for {transform_id}") + + +class AutoAugment(_AutoAugmentBase): + r"""AutoAugment data augmentation method based on + `"AutoAugment: Learning Augmentation Strategies from Data" `_. + + This transformation works on images and videos only. + + If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + policy (AutoAugmentPolicy, optional): Desired policy enum defined by + :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. 
Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + _v1_transform_cls = _transforms.AutoAugment + + _AUGMENTATION_SPACE = { + "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": ( + lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins), + True, + ), + "TranslateY": ( + lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins), + True, + ), + "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True), + "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Posterize": ( + lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(), + False, + ), + "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False), + "AutoContrast": (lambda num_bins, height, width: None, False), + "Equalize": (lambda num_bins, height, width: None, False), + "Invert": (lambda num_bins, height, width: None, False), + } + + def __init__( + self, + policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None, + ) -> None: + super().__init__(interpolation=interpolation, fill=fill) + self.policy = policy + 
self._policies = self._get_policies(policy) + + def _get_policies( + self, policy: AutoAugmentPolicy + ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]: + if policy == AutoAugmentPolicy.IMAGENET: + return [ + (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Equalize", 0.4, None), ("Rotate", 0.8, 8)), + (("Solarize", 0.6, 3), ("Equalize", 0.6, None)), + (("Posterize", 0.8, 5), ("Equalize", 1.0, None)), + (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)), + (("Equalize", 0.6, None), ("Posterize", 0.4, 6)), + (("Rotate", 0.8, 8), ("Color", 0.4, 0)), + (("Rotate", 0.4, 9), ("Equalize", 0.6, None)), + (("Equalize", 0.0, None), ("Equalize", 0.8, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Rotate", 0.8, 8), ("Color", 1.0, 2)), + (("Color", 0.8, 8), ("Solarize", 0.8, 7)), + (("Sharpness", 0.4, 7), ("Invert", 0.6, None)), + (("ShearX", 0.6, 5), ("Equalize", 1.0, None)), + (("Color", 0.4, 0), ("Equalize", 0.6, None)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + ] + elif policy == AutoAugmentPolicy.CIFAR10: + return [ + (("Invert", 0.1, None), ("Contrast", 0.2, 6)), + (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)), + (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)), + (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)), + (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)), + (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)), + (("Color", 0.4, 3), ("Brightness", 0.6, 7)), + (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)), + (("Equalize", 0.6, None), ("Equalize", 0.5, None)), + 
                # Continuation of the CIFAR10 policy table. Each entry is a pair of
                # (transform_id, probability, magnitude_idx-or-None) sub-policies,
                # applied in order by forward() when its probability check passes.
                (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
                (("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
                (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
                (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
                (("Brightness", 0.9, 6), ("Color", 0.2, 8)),
                (("Solarize", 0.5, 2), ("Invert", 0.0, None)),
                (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.2, None), ("Equalize", 0.6, None)),
                (("Color", 0.9, 9), ("Equalize", 0.6, None)),
                (("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
                (("Brightness", 0.1, 3), ("Color", 0.7, 0)),
                (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
                (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
                (("Equalize", 0.8, None), ("Invert", 0.1, None)),
                (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
            ]
        elif policy == AutoAugmentPolicy.SVHN:
            # SVHN policy table: same (transform_id, probability, magnitude_idx)
            # pair structure; a magnitude_idx of None marks parameterless ops
            # (Invert, Equalize, AutoContrast).
            return [
                (("ShearX", 0.9, 4), ("Invert", 0.2, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.4, None)),
                (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
                (("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
                (("ShearY", 0.8, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
                (("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
                (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
                (("Invert", 0.6, None), ("Rotate", 0.8, 4)),
                (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
                (("ShearX", 0.1, 6), ("Invert", 0.6, None)),
                (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
                (("ShearY", 0.8, 4), ("Invert", 0.8, None)),
                (("ShearX", 0.7, 9),
("TranslateY", 0.8, 3)), + (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)), + (("ShearX", 0.7, 2), ("Invert", 0.1, None)), + ] + else: + raise ValueError(f"The provided policy {policy} is not recognized.") + + def forward(self, *inputs: Any) -> Any: + flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs) + height, width = get_size(image_or_video) # type: ignore[arg-type] + + policy = self._policies[int(torch.randint(len(self._policies), ()))] + + for transform_id, probability, magnitude_idx in policy: + if not torch.rand(()) <= probability: + continue + + magnitudes_fn, signed = self._AUGMENTATION_SPACE[transform_id] + + magnitudes = magnitudes_fn(10, height, width) + if magnitudes is not None: + magnitude = float(magnitudes[magnitude_idx]) + if signed and torch.rand(()) <= 0.5: + magnitude *= -1 + else: + magnitude = 0.0 + + image_or_video = self._apply_image_or_video_transform( + image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill + ) + + return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video) + + +class RandAugment(_AutoAugmentBase): + r"""RandAugment data augmentation method based on + `"RandAugment: Practical automated data augmentation with a reduced search space" + `_. + + This transformation works on images and videos only. + + If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + num_ops (int, optional): Number of augmentation transformations to apply sequentially. + magnitude (int, optional): Magnitude for all the transformations. + num_magnitude_bins (int, optional): The number of different magnitude values. 
+ interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + _v1_transform_cls = _transforms.RandAugment + _AUGMENTATION_SPACE = { + "Identity": (lambda num_bins, height, width: None, False), + "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": ( + lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins), + True, + ), + "TranslateY": ( + lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins), + True, + ), + "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True), + "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Posterize": ( + lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(), + False, + ), + "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False), + "AutoContrast": (lambda num_bins, height, width: None, False), + "Equalize": (lambda num_bins, height, width: None, False), + } + + def __init__( + self, + num_ops: int = 2, + magnitude: int = 9, + num_magnitude_bins: int = 31, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: 
Union[_FillType, Dict[Union[Type, str], _FillType]] = None, + ) -> None: + super().__init__(interpolation=interpolation, fill=fill) + self.num_ops = num_ops + self.magnitude = magnitude + self.num_magnitude_bins = num_magnitude_bins + + def forward(self, *inputs: Any) -> Any: + flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs) + height, width = get_size(image_or_video) # type: ignore[arg-type] + + for _ in range(self.num_ops): + transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE) + magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width) + if magnitudes is not None: + magnitude = float(magnitudes[self.magnitude]) + if signed and torch.rand(()) <= 0.5: + magnitude *= -1 + else: + magnitude = 0.0 + image_or_video = self._apply_image_or_video_transform( + image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill + ) + + return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video) + + +class TrivialAugmentWide(_AutoAugmentBase): + r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in + `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" `_. + + This transformation works on images and videos only. + + If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + num_magnitude_bins (int, optional): The number of different magnitude values. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. 
+ fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + _v1_transform_cls = _transforms.TrivialAugmentWide + _AUGMENTATION_SPACE = { + "Identity": (lambda num_bins, height, width: None, False), + "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True), + "TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True), + "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 135.0, num_bins), True), + "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True), + "Posterize": ( + lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6))).round().int(), + False, + ), + "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False), + "AutoContrast": (lambda num_bins, height, width: None, False), + "Equalize": (lambda num_bins, height, width: None, False), + } + + def __init__( + self, + num_magnitude_bins: int = 31, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None, + ): + super().__init__(interpolation=interpolation, fill=fill) + self.num_magnitude_bins = num_magnitude_bins + + def forward(self, *inputs: Any) -> Any: + flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs) + height, width = get_size(image_or_video) # type: 
ignore[arg-type] + + transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE) + + magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width) + if magnitudes is not None: + magnitude = float(magnitudes[int(torch.randint(self.num_magnitude_bins, ()))]) + if signed and torch.rand(()) <= 0.5: + magnitude *= -1 + else: + magnitude = 0.0 + + image_or_video = self._apply_image_or_video_transform( + image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill + ) + return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video) + + +class AugMix(_AutoAugmentBase): + r"""AugMix data augmentation method based on + `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" `_. + + This transformation works on images and videos only. + + If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + severity (int, optional): The severity of base augmentation operators. Default is ``3``. + mixture_width (int, optional): The number of augmentation chains. Default is ``3``. + chain_depth (int, optional): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3]. + Default is ``-1``. + alpha (float, optional): The hyperparameter for the probability distributions. Default is ``1.0``. + all_ops (bool, optional): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. 
+ fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + _v1_transform_cls = _transforms.AugMix + + _PARTIAL_AUGMENTATION_SPACE = { + "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, width / 3.0, num_bins), True), + "TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, height / 3.0, num_bins), True), + "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True), + "Posterize": ( + lambda num_bins, height, width: (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(), + False, + ), + "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False), + "AutoContrast": (lambda num_bins, height, width: None, False), + "Equalize": (lambda num_bins, height, width: None, False), + } + _AUGMENTATION_SPACE: Dict[str, Tuple[Callable[[int, int, int], Optional[torch.Tensor]], bool]] = { + **_PARTIAL_AUGMENTATION_SPACE, + "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True), + } + + def __init__( + self, + severity: int = 3, + mixture_width: int = 3, + chain_depth: int = -1, + alpha: float = 1.0, + all_ops: bool = True, + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None, + ) -> None: + super().__init__(interpolation=interpolation, fill=fill) + self._PARAMETER_MAX = 10 + if not (1 <= severity <= 
self._PARAMETER_MAX): + raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.") + self.severity = severity + self.mixture_width = mixture_width + self.chain_depth = chain_depth + self.alpha = alpha + self.all_ops = all_ops + + def _sample_dirichlet(self, params: torch.Tensor) -> torch.Tensor: + # Must be on a separate method so that we can overwrite it in tests. + return torch._sample_dirichlet(params) + + def forward(self, *inputs: Any) -> Any: + flat_inputs_with_spec, orig_image_or_video = self._flatten_and_extract_image_or_video(inputs) + height, width = get_size(orig_image_or_video) # type: ignore[arg-type] + + if isinstance(orig_image_or_video, torch.Tensor): + image_or_video = orig_image_or_video + else: # isinstance(inpt, PIL.Image.Image): + image_or_video = F.pil_to_tensor(orig_image_or_video) + + augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE + + orig_dims = list(image_or_video.shape) + expected_ndim = 5 if isinstance(orig_image_or_video, tv_tensors.Video) else 4 + batch = image_or_video.reshape([1] * max(expected_ndim - image_or_video.ndim, 0) + orig_dims) + batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1) + + # Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a + # Dirichlet with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of + # augmented image or video. + m = self._sample_dirichlet( + torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1) + ) + + # Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images or videos. 
+ combined_weights = self._sample_dirichlet( + torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1) + ) * m[:, 1].reshape([batch_dims[0], -1]) + + mix = m[:, 0].reshape(batch_dims) * batch + for i in range(self.mixture_width): + aug = batch + depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item()) + for _ in range(depth): + transform_id, (magnitudes_fn, signed) = self._get_random_item(augmentation_space) + + magnitudes = magnitudes_fn(self._PARAMETER_MAX, height, width) + if magnitudes is not None: + magnitude = float(magnitudes[int(torch.randint(self.severity, ()))]) + if signed and torch.rand(()) <= 0.5: + magnitude *= -1 + else: + magnitude = 0.0 + + aug = self._apply_image_or_video_transform(aug, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill) # type: ignore[assignment] + mix.add_(combined_weights[:, i].reshape(batch_dims) * aug) + mix = mix.reshape(orig_dims).to(dtype=image_or_video.dtype) + + if isinstance(orig_image_or_video, (tv_tensors.Image, tv_tensors.Video)): + mix = tv_tensors.wrap(mix, like=orig_image_or_video) + elif isinstance(orig_image_or_video, PIL.Image.Image): + mix = F.to_pil_image(mix) + + return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, mix) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5c246275386982521e3bc7e9d9493673e973ddc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_augment.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_augment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fae70b071799c7d060f37f1124726f8aad6db379 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_augment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c5d36a0c27bf9d9d692dda2daff3ee169fe3b4e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4141004a37d791aab7646c336c9d6be6f80aff9b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..775490c36450f9b3cee1b38fe535e687c6ce375e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_temporal.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_temporal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02feaedec89c3af471537eb9f2210c13e46453e5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_temporal.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_type_conversion.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_type_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30d1d47e8fc45cda4d938b8527c97399dbad7919 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_type_conversion.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_color.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_color.py new file mode 100644 index 0000000000000000000000000000000000000000..34d1e101dbdc6b386cd7446e6013d1be9d17aa30 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_color.py @@ -0,0 +1,739 @@ +from typing import List + +import PIL.Image +import torch +from torch.nn.functional import conv2d +from torchvision import tv_tensors +from torchvision.transforms import _functional_pil as _FP +from torchvision.transforms._functional_tensor import _max_value + +from torchvision.utils import _log_api_usage_once + +from ._misc import _num_value_bits, to_dtype_image +from ._type_conversion import pil_to_tensor, to_pil_image +from ._utils import _get_kernel, _register_kernel_internal + + +def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.Grayscale` for details.""" + if torch.jit.is_scripting(): + return 
rgb_to_grayscale_image(inpt, num_output_channels=num_output_channels) + + _log_api_usage_once(rgb_to_grayscale) + + kernel = _get_kernel(rgb_to_grayscale, type(inpt)) + return kernel(inpt, num_output_channels=num_output_channels) + + +# `to_grayscale` actually predates `rgb_to_grayscale` in v1, but only handles PIL images. Since `rgb_to_grayscale` is a +# superset in terms of functionality and has the same signature, we alias here to avoid disruption. +to_grayscale = rgb_to_grayscale + + +def _rgb_to_grayscale_image( + image: torch.Tensor, num_output_channels: int = 1, preserve_dtype: bool = True +) -> torch.Tensor: + # TODO: Maybe move the validation that num_output_channels is 1 or 3 to this function instead of callers. + if image.shape[-3] == 1 and num_output_channels == 1: + return image.clone() + if image.shape[-3] == 1 and num_output_channels == 3: + s = [1] * len(image.shape) + s[-3] = 3 + return image.repeat(s) + r, g, b = image.unbind(dim=-3) + l_img = r.mul(0.2989).add_(g, alpha=0.587).add_(b, alpha=0.114) + l_img = l_img.unsqueeze(dim=-3) + if preserve_dtype: + l_img = l_img.to(image.dtype) + if num_output_channels == 3: + l_img = l_img.expand(image.shape) + return l_img + + +@_register_kernel_internal(rgb_to_grayscale, torch.Tensor) +@_register_kernel_internal(rgb_to_grayscale, tv_tensors.Image) +def rgb_to_grayscale_image(image: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor: + if num_output_channels not in (1, 3): + raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.") + return _rgb_to_grayscale_image(image, num_output_channels=num_output_channels, preserve_dtype=True) + + +@_register_kernel_internal(rgb_to_grayscale, PIL.Image.Image) +def _rgb_to_grayscale_image_pil(image: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image: + if num_output_channels not in (1, 3): + raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.") + return _FP.to_grayscale(image, 
num_output_channels=num_output_channels) + + +def grayscale_to_rgb(inpt: torch.Tensor) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.GrayscaleToRgb` for details.""" + if torch.jit.is_scripting(): + return grayscale_to_rgb_image(inpt) + + _log_api_usage_once(grayscale_to_rgb) + + kernel = _get_kernel(grayscale_to_rgb, type(inpt)) + return kernel(inpt) + + +@_register_kernel_internal(grayscale_to_rgb, torch.Tensor) +@_register_kernel_internal(grayscale_to_rgb, tv_tensors.Image) +def grayscale_to_rgb_image(image: torch.Tensor) -> torch.Tensor: + if image.shape[-3] >= 3: + # Image already has RGB channels. We don't need to do anything. + return image + # rgb_to_grayscale can be used to add channels so we reuse that function. + return _rgb_to_grayscale_image(image, num_output_channels=3, preserve_dtype=True) + + +@_register_kernel_internal(grayscale_to_rgb, PIL.Image.Image) +def grayscale_to_rgb_image_pil(image: PIL.Image.Image) -> PIL.Image.Image: + return image.convert(mode="RGB") + + +def _blend(image1: torch.Tensor, image2: torch.Tensor, ratio: float) -> torch.Tensor: + ratio = float(ratio) + fp = image1.is_floating_point() + bound = _max_value(image1.dtype) + output = image1.mul(ratio).add_(image2, alpha=(1.0 - ratio)).clamp_(0, bound) + return output if fp else output.to(image1.dtype) + + +def adjust_brightness(inpt: torch.Tensor, brightness_factor: float) -> torch.Tensor: + """Adjust brightness.""" + + if torch.jit.is_scripting(): + return adjust_brightness_image(inpt, brightness_factor=brightness_factor) + + _log_api_usage_once(adjust_brightness) + + kernel = _get_kernel(adjust_brightness, type(inpt)) + return kernel(inpt, brightness_factor=brightness_factor) + + +@_register_kernel_internal(adjust_brightness, torch.Tensor) +@_register_kernel_internal(adjust_brightness, tv_tensors.Image) +def adjust_brightness_image(image: torch.Tensor, brightness_factor: float) -> torch.Tensor: + if brightness_factor < 0: + raise ValueError(f"brightness_factor 
({brightness_factor}) is not non-negative.") + + c = image.shape[-3] + if c not in [1, 3]: + raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}") + + fp = image.is_floating_point() + bound = _max_value(image.dtype) + output = image.mul(brightness_factor).clamp_(0, bound) + return output if fp else output.to(image.dtype) + + +@_register_kernel_internal(adjust_brightness, PIL.Image.Image) +def _adjust_brightness_image_pil(image: PIL.Image.Image, brightness_factor: float) -> PIL.Image.Image: + return _FP.adjust_brightness(image, brightness_factor=brightness_factor) + + +@_register_kernel_internal(adjust_brightness, tv_tensors.Video) +def adjust_brightness_video(video: torch.Tensor, brightness_factor: float) -> torch.Tensor: + return adjust_brightness_image(video, brightness_factor=brightness_factor) + + +def adjust_saturation(inpt: torch.Tensor, saturation_factor: float) -> torch.Tensor: + """Adjust saturation.""" + if torch.jit.is_scripting(): + return adjust_saturation_image(inpt, saturation_factor=saturation_factor) + + _log_api_usage_once(adjust_saturation) + + kernel = _get_kernel(adjust_saturation, type(inpt)) + return kernel(inpt, saturation_factor=saturation_factor) + + +@_register_kernel_internal(adjust_saturation, torch.Tensor) +@_register_kernel_internal(adjust_saturation, tv_tensors.Image) +def adjust_saturation_image(image: torch.Tensor, saturation_factor: float) -> torch.Tensor: + if saturation_factor < 0: + raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.") + + c = image.shape[-3] + if c not in [1, 3]: + raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}") + + if c == 1: # Match PIL behaviour + return image + + grayscale_image = _rgb_to_grayscale_image(image, num_output_channels=1, preserve_dtype=False) + if not image.is_floating_point(): + grayscale_image = grayscale_image.floor_() + + return _blend(image, grayscale_image, saturation_factor) + + 
+_adjust_saturation_image_pil = _register_kernel_internal(adjust_saturation, PIL.Image.Image)(_FP.adjust_saturation) + + +@_register_kernel_internal(adjust_saturation, tv_tensors.Video) +def adjust_saturation_video(video: torch.Tensor, saturation_factor: float) -> torch.Tensor: + return adjust_saturation_image(video, saturation_factor=saturation_factor) + + +def adjust_contrast(inpt: torch.Tensor, contrast_factor: float) -> torch.Tensor: + """See :class:`~torchvision.transforms.RandomAutocontrast`""" + if torch.jit.is_scripting(): + return adjust_contrast_image(inpt, contrast_factor=contrast_factor) + + _log_api_usage_once(adjust_contrast) + + kernel = _get_kernel(adjust_contrast, type(inpt)) + return kernel(inpt, contrast_factor=contrast_factor) + + +@_register_kernel_internal(adjust_contrast, torch.Tensor) +@_register_kernel_internal(adjust_contrast, tv_tensors.Image) +def adjust_contrast_image(image: torch.Tensor, contrast_factor: float) -> torch.Tensor: + if contrast_factor < 0: + raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.") + + c = image.shape[-3] + if c not in [1, 3]: + raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}") + fp = image.is_floating_point() + if c == 3: + grayscale_image = _rgb_to_grayscale_image(image, num_output_channels=1, preserve_dtype=False) + if not fp: + grayscale_image = grayscale_image.floor_() + else: + grayscale_image = image if fp else image.to(torch.float32) + mean = torch.mean(grayscale_image, dim=(-3, -2, -1), keepdim=True) + return _blend(image, mean, contrast_factor) + + +_adjust_contrast_image_pil = _register_kernel_internal(adjust_contrast, PIL.Image.Image)(_FP.adjust_contrast) + + +@_register_kernel_internal(adjust_contrast, tv_tensors.Video) +def adjust_contrast_video(video: torch.Tensor, contrast_factor: float) -> torch.Tensor: + return adjust_contrast_image(video, contrast_factor=contrast_factor) + + +def adjust_sharpness(inpt: torch.Tensor, 
sharpness_factor: float) -> torch.Tensor: + """See :class:`~torchvision.transforms.RandomAdjustSharpness`""" + if torch.jit.is_scripting(): + return adjust_sharpness_image(inpt, sharpness_factor=sharpness_factor) + + _log_api_usage_once(adjust_sharpness) + + kernel = _get_kernel(adjust_sharpness, type(inpt)) + return kernel(inpt, sharpness_factor=sharpness_factor) + + +@_register_kernel_internal(adjust_sharpness, torch.Tensor) +@_register_kernel_internal(adjust_sharpness, tv_tensors.Image) +def adjust_sharpness_image(image: torch.Tensor, sharpness_factor: float) -> torch.Tensor: + num_channels, height, width = image.shape[-3:] + if num_channels not in (1, 3): + raise TypeError(f"Input image tensor can have 1 or 3 channels, but found {num_channels}") + + if sharpness_factor < 0: + raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.") + + if image.numel() == 0 or height <= 2 or width <= 2: + return image + + bound = _max_value(image.dtype) + fp = image.is_floating_point() + shape = image.shape + + if image.ndim > 4: + image = image.reshape(-1, num_channels, height, width) + needs_unsquash = True + else: + needs_unsquash = False + + # The following is a normalized 3x3 kernel with 1s in the edges and a 5 in the middle. + kernel_dtype = image.dtype if fp else torch.float32 + a, b = 1.0 / 13.0, 5.0 / 13.0 + kernel = torch.tensor([[a, a, a], [a, b, a], [a, a, a]], dtype=kernel_dtype, device=image.device) + kernel = kernel.expand(num_channels, 1, 3, 3) + + # We copy and cast at the same time to avoid modifications on the original data + output = image.to(dtype=kernel_dtype, copy=True) + blurred_degenerate = conv2d(output, kernel, groups=num_channels) + if not fp: + # it is better to round before cast + blurred_degenerate = blurred_degenerate.round_() + + # Create a view on the underlying output while pointing at the same data. We do this to avoid indexing twice. 
    # `view` aliases the interior of `output`; writing through it updates `output` in place.
    view = output[..., 1:-1, 1:-1]

    # We speed up blending by minimizing flops and doing in-place. The 2 blend options are mathematically equivalent:
    # x+(1-r)*(y-x) = x + (1-r)*y - (1-r)*x = x*r + y*(1-r)
    view.add_(blurred_degenerate.sub_(view), alpha=(1.0 - sharpness_factor))

    # The actual data of output have been modified by the above. We only need to clamp and cast now.
    output = output.clamp_(0, bound)
    if not fp:
        output = output.to(image.dtype)

    if needs_unsquash:
        # Restore the leading dims that were collapsed earlier for the 4-D conv2d call.
        output = output.reshape(shape)

    return output


# PIL kernel for `adjust_sharpness`: reuse the v1 PIL implementation and register it for dispatch.
_adjust_sharpness_image_pil = _register_kernel_internal(adjust_sharpness, PIL.Image.Image)(_FP.adjust_sharpness)


@_register_kernel_internal(adjust_sharpness, tv_tensors.Video)
def adjust_sharpness_video(video: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
    """Adjust sharpness of a video by delegating to the image kernel (channels are on dim -3 either way)."""
    return adjust_sharpness_image(video, sharpness_factor=sharpness_factor)


def adjust_hue(inpt: torch.Tensor, hue_factor: float) -> torch.Tensor:
    """Adjust hue by dispatching to the kernel registered for ``type(inpt)``."""
    if torch.jit.is_scripting():
        # Scripted path: bypass the dynamic dispatch below and call the tensor kernel directly.
        return adjust_hue_image(inpt, hue_factor=hue_factor)

    _log_api_usage_once(adjust_hue)

    kernel = _get_kernel(adjust_hue, type(inpt))
    return kernel(inpt, hue_factor=hue_factor)


def _rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:
    # Convert an RGB image (channels on dim -3; callers pass float32 in [0, 1],
    # see adjust_hue_image) to H, S, V channels stacked on dim -3.
    r, g, _ = image.unbind(dim=-3)

    # Implementation is based on
    # https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/src/libImaging/Convert.c#L330
    minc, maxc = torch.aminmax(image, dim=-3)

    # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
    # from happening in the results, because
    # + S channel has division by `maxc`, which is zero only if `maxc = minc`
    # + H channel has division by `(maxc - minc)`.
    #
    # Instead of overwriting NaN afterwards, we just prevent it from occurring so
    # we don't need to deal with it in case we save the NaN in a buffer in
    # backprop, if it is ever supported, but it doesn't hurt to do so.
+ eqc = maxc == minc + + channels_range = maxc - minc + # Since `eqc => channels_range = 0`, replacing denominator with 1 when `eqc` is fine. + ones = torch.ones_like(maxc) + s = channels_range / torch.where(eqc, ones, maxc) + # Note that `eqc => maxc = minc = r = g = b`. So the following calculation + # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it + # would not matter what values `rc`, `gc`, and `bc` have here, and thus + # replacing denominator with 1 when `eqc` is fine. + channels_range_divisor = torch.where(eqc, ones, channels_range).unsqueeze_(dim=-3) + rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image) / channels_range_divisor).unbind(dim=-3) + + mask_maxc_neq_r = maxc != r + mask_maxc_eq_g = maxc == g + + hg = rc.add(2.0).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r) + hr = bc.sub_(gc).mul_(~mask_maxc_neq_r) + hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_r.logical_and_(mask_maxc_eq_g.logical_not_())) + + h = hr.add_(hg).add_(hb) + h = h.mul_(1.0 / 6.0).add_(1.0).fmod_(1.0) + return torch.stack((h, s, maxc), dim=-3) + + +def _hsv_to_rgb(img: torch.Tensor) -> torch.Tensor: + h, s, v = img.unbind(dim=-3) + h6 = h.mul(6) + i = torch.floor(h6) + f = h6.sub_(i) + i = i.to(dtype=torch.int32) + + sxf = s * f + one_minus_s = 1.0 - s + q = (1.0 - sxf).mul_(v).clamp_(0.0, 1.0) + t = sxf.add_(one_minus_s).mul_(v).clamp_(0.0, 1.0) + p = one_minus_s.mul_(v).clamp_(0.0, 1.0) + i.remainder_(6) + + vpqt = torch.stack((v, p, q, t), dim=-3) + + # vpqt -> rgb mapping based on i + select = torch.tensor([[0, 2, 1, 1, 3, 0], [3, 0, 0, 2, 1, 1], [1, 1, 3, 0, 0, 2]], dtype=torch.long) + select = select.to(device=img.device, non_blocking=True) + + select = select[:, i] + if select.ndim > 3: + # if input.shape is (B, ..., C, H, W) then + # select.shape is (C, B, ..., H, W) + # thus we move C axis to get (B, ..., C, H, W) + select = select.moveaxis(0, -3) + + return vpqt.gather(-3, select) + + +@_register_kernel_internal(adjust_hue, torch.Tensor) 
+@_register_kernel_internal(adjust_hue, tv_tensors.Image) +def adjust_hue_image(image: torch.Tensor, hue_factor: float) -> torch.Tensor: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") + + c = image.shape[-3] + if c not in [1, 3]: + raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}") + + if c == 1: # Match PIL behaviour + return image + + if image.numel() == 0: + # exit earlier on empty images + return image + + orig_dtype = image.dtype + image = to_dtype_image(image, torch.float32, scale=True) + + image = _rgb_to_hsv(image) + h, s, v = image.unbind(dim=-3) + h.add_(hue_factor).remainder_(1.0) + image = torch.stack((h, s, v), dim=-3) + image_hue_adj = _hsv_to_rgb(image) + + return to_dtype_image(image_hue_adj, orig_dtype, scale=True) + + +_adjust_hue_image_pil = _register_kernel_internal(adjust_hue, PIL.Image.Image)(_FP.adjust_hue) + + +@_register_kernel_internal(adjust_hue, tv_tensors.Video) +def adjust_hue_video(video: torch.Tensor, hue_factor: float) -> torch.Tensor: + return adjust_hue_image(video, hue_factor=hue_factor) + + +def adjust_gamma(inpt: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor: + """Adjust gamma.""" + if torch.jit.is_scripting(): + return adjust_gamma_image(inpt, gamma=gamma, gain=gain) + + _log_api_usage_once(adjust_gamma) + + kernel = _get_kernel(adjust_gamma, type(inpt)) + return kernel(inpt, gamma=gamma, gain=gain) + + +@_register_kernel_internal(adjust_gamma, torch.Tensor) +@_register_kernel_internal(adjust_gamma, tv_tensors.Image) +def adjust_gamma_image(image: torch.Tensor, gamma: float, gain: float = 1.0) -> torch.Tensor: + if gamma < 0: + raise ValueError("Gamma should be a non-negative real number") + + # The input image is either assumed to be at [0, 1] scale (if float) or is converted to that scale (if integer). + # Since the gamma is non-negative, the output remains at [0, 1] scale. 
+ if not torch.is_floating_point(image): + output = to_dtype_image(image, torch.float32, scale=True).pow_(gamma) + else: + output = image.pow(gamma) + + if gain != 1.0: + # The clamp operation is needed only if multiplication is performed. It's only when gain != 1, that the scale + # of the output can go beyond [0, 1]. + output = output.mul_(gain).clamp_(0.0, 1.0) + + return to_dtype_image(output, image.dtype, scale=True) + + +_adjust_gamma_image_pil = _register_kernel_internal(adjust_gamma, PIL.Image.Image)(_FP.adjust_gamma) + + +@_register_kernel_internal(adjust_gamma, tv_tensors.Video) +def adjust_gamma_video(video: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor: + return adjust_gamma_image(video, gamma=gamma, gain=gain) + + +def posterize(inpt: torch.Tensor, bits: int) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.RandomPosterize` for details.""" + if torch.jit.is_scripting(): + return posterize_image(inpt, bits=bits) + + _log_api_usage_once(posterize) + + kernel = _get_kernel(posterize, type(inpt)) + return kernel(inpt, bits=bits) + + +@_register_kernel_internal(posterize, torch.Tensor) +@_register_kernel_internal(posterize, tv_tensors.Image) +def posterize_image(image: torch.Tensor, bits: int) -> torch.Tensor: + if image.is_floating_point(): + levels = 1 << bits + return image.mul(levels).floor_().clamp_(0, levels - 1).mul_(1.0 / levels) + else: + num_value_bits = _num_value_bits(image.dtype) + if bits >= num_value_bits: + return image + + mask = ((1 << bits) - 1) << (num_value_bits - bits) + return image & mask + + +_posterize_image_pil = _register_kernel_internal(posterize, PIL.Image.Image)(_FP.posterize) + + +@_register_kernel_internal(posterize, tv_tensors.Video) +def posterize_video(video: torch.Tensor, bits: int) -> torch.Tensor: + return posterize_image(video, bits=bits) + + +def solarize(inpt: torch.Tensor, threshold: float) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.RandomSolarize` for details.""" + if 
torch.jit.is_scripting(): + return solarize_image(inpt, threshold=threshold) + + _log_api_usage_once(solarize) + + kernel = _get_kernel(solarize, type(inpt)) + return kernel(inpt, threshold=threshold) + + +@_register_kernel_internal(solarize, torch.Tensor) +@_register_kernel_internal(solarize, tv_tensors.Image) +def solarize_image(image: torch.Tensor, threshold: float) -> torch.Tensor: + if threshold > _max_value(image.dtype): + raise TypeError(f"Threshold should be less or equal the maximum value of the dtype, but got {threshold}") + + return torch.where(image >= threshold, invert_image(image), image) + + +_solarize_image_pil = _register_kernel_internal(solarize, PIL.Image.Image)(_FP.solarize) + + +@_register_kernel_internal(solarize, tv_tensors.Video) +def solarize_video(video: torch.Tensor, threshold: float) -> torch.Tensor: + return solarize_image(video, threshold=threshold) + + +def autocontrast(inpt: torch.Tensor) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.RandomAutocontrast` for details.""" + if torch.jit.is_scripting(): + return autocontrast_image(inpt) + + _log_api_usage_once(autocontrast) + + kernel = _get_kernel(autocontrast, type(inpt)) + return kernel(inpt) + + +@_register_kernel_internal(autocontrast, torch.Tensor) +@_register_kernel_internal(autocontrast, tv_tensors.Image) +def autocontrast_image(image: torch.Tensor) -> torch.Tensor: + c = image.shape[-3] + if c not in [1, 3]: + raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}") + + if image.numel() == 0: + # exit earlier on empty images + return image + + bound = _max_value(image.dtype) + fp = image.is_floating_point() + float_image = image if fp else image.to(torch.float32) + + minimum = float_image.amin(dim=(-2, -1), keepdim=True) + maximum = float_image.amax(dim=(-2, -1), keepdim=True) + + eq_idxs = maximum == minimum + inv_scale = maximum.sub_(minimum).mul_(1.0 / bound) + minimum[eq_idxs] = 0.0 + inv_scale[eq_idxs] = 1.0 + + if fp: + diff 
= float_image.sub(minimum) + else: + diff = float_image.sub_(minimum) + + return diff.div_(inv_scale).clamp_(0, bound).to(image.dtype) + + +_autocontrast_image_pil = _register_kernel_internal(autocontrast, PIL.Image.Image)(_FP.autocontrast) + + +@_register_kernel_internal(autocontrast, tv_tensors.Video) +def autocontrast_video(video: torch.Tensor) -> torch.Tensor: + return autocontrast_image(video) + + +def equalize(inpt: torch.Tensor) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.RandomEqualize` for details.""" + if torch.jit.is_scripting(): + return equalize_image(inpt) + + _log_api_usage_once(equalize) + + kernel = _get_kernel(equalize, type(inpt)) + return kernel(inpt) + + +@_register_kernel_internal(equalize, torch.Tensor) +@_register_kernel_internal(equalize, tv_tensors.Image) +def equalize_image(image: torch.Tensor) -> torch.Tensor: + if image.numel() == 0: + return image + + # 1. The algorithm below can easily be extended to support arbitrary integer dtypes. However, the histogram that + # would be needed to computed will have at least `torch.iinfo(dtype).max + 1` values. That is perfectly fine for + # `torch.int8`, `torch.uint8`, and `torch.int16`, at least questionable for `torch.int32` and completely + # unfeasible for `torch.int64`. + # 2. Floating point inputs need to be binned for this algorithm. Apart from converting them to an integer dtype, we + # could also use PyTorch's builtin histogram functionality. However, that has its own set of issues: in addition + # to being slow in general, PyTorch's implementation also doesn't support batches. In total, that makes it slower + # and more complicated to implement than a simple conversion and a fast histogram implementation for integers. + # Since we need to convert in most cases anyway and out of the acceptable dtypes mentioned in 1. `torch.uint8` is + # by far the most common, we choose it as base. 
+ output_dtype = image.dtype + image = to_dtype_image(image, torch.uint8, scale=True) + + # The histogram is computed by using the flattened image as index. For example, a pixel value of 127 in the image + # corresponds to adding 1 to index 127 in the histogram. + batch_shape = image.shape[:-2] + flat_image = image.flatten(start_dim=-2).to(torch.long) + hist = flat_image.new_zeros(batch_shape + (256,), dtype=torch.int32) + hist.scatter_add_(dim=-1, index=flat_image, src=hist.new_ones(1).expand_as(flat_image)) + cum_hist = hist.cumsum(dim=-1) + + # The simplest form of lookup-table (LUT) that also achieves histogram equalization is + # `lut = cum_hist / flat_image.shape[-1] * 255` + # However, PIL uses a more elaborate scheme: + # https://github.com/python-pillow/Pillow/blob/eb59cb61d5239ee69cbbf12709a0c6fd7314e6d7/src/PIL/ImageOps.py#L368-L385 + # `lut = ((cum_hist + num_non_max_pixels // (2 * 255)) // num_non_max_pixels) * 255` + + # The last non-zero element in the histogram is the first element in the cumulative histogram with the maximum + # value. Thus, the "max" in `num_non_max_pixels` does not refer to 255 as the maximum value of uint8 images, but + # rather the maximum value in the image, which might be or not be 255. + index = cum_hist.argmax(dim=-1) + num_non_max_pixels = flat_image.shape[-1] - hist.gather(dim=-1, index=index.unsqueeze_(-1)) + + # This is performance optimization that saves us one multiplication later. With this, the LUT computation simplifies + # to `lut = (cum_hist + step // 2) // step` and thus saving the final multiplication by 255 while keeping the + # division count the same. PIL uses the variable name `step` for this, so we keep that for easier comparison. + step = num_non_max_pixels.div_(255, rounding_mode="floor") + + # Although it looks like we could return early if we find `step == 0` like PIL does, that is unfortunately not as + # easy due to our support for batched images. 
We can only return early if `(step == 0).all()` holds. If it doesn't, + # we have to go through the computation below anyway. Since `step == 0` is an edge case anyway, it makes no sense to + # pay the runtime cost for checking it every time. + valid_equalization = step.ne(0).unsqueeze_(-1) + + # `lut[k]` is computed with `cum_hist[k-1]` with `lut[0] == (step // 2) // step == 0`. Thus, we perform the + # computation only for `lut[1:]` with `cum_hist[:-1]` and add `lut[0] == 0` afterwards. + cum_hist = cum_hist[..., :-1] + ( + cum_hist.add_(step // 2) + # We need the `clamp_`(min=1) call here to avoid zero division since they fail for integer dtypes. This has no + # effect on the returned result of this kernel since images inside the batch with `step == 0` are returned as is + # instead of equalized version. + .div_(step.clamp_(min=1), rounding_mode="floor") + # We need the `clamp_` call here since PILs LUT computation scheme can produce values outside the valid value + # range of uint8 images + .clamp_(0, 255) + ) + lut = cum_hist.to(torch.uint8) + lut = torch.cat([lut.new_zeros(1).expand(batch_shape + (1,)), lut], dim=-1) + equalized_image = lut.gather(dim=-1, index=flat_image).view_as(image) + + output = torch.where(valid_equalization, equalized_image, image) + return to_dtype_image(output, output_dtype, scale=True) + + +_equalize_image_pil = _register_kernel_internal(equalize, PIL.Image.Image)(_FP.equalize) + + +@_register_kernel_internal(equalize, tv_tensors.Video) +def equalize_video(video: torch.Tensor) -> torch.Tensor: + return equalize_image(video) + + +def invert(inpt: torch.Tensor) -> torch.Tensor: + """See :func:`~torchvision.transforms.v2.RandomInvert`.""" + if torch.jit.is_scripting(): + return invert_image(inpt) + + _log_api_usage_once(invert) + + kernel = _get_kernel(invert, type(inpt)) + return kernel(inpt) + + +@_register_kernel_internal(invert, torch.Tensor) +@_register_kernel_internal(invert, tv_tensors.Image) +def invert_image(image: 
torch.Tensor) -> torch.Tensor: + if image.is_floating_point(): + return 1.0 - image + elif image.dtype == torch.uint8: + return image.bitwise_not() + else: # signed integer dtypes + # We can't use `Tensor.bitwise_not` here, since we want to retain the leading zero bit that encodes the sign + return image.bitwise_xor((1 << _num_value_bits(image.dtype)) - 1) + + +_invert_image_pil = _register_kernel_internal(invert, PIL.Image.Image)(_FP.invert) + + +@_register_kernel_internal(invert, tv_tensors.Video) +def invert_video(video: torch.Tensor) -> torch.Tensor: + return invert_image(video) + + +def permute_channels(inpt: torch.Tensor, permutation: List[int]) -> torch.Tensor: + """Permute the channels of the input according to the given permutation. + + This function supports plain :class:`~torch.Tensor`'s, :class:`PIL.Image.Image`'s, and + :class:`torchvision.tv_tensors.Image` and :class:`torchvision.tv_tensors.Video`. + + Example: + >>> rgb_image = torch.rand(3, 256, 256) + >>> bgr_image = F.permute_channels(rgb_image, permutation=[2, 1, 0]) + + Args: + permutation (List[int]): Valid permutation of the input channel indices. The index of the element determines the + channel index in the input and the value determines the channel index in the output. For example, + ``permutation=[2, 0 , 1]`` + + - takes ``ìnpt[..., 0, :, :]`` and puts it at ``output[..., 2, :, :]``, + - takes ``ìnpt[..., 1, :, :]`` and puts it at ``output[..., 0, :, :]``, and + - takes ``ìnpt[..., 2, :, :]`` and puts it at ``output[..., 1, :, :]``. + + Raises: + ValueError: If ``len(permutation)`` doesn't match the number of channels in the input. 
+ """ + if torch.jit.is_scripting(): + return permute_channels_image(inpt, permutation=permutation) + + _log_api_usage_once(permute_channels) + + kernel = _get_kernel(permute_channels, type(inpt)) + return kernel(inpt, permutation=permutation) + + +@_register_kernel_internal(permute_channels, torch.Tensor) +@_register_kernel_internal(permute_channels, tv_tensors.Image) +def permute_channels_image(image: torch.Tensor, permutation: List[int]) -> torch.Tensor: + shape = image.shape + num_channels, height, width = shape[-3:] + + if len(permutation) != num_channels: + raise ValueError( + f"Length of permutation does not match number of channels: " f"{len(permutation)} != {num_channels}" + ) + + if image.numel() == 0: + return image + + image = image.reshape(-1, num_channels, height, width) + image = image[:, permutation, :, :] + return image.reshape(shape) + + +@_register_kernel_internal(permute_channels, PIL.Image.Image) +def _permute_channels_image_pil(image: PIL.Image.Image, permutation: List[int]) -> PIL.Image.Image: + return to_pil_image(permute_channels_image(pil_to_tensor(image), permutation=permutation)) + + +@_register_kernel_internal(permute_channels, tv_tensors.Video) +def permute_channels_video(video: torch.Tensor, permutation: List[int]) -> torch.Tensor: + return permute_channels_image(video, permutation=permutation) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_deprecated.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..116ea31587a1d7f7172267898152e0167531f303 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_deprecated.py @@ -0,0 +1,24 @@ +import warnings +from typing import Any, List + +import torch + +from torchvision.transforms import functional as _F + + +@torch.jit.unused +def to_tensor(inpt: Any) -> torch.Tensor: + """[DEPREACTED] Use to_image() and 
to_dtype() instead.""" + warnings.warn( + "The function `to_tensor(...)` is deprecated and will be removed in a future release. " + "Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`." + ) + return _F.to_tensor(inpt) + + +def get_image_size(inpt: torch.Tensor) -> List[int]: + warnings.warn( + "The function `get_image_size(...)` is deprecated and will be removed in a future release. " + "Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`." + ) + return _F.get_image_size(inpt) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_meta.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..b90e5fb7b5be887d24bb42725d59bb056ac126c2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_meta.py @@ -0,0 +1,279 @@ +from typing import List, Optional, Tuple + +import PIL.Image +import torch +from torchvision import tv_tensors +from torchvision.transforms import _functional_pil as _FP +from torchvision.tv_tensors import BoundingBoxFormat + +from torchvision.utils import _log_api_usage_once + +from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor + + +def get_dimensions(inpt: torch.Tensor) -> List[int]: + if torch.jit.is_scripting(): + return get_dimensions_image(inpt) + + _log_api_usage_once(get_dimensions) + + kernel = _get_kernel(get_dimensions, type(inpt)) + return kernel(inpt) + + +@_register_kernel_internal(get_dimensions, torch.Tensor) +@_register_kernel_internal(get_dimensions, tv_tensors.Image, tv_tensor_wrapper=False) +def get_dimensions_image(image: torch.Tensor) -> List[int]: + chw = list(image.shape[-3:]) + ndims = len(chw) + if ndims == 3: + return chw + elif ndims == 2: + chw.insert(0, 1) + return chw + else: + raise TypeError(f"Input tensor should have at least two dimensions, but got {ndims}") + 

_get_dimensions_image_pil = _register_kernel_internal(get_dimensions, PIL.Image.Image)(_FP.get_dimensions)


@_register_kernel_internal(get_dimensions, tv_tensors.Video, tv_tensor_wrapper=False)
def get_dimensions_video(video: torch.Tensor) -> List[int]:
    # Videos share the trailing (C, H, W) layout with images, so the image kernel applies as-is.
    return get_dimensions_image(video)


def get_num_channels(inpt: torch.Tensor) -> int:
    """Return the number of channels of ``inpt``, dispatching on its type."""
    # Under torchscript only the pure-tensor kernel is available, so call it directly.
    if torch.jit.is_scripting():
        return get_num_channels_image(inpt)
    _log_api_usage_once(get_num_channels)
    return _get_kernel(get_num_channels, type(inpt))(inpt)


@_register_kernel_internal(get_num_channels, torch.Tensor)
@_register_kernel_internal(get_num_channels, tv_tensors.Image, tv_tensor_wrapper=False)
def get_num_channels_image(image: torch.Tensor) -> int:
    # The channel dimension sits third from the end; a plain 2D (H, W) image
    # implicitly has a single channel.
    ndims = len(image.shape[-3:])
    if ndims < 2:
        raise TypeError(f"Input tensor should have at least two dimensions, but got {ndims}")
    return image.shape[-3] if ndims == 3 else 1


_get_num_channels_image_pil = _register_kernel_internal(get_num_channels, PIL.Image.Image)(_FP.get_image_num_channels)


@_register_kernel_internal(get_num_channels, tv_tensors.Video, tv_tensor_wrapper=False)
def get_num_channels_video(video: torch.Tensor) -> int:
    # Delegate to the image kernel: the channel axis is the same for videos.
    return get_num_channels_image(video)


# We changed the names to ensure it can be used not only for images but also videos. Thus, we just alias it without
# deprecating the old names.
get_image_num_channels = get_num_channels


def get_size(inpt: torch.Tensor) -> List[int]:
    """Return the spatial size ``[height, width]`` of ``inpt``, dispatching on its type."""
    # torchscript cannot go through the dynamic dispatch table, so use the tensor kernel.
    if torch.jit.is_scripting():
        return get_size_image(inpt)
    _log_api_usage_once(get_size)
    return _get_kernel(get_size, type(inpt))(inpt)


@_register_kernel_internal(get_size, torch.Tensor)
@_register_kernel_internal(get_size, tv_tensors.Image, tv_tensor_wrapper=False)
def get_size_image(image: torch.Tensor) -> List[int]:
    # The last two dimensions are (H, W); fewer than two dimensions is not an image.
    hw = list(image.shape[-2:])
    if len(hw) != 2:
        raise TypeError(f"Input tensor should have at least two dimensions, but got {len(hw)}")
    return hw


@_register_kernel_internal(get_size, PIL.Image.Image)
def _get_size_image_pil(image: PIL.Image.Image) -> List[int]:
    # PIL reports (width, height); this API returns [height, width].
    width, height = _FP.get_image_size(image)
    return [height, width]


@_register_kernel_internal(get_size, tv_tensors.Video, tv_tensor_wrapper=False)
def get_size_video(video: torch.Tensor) -> List[int]:
    return get_size_image(video)


@_register_kernel_internal(get_size, tv_tensors.Mask, tv_tensor_wrapper=False)
def get_size_mask(mask: torch.Tensor) -> List[int]:
    return get_size_image(mask)


@_register_kernel_internal(get_size, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> List[int]:
    # Boxes carry the size of the canvas they live on rather than having spatial dims themselves.
    return list(bounding_box.canvas_size)


def get_num_frames(inpt: torch.Tensor) -> int:
    """Return the number of temporal frames of ``inpt``, dispatching on its type."""
    if torch.jit.is_scripting():
        return get_num_frames_video(inpt)
    _log_api_usage_once(get_num_frames)
    return _get_kernel(get_num_frames, type(inpt))(inpt)


@_register_kernel_internal(get_num_frames, torch.Tensor)
@_register_kernel_internal(get_num_frames, tv_tensors.Video, tv_tensor_wrapper=False)
def get_num_frames_video(video: torch.Tensor) -> int:
    # Videos are laid out as (..., T, C, H, W), so the frame axis is fourth from the end.
    return video.shape[-4]


def _xywh_to_xyxy(xywh: torch.Tensor, inplace: bool) -> torch.Tensor:
    # XYWH -> XYXY: add the origin to the extents. Clone first unless the caller
    # explicitly allowed in-place mutation.
    if not inplace:
        xywh = xywh.clone()
    xywh[..., 2:] += xywh[..., :2]
    return xywh
+ +def _xyxy_to_xywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: + xywh = xyxy if inplace else xyxy.clone() + xywh[..., 2:] -= xywh[..., :2] + return xywh + + +def _cxcywh_to_xyxy(cxcywh: torch.Tensor, inplace: bool) -> torch.Tensor: + if not inplace: + cxcywh = cxcywh.clone() + + # Trick to do fast division by 2 and ceil, without casting. It produces the same result as + # `torchvision.ops._box_convert._box_cxcywh_to_xyxy`. + half_wh = cxcywh[..., 2:].div(-2, rounding_mode=None if cxcywh.is_floating_point() else "floor").abs_() + # (cx - width / 2) = x1, same for y1 + cxcywh[..., :2].sub_(half_wh) + # (x1 + width) = x2, same for y2 + cxcywh[..., 2:].add_(cxcywh[..., :2]) + + return cxcywh + + +def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: + if not inplace: + xyxy = xyxy.clone() + + # (x2 - x1) = width, same for height + xyxy[..., 2:].sub_(xyxy[..., :2]) + # (x1 * 2 + width) / 2 = x1 + width / 2 = x1 + (x2-x1)/2 = (x1 + x2)/2 = cx, same for cy + xyxy[..., :2].mul_(2).add_(xyxy[..., 2:]).div_(2, rounding_mode=None if xyxy.is_floating_point() else "floor") + + return xyxy + + +def _convert_bounding_box_format( + bounding_boxes: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False +) -> torch.Tensor: + + if new_format == old_format: + return bounding_boxes + + # TODO: Add _xywh_to_cxcywh and _cxcywh_to_xywh to improve performance + if old_format == BoundingBoxFormat.XYWH: + bounding_boxes = _xywh_to_xyxy(bounding_boxes, inplace) + elif old_format == BoundingBoxFormat.CXCYWH: + bounding_boxes = _cxcywh_to_xyxy(bounding_boxes, inplace) + + if new_format == BoundingBoxFormat.XYWH: + bounding_boxes = _xyxy_to_xywh(bounding_boxes, inplace) + elif new_format == BoundingBoxFormat.CXCYWH: + bounding_boxes = _xyxy_to_cxcywh(bounding_boxes, inplace) + + return bounding_boxes + + +def convert_bounding_box_format( + inpt: torch.Tensor, + old_format: Optional[BoundingBoxFormat] = None, + new_format: 
Optional[BoundingBoxFormat] = None, + inplace: bool = False, +) -> torch.Tensor: + """See :func:`~torchvision.transforms.v2.ConvertBoundingBoxFormat` for details.""" + # This being a kernel / functional hybrid, we need an option to pass `old_format` explicitly for pure tensor + # inputs as well as extract it from `tv_tensors.BoundingBoxes` inputs. However, putting a default value on + # `old_format` means we also need to put one on `new_format` to have syntactically correct Python. Here we mimic the + # default error that would be thrown if `new_format` had no default value. + if new_format is None: + raise TypeError("convert_bounding_box_format() missing 1 required argument: 'new_format'") + + if not torch.jit.is_scripting(): + _log_api_usage_once(convert_bounding_box_format) + + if isinstance(old_format, str): + old_format = BoundingBoxFormat[old_format.upper()] + if isinstance(new_format, str): + new_format = BoundingBoxFormat[new_format.upper()] + + if torch.jit.is_scripting() or is_pure_tensor(inpt): + if old_format is None: + raise ValueError("For pure tensor inputs, `old_format` has to be passed.") + return _convert_bounding_box_format(inpt, old_format=old_format, new_format=new_format, inplace=inplace) + elif isinstance(inpt, tv_tensors.BoundingBoxes): + if old_format is not None: + raise ValueError("For bounding box tv_tensor inputs, `old_format` must not be passed.") + output = _convert_bounding_box_format( + inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=new_format, inplace=inplace + ) + return tv_tensors.wrap(output, like=inpt, format=new_format) + else: + raise TypeError( + f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead." 
+ ) + + +def _clamp_bounding_boxes( + bounding_boxes: torch.Tensor, format: BoundingBoxFormat, canvas_size: Tuple[int, int] +) -> torch.Tensor: + # TODO: Investigate if it makes sense from a performance perspective to have an implementation for every + # BoundingBoxFormat instead of converting back and forth + in_dtype = bounding_boxes.dtype + bounding_boxes = bounding_boxes.clone() if bounding_boxes.is_floating_point() else bounding_boxes.float() + xyxy_boxes = convert_bounding_box_format( + bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYXY, inplace=True + ) + xyxy_boxes[..., 0::2].clamp_(min=0, max=canvas_size[1]) + xyxy_boxes[..., 1::2].clamp_(min=0, max=canvas_size[0]) + out_boxes = convert_bounding_box_format( + xyxy_boxes, old_format=BoundingBoxFormat.XYXY, new_format=format, inplace=True + ) + return out_boxes.to(in_dtype) + + +def clamp_bounding_boxes( + inpt: torch.Tensor, + format: Optional[BoundingBoxFormat] = None, + canvas_size: Optional[Tuple[int, int]] = None, +) -> torch.Tensor: + """See :func:`~torchvision.transforms.v2.ClampBoundingBoxes` for details.""" + if not torch.jit.is_scripting(): + _log_api_usage_once(clamp_bounding_boxes) + + if torch.jit.is_scripting() or is_pure_tensor(inpt): + + if format is None or canvas_size is None: + raise ValueError("For pure tensor inputs, `format` and `canvas_size` have to be passed.") + return _clamp_bounding_boxes(inpt, format=format, canvas_size=canvas_size) + elif isinstance(inpt, tv_tensors.BoundingBoxes): + if format is not None or canvas_size is not None: + raise ValueError("For bounding box tv_tensor inputs, `format` and `canvas_size` must not be passed.") + output = _clamp_bounding_boxes(inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size) + return tv_tensors.wrap(output, like=inpt) + else: + raise TypeError( + f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead." 
+ ) diff --git a/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_temporal.py b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_temporal.py new file mode 100644 index 0000000000000000000000000000000000000000..f932b06a295fd10316fba3e796ec4649053e92db --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/transforms/v2/functional/_temporal.py @@ -0,0 +1,27 @@ +import torch + +from torchvision import tv_tensors + +from torchvision.utils import _log_api_usage_once + +from ._utils import _get_kernel, _register_kernel_internal + + +def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor: + """See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details.""" + if torch.jit.is_scripting(): + return uniform_temporal_subsample_video(inpt, num_samples=num_samples) + + _log_api_usage_once(uniform_temporal_subsample) + + kernel = _get_kernel(uniform_temporal_subsample, type(inpt)) + return kernel(inpt, num_samples=num_samples) + + +@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor) +@_register_kernel_internal(uniform_temporal_subsample, tv_tensors.Video) +def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor: + # Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19 + t_max = video.shape[-4] - 1 + indices = torch.linspace(0, t_max, num_samples, device=video.device).long() + return torch.index_select(video, -4, indices) diff --git a/parrot/lib/python3.10/site-packages/torchvision/utils.py b/parrot/lib/python3.10/site-packages/torchvision/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6b2d19ec3ddb49a71f7773d542324a16dbc5f8c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/utils.py @@ -0,0 +1,658 @@ +import collections +import math +import pathlib +import warnings +from itertools import repeat +from types 
import FunctionType +from typing import Any, BinaryIO, List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL import Image, ImageColor, ImageDraw, ImageFont + + +__all__ = [ + "make_grid", + "save_image", + "draw_bounding_boxes", + "draw_segmentation_masks", + "draw_keypoints", + "flow_to_image", +] + + +@torch.no_grad() +def make_grid( + tensor: Union[torch.Tensor, List[torch.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + value_range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: float = 0.0, +) -> torch.Tensor: + """ + Make a grid of images. + + Args: + tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) + or a list of images all of the same size. + nrow (int, optional): Number of images displayed in each row of the grid. + The final grid size is ``(B / nrow, nrow)``. Default: ``8``. + padding (int, optional): amount of padding. Default: ``2``. + normalize (bool, optional): If True, shift the image to the range (0, 1), + by the min and max values specified by ``value_range``. Default: ``False``. + value_range (tuple, optional): tuple (min, max) where min and max are numbers, + then these numbers are used to normalize the image. By default, min and max + are computed from the tensor. + scale_each (bool, optional): If ``True``, scale each image in the batch of + images separately rather than the (min, max) over all images. Default: ``False``. + pad_value (float, optional): Value for the padded pixels. Default: ``0``. + + Returns: + grid (Tensor): the tensor containing grid of images. 
+ """ + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(make_grid) + if not torch.is_tensor(tensor): + if isinstance(tensor, list): + for t in tensor: + if not torch.is_tensor(t): + raise TypeError(f"tensor or list of tensors expected, got a list containing {type(t)}") + else: + raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = torch.stack(tensor, dim=0) + + if tensor.dim() == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.dim() == 3: # single image + if tensor.size(0) == 1: # if single-channel, convert to 3-channel + tensor = torch.cat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + + if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images + tensor = torch.cat((tensor, tensor, tensor), 1) + + if normalize is True: + tensor = tensor.clone() # avoid modifying tensor in-place + if value_range is not None and not isinstance(value_range, tuple): + raise TypeError("value_range has to be a tuple (min, max) if specified. 
min and max are numbers") + + def norm_ip(img, low, high): + img.clamp_(min=low, max=high) + img.sub_(low).div_(max(high - low, 1e-5)) + + def norm_range(t, value_range): + if value_range is not None: + norm_ip(t, value_range[0], value_range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, value_range) + else: + norm_range(tensor, value_range) + + if not isinstance(tensor, torch.Tensor): + raise TypeError("tensor should be of type torch.Tensor") + if tensor.size(0) == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.size(0) + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) + num_channels = tensor.size(1) + grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value) + k = 0 + for y in range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + # Tensor.copy_() is a valid method but seems to be missing from the stubs + # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_ + grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined] + 2, x * width + padding, width - padding + ).copy_(tensor[k]) + k = k + 1 + return grid + + +@torch.no_grad() +def save_image( + tensor: Union[torch.Tensor, List[torch.Tensor]], + fp: Union[str, pathlib.Path, BinaryIO], + format: Optional[str] = None, + **kwargs, +) -> None: + """ + Save a given Tensor into an image file. + + Args: + tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, + saves the tensor as a grid of images by calling ``make_grid``. + fp (string or file object): A filename or a file object + format(Optional): If omitted, the format to use is determined from the filename extension. 
+ If a file object was used instead of a filename, this parameter should always be used. + **kwargs: Other arguments are documented in ``make_grid``. + """ + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(save_image) + grid = make_grid(tensor, **kwargs) + # Add 0.5 after unnormalizing to [0, 255] to round to the nearest integer + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + im = Image.fromarray(ndarr) + im.save(fp, format=format) + + +@torch.no_grad() +def draw_bounding_boxes( + image: torch.Tensor, + boxes: torch.Tensor, + labels: Optional[List[str]] = None, + colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None, + fill: Optional[bool] = False, + width: int = 1, + font: Optional[str] = None, + font_size: Optional[int] = None, +) -> torch.Tensor: + + """ + Draws bounding boxes on given RGB image. + The image values should be uint8 in [0, 255] or float in [0, 1]. + If fill is True, Resulting Tensor should be saved as PNG image. + + Args: + image (Tensor): Tensor of shape (C, H, W) and dtype uint8 or float. + boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that + the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and + `0 <= ymin < ymax < H`. + labels (List[str]): List containing the labels of bounding boxes. + colors (color or list of colors, optional): List containing the colors + of the boxes or single color for all boxes. The color can be represented as + PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. + By default, random colors are generated for boxes. + fill (bool): If `True` fills the bounding box with specified color. + width (int): Width of bounding box. + font (str): A filename containing a TrueType font. 
If the file is not found in this filename, the loader may + also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`, + `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS. + font_size (int): The requested font size in points. + + Returns: + img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted. + """ + import torchvision.transforms.v2.functional as F # noqa + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(draw_bounding_boxes) + if not isinstance(image, torch.Tensor): + raise TypeError(f"Tensor expected, got {type(image)}") + elif not (image.dtype == torch.uint8 or image.is_floating_point()): + raise ValueError(f"The image dtype must be uint8 or float, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + elif image.size(0) not in {1, 3}: + raise ValueError("Only grayscale and RGB images are supported") + elif (boxes[:, 0] > boxes[:, 2]).any() or (boxes[:, 1] > boxes[:, 3]).any(): + raise ValueError( + "Boxes need to be in (xmin, ymin, xmax, ymax) format. Use torchvision.ops.box_convert to convert them" + ) + + num_boxes = boxes.shape[0] + + if num_boxes == 0: + warnings.warn("boxes doesn't contain any box. No box was drawn") + return image + + if labels is None: + labels: Union[List[str], List[None]] = [None] * num_boxes # type: ignore[no-redef] + elif len(labels) != num_boxes: + raise ValueError( + f"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box." 
+ ) + + colors = _parse_colors(colors, num_objects=num_boxes) + + if font is None: + if font_size is not None: + warnings.warn("Argument 'font_size' will be ignored since 'font' is not set.") + txt_font = ImageFont.load_default() + else: + txt_font = ImageFont.truetype(font=font, size=font_size or 10) + + # Handle Grayscale images + if image.size(0) == 1: + image = torch.tile(image, (3, 1, 1)) + + original_dtype = image.dtype + if original_dtype.is_floating_point: + image = F.to_dtype(image, dtype=torch.uint8, scale=True) + + img_to_draw = F.to_pil_image(image) + img_boxes = boxes.to(torch.int64).tolist() + + if fill: + draw = ImageDraw.Draw(img_to_draw, "RGBA") + else: + draw = ImageDraw.Draw(img_to_draw) + + for bbox, color, label in zip(img_boxes, colors, labels): # type: ignore[arg-type] + if fill: + fill_color = color + (100,) + draw.rectangle(bbox, width=width, outline=color, fill=fill_color) + else: + draw.rectangle(bbox, width=width, outline=color) + + if label is not None: + margin = width + 1 + draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font) + + out = F.pil_to_tensor(img_to_draw) + if original_dtype.is_floating_point: + out = F.to_dtype(out, dtype=original_dtype, scale=True) + return out + + +@torch.no_grad() +def draw_segmentation_masks( + image: torch.Tensor, + masks: torch.Tensor, + alpha: float = 0.8, + colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None, +) -> torch.Tensor: + + """ + Draws segmentation masks on given RGB image. + The image values should be uint8 in [0, 255] or float in [0, 1]. + + Args: + image (Tensor): Tensor of shape (3, H, W) and dtype uint8 or float. + masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool. + alpha (float): Float number between 0 and 1 denoting the transparency of the masks. + 0 means full transparency, 1 means no transparency. 
+ colors (color or list of colors, optional): List containing the colors + of the masks or single color for all masks. The color can be represented as + PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. + By default, random colors are generated for each mask. + + Returns: + img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top. + """ + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(draw_segmentation_masks) + if not isinstance(image, torch.Tensor): + raise TypeError(f"The image must be a tensor, got {type(image)}") + elif not (image.dtype == torch.uint8 or image.is_floating_point()): + raise ValueError(f"The image dtype must be uint8 or float, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + elif image.size()[0] != 3: + raise ValueError("Pass an RGB image. Other Image formats are not supported") + if masks.ndim == 2: + masks = masks[None, :, :] + if masks.ndim != 3: + raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)") + if masks.dtype != torch.bool: + raise ValueError(f"The masks must be of dtype bool. Got {masks.dtype}") + if masks.shape[-2:] != image.shape[-2:]: + raise ValueError("The image and the masks must have the same height and width") + + num_masks = masks.size()[0] + overlapping_masks = masks.sum(dim=0) > 1 + + if num_masks == 0: + warnings.warn("masks doesn't contain any mask. 
No mask was drawn") + return image + + original_dtype = image.dtype + colors = [ + torch.tensor(color, dtype=original_dtype, device=image.device) + for color in _parse_colors(colors, num_objects=num_masks, dtype=original_dtype) + ] + + img_to_draw = image.detach().clone() + # TODO: There might be a way to vectorize this + for mask, color in zip(masks, colors): + img_to_draw[:, mask] = color[:, None] + + img_to_draw[:, overlapping_masks] = 0 + + out = image * (1 - alpha) + img_to_draw * alpha + # Note: at this point, out is a float tensor in [0, 1] or [0, 255] depending on original_dtype + return out.to(original_dtype) + + +@torch.no_grad() +def draw_keypoints( + image: torch.Tensor, + keypoints: torch.Tensor, + connectivity: Optional[List[Tuple[int, int]]] = None, + colors: Optional[Union[str, Tuple[int, int, int]]] = None, + radius: int = 2, + width: int = 3, + visibility: Optional[torch.Tensor] = None, +) -> torch.Tensor: + + """ + Draws Keypoints on given RGB image. + The image values should be uint8 in [0, 255] or float in [0, 1]. + Keypoints can be drawn for multiple instances at a time. + + This method allows that keypoints and their connectivity are drawn based on the visibility of this keypoint. + + Args: + image (Tensor): Tensor of shape (3, H, W) and dtype uint8 or float. + keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoint locations for each of the N instances, + in the format [x, y]. + connectivity (List[Tuple[int, int]]]): A List of tuple where each tuple contains a pair of keypoints + to be connected. + If at least one of the two connected keypoints has a ``visibility`` of False, + this specific connection is not drawn. + Exclusions due to invisibility are computed per-instance. + colors (str, Tuple): The color can be represented as + PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. + radius (int): Integer denoting radius of keypoint. 
+ width (int): Integer denoting width of line connecting keypoints. + visibility (Tensor): Tensor of shape (num_instances, K) specifying the visibility of the K + keypoints for each of the N instances. + True means that the respective keypoint is visible and should be drawn. + False means invisible, so neither the point nor possible connections containing it are drawn. + The input tensor will be cast to bool. + Default ``None`` means that all the keypoints are visible. + For more details, see :ref:`draw_keypoints_with_visibility`. + + Returns: + img (Tensor[C, H, W]): Image Tensor with keypoints drawn. + """ + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(draw_keypoints) + # validate image + if not isinstance(image, torch.Tensor): + raise TypeError(f"The image must be a tensor, got {type(image)}") + elif not (image.dtype == torch.uint8 or image.is_floating_point()): + raise ValueError(f"The image dtype must be uint8 or float, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + elif image.size()[0] != 3: + raise ValueError("Pass an RGB image. Other Image formats are not supported") + + # validate keypoints + if keypoints.ndim != 3: + raise ValueError("keypoints must be of shape (num_instances, K, 2)") + + # validate visibility + if visibility is None: # set default + visibility = torch.ones(keypoints.shape[:-1], dtype=torch.bool) + if visibility.ndim == 3: + # If visibility was passed as pred.split([2, 1], dim=-1), it will be of shape (num_instances, K, 1). + # We make sure it is of shape (num_instances, K). This isn't documented, we're just being nice. + visibility = visibility.squeeze(-1) + if visibility.ndim != 2: + raise ValueError(f"visibility must be of shape (num_instances, K). Got ndim={visibility.ndim}") + if visibility.shape != keypoints.shape[:-1]: + raise ValueError( + "keypoints and visibility must have the same dimensionality for num_instances and K. 
" + f"Got {visibility.shape = } and {keypoints.shape = }" + ) + + original_dtype = image.dtype + if original_dtype.is_floating_point: + from torchvision.transforms.v2.functional import to_dtype # noqa + + image = to_dtype(image, dtype=torch.uint8, scale=True) + + ndarr = image.permute(1, 2, 0).cpu().numpy() + img_to_draw = Image.fromarray(ndarr) + draw = ImageDraw.Draw(img_to_draw) + img_kpts = keypoints.to(torch.int64).tolist() + img_vis = visibility.cpu().bool().tolist() + + for kpt_inst, vis_inst in zip(img_kpts, img_vis): + for kpt_coord, kp_vis in zip(kpt_inst, vis_inst): + if not kp_vis: + continue + x1 = kpt_coord[0] - radius + x2 = kpt_coord[0] + radius + y1 = kpt_coord[1] - radius + y2 = kpt_coord[1] + radius + draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0) + + if connectivity: + for connection in connectivity: + if (not vis_inst[connection[0]]) or (not vis_inst[connection[1]]): + continue + start_pt_x = kpt_inst[connection[0]][0] + start_pt_y = kpt_inst[connection[0]][1] + + end_pt_x = kpt_inst[connection[1]][0] + end_pt_y = kpt_inst[connection[1]][1] + + draw.line( + ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)), + width=width, + ) + + out = torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1) + if original_dtype.is_floating_point: + out = to_dtype(out, dtype=original_dtype, scale=True) + return out + + +# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization +@torch.no_grad() +def flow_to_image(flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a flow to an RGB image. + + Args: + flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float. + + Returns: + img (Tensor): Image Tensor of dtype uint8 where each color corresponds + to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input. 
+ """ + + if flow.dtype != torch.float: + raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.") + + orig_shape = flow.shape + if flow.ndim == 3: + flow = flow[None] # Add batch dim + + if flow.ndim != 4 or flow.shape[1] != 2: + raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.") + + max_norm = torch.sum(flow**2, dim=1).sqrt().max() + epsilon = torch.finfo((flow).dtype).eps + normalized_flow = flow / (max_norm + epsilon) + img = _normalized_flow_to_image(normalized_flow) + + if len(orig_shape) == 3: + img = img[0] # Remove batch dim + return img + + +@torch.no_grad() +def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a batch of normalized flow to an RGB image. + + Args: + normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W) + Returns: + img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8. + """ + + N, _, H, W = normalized_flow.shape + device = normalized_flow.device + flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device) + colorwheel = _make_colorwheel().to(device) # shape [55x3] + num_cols = colorwheel.shape[0] + norm = torch.sum(normalized_flow**2, dim=1).sqrt() + a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi + fk = (a + 1) / 2 * (num_cols - 1) + k0 = torch.floor(fk).to(torch.long) + k1 = k0 + 1 + k1[k1 == num_cols] = 0 + f = fk - k0 + + for c in range(colorwheel.shape[1]): + tmp = colorwheel[:, c] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1 - f) * col0 + f * col1 + col = 1 - norm * (1 - col) + flow_image[:, c, :, :] = torch.floor(255 * col) + return flow_image + + +def _make_colorwheel() -> torch.Tensor: + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf. 
+ + Returns: + colorwheel (Tensor[55, 3]): Colorwheel Tensor. + """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = torch.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY) + col = col + RY + # YG + colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG) + colorwheel[col : col + YG, 1] = 255 + col = col + YG + # GC + colorwheel[col : col + GC, 1] = 255 + colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC) + col = col + GC + # CB + colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB) + colorwheel[col : col + CB, 2] = 255 + col = col + CB + # BM + colorwheel[col : col + BM, 2] = 255 + colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM) + col = col + BM + # MR + colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR) + colorwheel[col : col + MR, 0] = 255 + return colorwheel + + +def _generate_color_palette(num_objects: int): + palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1]) + return [tuple((i * palette) % 255) for i in range(num_objects)] + + +def _parse_colors( + colors: Union[None, str, Tuple[int, int, int], List[Union[str, Tuple[int, int, int]]]], + *, + num_objects: int, + dtype: torch.dtype = torch.uint8, +) -> List[Tuple[int, int, int]]: + """ + Parses a specification of colors for a set of objects. + + Args: + colors: A specification of colors for the objects. This can be one of the following: + - None: to generate a color palette automatically. + - A list of colors: where each color is either a string (specifying a named color) or an RGB tuple. + - A string or an RGB tuple: to use the same color for all objects. + + If `colors` is a tuple, it should be a 3-tuple specifying the RGB values of the color. 
+ If `colors` is a list, it should have at least as many elements as the number of objects to color. + + num_objects (int): The number of objects to color. + + Returns: + A list of 3-tuples, specifying the RGB values of the colors. + + Raises: + ValueError: If the number of colors in the list is less than the number of objects to color. + If `colors` is not a list, tuple, string or None. + """ + if colors is None: + colors = _generate_color_palette(num_objects) + elif isinstance(colors, list): + if len(colors) < num_objects: + raise ValueError( + f"Number of colors must be equal or larger than the number of objects, but got {len(colors)} < {num_objects}." + ) + elif not isinstance(colors, (tuple, str)): + raise ValueError("`colors` must be a tuple or a string, or a list thereof, but got {colors}.") + elif isinstance(colors, tuple) and len(colors) != 3: + raise ValueError("If passed as tuple, colors should be an RGB triplet, but got {colors}.") + else: # colors specifies a single color for all objects + colors = [colors] * num_objects + + colors = [ImageColor.getrgb(color) if isinstance(color, str) else color for color in colors] + if dtype.is_floating_point: # [0, 255] -> [0, 1] + colors = [tuple(v / 255 for v in color) for color in colors] + return colors + + +def _log_api_usage_once(obj: Any) -> None: + + """ + Logs API usage(module and name) within an organization. + In a large ecosystem, it's often useful to track the PyTorch and + TorchVision APIs usage. This API provides the similar functionality to the + logging module in the Python stdlib. It can be used for debugging purpose + to log which methods are used and by default it is inactive, unless the user + manually subscribes a logger via the `SetAPIUsageLogger method `_. + Please note it is triggered only once for the same API call within a process. + It does not collect any data from open-source users since it is no-op by default. 
+ For more information, please refer to + * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging; + * Logging policy: https://github.com/pytorch/vision/issues/5052; + + Args: + obj (class instance or method): an object to extract info from. + """ + module = obj.__module__ + if not module.startswith("torchvision"): + module = f"torchvision.internal.{module}" + name = obj.__class__.__name__ + if isinstance(obj, FunctionType): + name = obj.__name__ + torch._C._log_api_usage_once(f"{module}.{name}") + + +def _make_ntuple(x: Any, n: int) -> Tuple[Any, ...]: + """ + Make n-tuple from input x. If x is an iterable, then we just convert it to tuple. + Otherwise, we will make a tuple of length n, all with value of x. + reference: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/utils.py#L8 + + Args: + x (Any): input value + n (int): length of the resulting tuple + """ + if isinstance(x, collections.abc.Iterable): + return tuple(x) + return tuple(repeat(x, n)) diff --git a/parrot/lib/python3.10/site-packages/torchvision/version.py b/parrot/lib/python3.10/site-packages/torchvision/version.py new file mode 100644 index 0000000000000000000000000000000000000000..c18b747eb9bf409e408441bb50428d08e627709c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torchvision/version.py @@ -0,0 +1,5 @@ +__version__ = '0.19.1+cu121' +git_version = '61943691d3390bd3148a7003b4a501f0e2b7ac6e' +from torchvision.extension import _check_cuda_version +if _check_cuda_version() > 0: + cuda = _check_cuda_version()