diff --git a/.gitattributes b/.gitattributes index 7d257b39e456c1f669448ae33bb3f3a8d2d83ae5..f87c837a024797ccd188c275187b4e73fefc1e5e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -183,3 +183,4 @@ parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text parrot/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text +wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/wemm/lib/python3.10/site-packages/Crypto/IO/PKCS8.pyi b/wemm/lib/python3.10/site-packages/Crypto/IO/PKCS8.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cf6aa8ae3730fe0d2ab936110482411c99059675 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/Crypto/IO/PKCS8.pyi @@ -0,0 +1,17 @@ +from typing import Tuple, Optional, Union, Callable +from typing_extensions import NotRequired + +from Crypto.Util.asn1 import DerObject +from Crypto.IO._PBES import ProtParams + + +def wrap(private_key: bytes, + key_oid: str, + passphrase: Union[bytes, str] = ..., + protection: str = ..., + prot_params: Optional[ProtParams] = ..., + key_params: Optional[DerObject] = ..., + randfunc: Optional[Callable[[int], str]] = ...) -> bytes: ... + + +def unwrap(p8_private_key: bytes, passphrase: Optional[Union[bytes, str]] = ...) -> Tuple[str, bytes, Optional[bytes]]: ... 
diff --git a/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/PKCS8.cpython-310.pyc b/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/PKCS8.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c7d48a750edbaa1679d187224326366e941e42e Binary files /dev/null and b/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/PKCS8.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fbb32845b5e1f7f09787c185e792409e5c61fb3 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/Crypto/IO/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.py b/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.py new file mode 100644 index 0000000000000000000000000000000000000000..46118ae052e4fddc1760c54d0c69e1e58edb0e9a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.py @@ -0,0 +1,799 @@ +# =================================================================== +# +# Copyright (c) 2014, Legrandin +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# =================================================================== + +import sys +import struct + +from Crypto.Util.py3compat import is_native_int + +from Crypto.Util._raw_api import (backend, load_lib, + c_ulong, c_size_t, c_uint8_ptr) + +from ._IntegerBase import IntegerBase + +gmp_defs = """typedef unsigned long UNIX_ULONG; + typedef struct { int a; int b; void *c; } MPZ; + typedef MPZ mpz_t[1]; + typedef UNIX_ULONG mp_bitcnt_t; + + void __gmpz_init (mpz_t x); + void __gmpz_init_set (mpz_t rop, const mpz_t op); + void __gmpz_init_set_ui (mpz_t rop, UNIX_ULONG op); + + UNIX_ULONG __gmpz_get_ui (const mpz_t op); + void __gmpz_set (mpz_t rop, const mpz_t op); + void __gmpz_set_ui (mpz_t rop, UNIX_ULONG op); + void __gmpz_add (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_add_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2); + void __gmpz_sub_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2); + void __gmpz_addmul (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_addmul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2); + void __gmpz_submul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2); + void __gmpz_import (mpz_t rop, size_t count, int order, size_t size, + int 
endian, size_t nails, const void *op); + void * __gmpz_export (void *rop, size_t *countp, int order, + size_t size, + int endian, size_t nails, const mpz_t op); + size_t __gmpz_sizeinbase (const mpz_t op, int base); + void __gmpz_sub (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_mul (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_mul_ui (mpz_t rop, const mpz_t op1, UNIX_ULONG op2); + int __gmpz_cmp (const mpz_t op1, const mpz_t op2); + void __gmpz_powm (mpz_t rop, const mpz_t base, const mpz_t exp, const + mpz_t mod); + void __gmpz_powm_ui (mpz_t rop, const mpz_t base, UNIX_ULONG exp, + const mpz_t mod); + void __gmpz_pow_ui (mpz_t rop, const mpz_t base, UNIX_ULONG exp); + void __gmpz_sqrt(mpz_t rop, const mpz_t op); + void __gmpz_mod (mpz_t r, const mpz_t n, const mpz_t d); + void __gmpz_neg (mpz_t rop, const mpz_t op); + void __gmpz_abs (mpz_t rop, const mpz_t op); + void __gmpz_and (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_ior (mpz_t rop, const mpz_t op1, const mpz_t op2); + void __gmpz_clear (mpz_t x); + void __gmpz_tdiv_q_2exp (mpz_t q, const mpz_t n, mp_bitcnt_t b); + void __gmpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d); + void __gmpz_mul_2exp (mpz_t rop, const mpz_t op1, mp_bitcnt_t op2); + int __gmpz_tstbit (const mpz_t op, mp_bitcnt_t bit_index); + int __gmpz_perfect_square_p (const mpz_t op); + int __gmpz_jacobi (const mpz_t a, const mpz_t b); + void __gmpz_gcd (mpz_t rop, const mpz_t op1, const mpz_t op2); + UNIX_ULONG __gmpz_gcd_ui (mpz_t rop, const mpz_t op1, + UNIX_ULONG op2); + void __gmpz_lcm (mpz_t rop, const mpz_t op1, const mpz_t op2); + int __gmpz_invert (mpz_t rop, const mpz_t op1, const mpz_t op2); + int __gmpz_divisible_p (const mpz_t n, const mpz_t d); + int __gmpz_divisible_ui_p (const mpz_t n, UNIX_ULONG d); + + size_t __gmpz_size (const mpz_t op); + UNIX_ULONG __gmpz_getlimbn (const mpz_t op, size_t n); + """ + +if sys.platform == "win32": + raise ImportError("Not using GMP on Windows") 
+ +lib = load_lib("gmp", gmp_defs) +implementation = {"library": "gmp", "api": backend} + +if hasattr(lib, "__mpir_version"): + raise ImportError("MPIR library detected") + + +# Lazy creation of GMP methods +class _GMP(object): + + def __getattr__(self, name): + if name.startswith("mpz_"): + func_name = "__gmpz_" + name[4:] + elif name.startswith("gmp_"): + func_name = "__gmp_" + name[4:] + else: + raise AttributeError("Attribute %s is invalid" % name) + func = getattr(lib, func_name) + setattr(self, name, func) + return func + + +_gmp = _GMP() + + +# In order to create a function that returns a pointer to +# a new MPZ structure, we need to break the abstraction +# and know exactly what ffi backend we have +if implementation["api"] == "ctypes": + from ctypes import Structure, c_int, c_void_p, byref + + class _MPZ(Structure): + _fields_ = [('_mp_alloc', c_int), + ('_mp_size', c_int), + ('_mp_d', c_void_p)] + + def new_mpz(): + return byref(_MPZ()) + + _gmp.mpz_getlimbn.restype = c_ulong + +else: + # We are using CFFI + from Crypto.Util._raw_api import ffi + + def new_mpz(): + return ffi.new("MPZ*") + + +# Size of a native word +_sys_bits = 8 * struct.calcsize("P") + + +class IntegerGMP(IntegerBase): + """A fast, arbitrary precision integer""" + + _zero_mpz_p = new_mpz() + _gmp.mpz_init_set_ui(_zero_mpz_p, c_ulong(0)) + + def __init__(self, value): + """Initialize the integer to the given value.""" + + self._mpz_p = new_mpz() + self._initialized = False + + if isinstance(value, float): + raise ValueError("A floating point type is not a natural number") + + if is_native_int(value): + _gmp.mpz_init(self._mpz_p) + self._initialized = True + if value == 0: + return + + tmp = new_mpz() + _gmp.mpz_init(tmp) + + try: + positive = value >= 0 + reduce = abs(value) + slots = (reduce.bit_length() - 1) // 32 + 1 + + while slots > 0: + slots = slots - 1 + _gmp.mpz_set_ui(tmp, + c_ulong(0xFFFFFFFF & (reduce >> (slots * 32)))) + _gmp.mpz_mul_2exp(tmp, tmp, c_ulong(slots * 32)) + 
_gmp.mpz_add(self._mpz_p, self._mpz_p, tmp) + finally: + _gmp.mpz_clear(tmp) + + if not positive: + _gmp.mpz_neg(self._mpz_p, self._mpz_p) + + elif isinstance(value, IntegerGMP): + _gmp.mpz_init_set(self._mpz_p, value._mpz_p) + self._initialized = True + else: + raise NotImplementedError + + # Conversions + def __int__(self): + tmp = new_mpz() + _gmp.mpz_init_set(tmp, self._mpz_p) + + try: + value = 0 + slot = 0 + while _gmp.mpz_cmp(tmp, self._zero_mpz_p) != 0: + lsb = _gmp.mpz_get_ui(tmp) & 0xFFFFFFFF + value |= lsb << (slot * 32) + _gmp.mpz_tdiv_q_2exp(tmp, tmp, c_ulong(32)) + slot = slot + 1 + finally: + _gmp.mpz_clear(tmp) + + if self < 0: + value = -value + return int(value) + + def __str__(self): + return str(int(self)) + + def __repr__(self): + return "Integer(%s)" % str(self) + + # Only Python 2.x + def __hex__(self): + return hex(int(self)) + + # Only Python 3.x + def __index__(self): + return int(self) + + def to_bytes(self, block_size=0, byteorder='big'): + """Convert the number into a byte string. + + This method encodes the number in network order and prepends + as many zero bytes as required. It only works for non-negative + values. + + :Parameters: + block_size : integer + The exact size the output byte string must have. + If zero, the string has the minimal length. + byteorder : string + 'big' for big-endian integers (default), 'little' for litte-endian. + :Returns: + A byte string. + :Raise ValueError: + If the value is negative or if ``block_size`` is + provided and the length of the byte string would exceed it. 
+ """ + + if self < 0: + raise ValueError("Conversion only valid for non-negative numbers") + + num_limbs = _gmp.mpz_size(self._mpz_p) + if _sys_bits == 32: + spchar = "L" + num_limbs = max(1, num_limbs, (block_size + 3) // 4) + elif _sys_bits == 64: + spchar = "Q" + num_limbs = max(1, num_limbs, (block_size + 7) // 8) + else: + raise ValueError("Unknown limb size") + + # mpz_getlimbn returns 0 if i is larger than the number of actual limbs + limbs = [_gmp.mpz_getlimbn(self._mpz_p, num_limbs - i - 1) for i in range(num_limbs)] + + result = struct.pack(">" + spchar * num_limbs, *limbs) + cutoff_len = len(result) - block_size + if block_size == 0: + result = result.lstrip(b'\x00') + elif cutoff_len > 0: + if result[:cutoff_len] != b'\x00' * (cutoff_len): + raise ValueError("Number is too big to convert to " + "byte string of prescribed length") + result = result[cutoff_len:] + elif cutoff_len < 0: + result = b'\x00' * (-cutoff_len) + result + + if byteorder == 'little': + result = result[::-1] + elif byteorder == 'big': + pass + else: + raise ValueError("Incorrect byteorder") + + if len(result) == 0: + result = b'\x00' + + return result + + @staticmethod + def from_bytes(byte_string, byteorder='big'): + """Convert a byte string into a number. + + :Parameters: + byte_string : byte string + The input number, encoded in network order. + It can only be non-negative. + byteorder : string + 'big' for big-endian integers (default), 'little' for litte-endian. + + :Return: + The ``Integer`` object carrying the same value as the input. 
+ """ + result = IntegerGMP(0) + if byteorder == 'big': + pass + elif byteorder == 'little': + byte_string = bytearray(byte_string) + byte_string.reverse() + else: + raise ValueError("Incorrect byteorder") + _gmp.mpz_import( + result._mpz_p, + c_size_t(len(byte_string)), # Amount of words to read + 1, # Big endian + c_size_t(1), # Each word is 1 byte long + 0, # Endianess within a word - not relevant + c_size_t(0), # No nails + c_uint8_ptr(byte_string)) + return result + + # Relations + def _apply_and_return(self, func, term): + if not isinstance(term, IntegerGMP): + term = IntegerGMP(term) + return func(self._mpz_p, term._mpz_p) + + def __eq__(self, term): + if not (isinstance(term, IntegerGMP) or is_native_int(term)): + return False + return self._apply_and_return(_gmp.mpz_cmp, term) == 0 + + def __ne__(self, term): + if not (isinstance(term, IntegerGMP) or is_native_int(term)): + return True + return self._apply_and_return(_gmp.mpz_cmp, term) != 0 + + def __lt__(self, term): + return self._apply_and_return(_gmp.mpz_cmp, term) < 0 + + def __le__(self, term): + return self._apply_and_return(_gmp.mpz_cmp, term) <= 0 + + def __gt__(self, term): + return self._apply_and_return(_gmp.mpz_cmp, term) > 0 + + def __ge__(self, term): + return self._apply_and_return(_gmp.mpz_cmp, term) >= 0 + + def __nonzero__(self): + return _gmp.mpz_cmp(self._mpz_p, self._zero_mpz_p) != 0 + __bool__ = __nonzero__ + + def is_negative(self): + return _gmp.mpz_cmp(self._mpz_p, self._zero_mpz_p) < 0 + + # Arithmetic operations + def __add__(self, term): + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + try: + term = IntegerGMP(term) + except NotImplementedError: + return NotImplemented + _gmp.mpz_add(result._mpz_p, + self._mpz_p, + term._mpz_p) + return result + + def __sub__(self, term): + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + try: + term = IntegerGMP(term) + except NotImplementedError: + return NotImplemented + _gmp.mpz_sub(result._mpz_p, + 
self._mpz_p, + term._mpz_p) + return result + + def __mul__(self, term): + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + try: + term = IntegerGMP(term) + except NotImplementedError: + return NotImplemented + _gmp.mpz_mul(result._mpz_p, + self._mpz_p, + term._mpz_p) + return result + + def __floordiv__(self, divisor): + if not isinstance(divisor, IntegerGMP): + divisor = IntegerGMP(divisor) + if _gmp.mpz_cmp(divisor._mpz_p, + self._zero_mpz_p) == 0: + raise ZeroDivisionError("Division by zero") + result = IntegerGMP(0) + _gmp.mpz_fdiv_q(result._mpz_p, + self._mpz_p, + divisor._mpz_p) + return result + + def __mod__(self, divisor): + if not isinstance(divisor, IntegerGMP): + divisor = IntegerGMP(divisor) + comp = _gmp.mpz_cmp(divisor._mpz_p, + self._zero_mpz_p) + if comp == 0: + raise ZeroDivisionError("Division by zero") + if comp < 0: + raise ValueError("Modulus must be positive") + result = IntegerGMP(0) + _gmp.mpz_mod(result._mpz_p, + self._mpz_p, + divisor._mpz_p) + return result + + def inplace_pow(self, exponent, modulus=None): + + if modulus is None: + if exponent < 0: + raise ValueError("Exponent must not be negative") + + # Normal exponentiation + if exponent > 256: + raise ValueError("Exponent is too big") + _gmp.mpz_pow_ui(self._mpz_p, + self._mpz_p, # Base + c_ulong(int(exponent)) + ) + else: + # Modular exponentiation + if not isinstance(modulus, IntegerGMP): + modulus = IntegerGMP(modulus) + if not modulus: + raise ZeroDivisionError("Division by zero") + if modulus.is_negative(): + raise ValueError("Modulus must be positive") + if is_native_int(exponent): + if exponent < 0: + raise ValueError("Exponent must not be negative") + if exponent < 65536: + _gmp.mpz_powm_ui(self._mpz_p, + self._mpz_p, + c_ulong(exponent), + modulus._mpz_p) + return self + exponent = IntegerGMP(exponent) + elif exponent.is_negative(): + raise ValueError("Exponent must not be negative") + _gmp.mpz_powm(self._mpz_p, + self._mpz_p, + exponent._mpz_p, + 
modulus._mpz_p) + return self + + def __pow__(self, exponent, modulus=None): + result = IntegerGMP(self) + return result.inplace_pow(exponent, modulus) + + def __abs__(self): + result = IntegerGMP(0) + _gmp.mpz_abs(result._mpz_p, self._mpz_p) + return result + + def sqrt(self, modulus=None): + """Return the largest Integer that does not + exceed the square root""" + + if modulus is None: + if self < 0: + raise ValueError("Square root of negative value") + result = IntegerGMP(0) + _gmp.mpz_sqrt(result._mpz_p, + self._mpz_p) + else: + if modulus <= 0: + raise ValueError("Modulus must be positive") + modulus = int(modulus) + result = IntegerGMP(self._tonelli_shanks(int(self) % modulus, modulus)) + + return result + + def __iadd__(self, term): + if is_native_int(term): + if 0 <= term < 65536: + _gmp.mpz_add_ui(self._mpz_p, + self._mpz_p, + c_ulong(term)) + return self + if -65535 < term < 0: + _gmp.mpz_sub_ui(self._mpz_p, + self._mpz_p, + c_ulong(-term)) + return self + term = IntegerGMP(term) + _gmp.mpz_add(self._mpz_p, + self._mpz_p, + term._mpz_p) + return self + + def __isub__(self, term): + if is_native_int(term): + if 0 <= term < 65536: + _gmp.mpz_sub_ui(self._mpz_p, + self._mpz_p, + c_ulong(term)) + return self + if -65535 < term < 0: + _gmp.mpz_add_ui(self._mpz_p, + self._mpz_p, + c_ulong(-term)) + return self + term = IntegerGMP(term) + _gmp.mpz_sub(self._mpz_p, + self._mpz_p, + term._mpz_p) + return self + + def __imul__(self, term): + if is_native_int(term): + if 0 <= term < 65536: + _gmp.mpz_mul_ui(self._mpz_p, + self._mpz_p, + c_ulong(term)) + return self + if -65535 < term < 0: + _gmp.mpz_mul_ui(self._mpz_p, + self._mpz_p, + c_ulong(-term)) + _gmp.mpz_neg(self._mpz_p, self._mpz_p) + return self + term = IntegerGMP(term) + _gmp.mpz_mul(self._mpz_p, + self._mpz_p, + term._mpz_p) + return self + + def __imod__(self, divisor): + if not isinstance(divisor, IntegerGMP): + divisor = IntegerGMP(divisor) + comp = _gmp.mpz_cmp(divisor._mpz_p, + divisor._zero_mpz_p) 
+ if comp == 0: + raise ZeroDivisionError("Division by zero") + if comp < 0: + raise ValueError("Modulus must be positive") + _gmp.mpz_mod(self._mpz_p, + self._mpz_p, + divisor._mpz_p) + return self + + # Boolean/bit operations + def __and__(self, term): + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + term = IntegerGMP(term) + _gmp.mpz_and(result._mpz_p, + self._mpz_p, + term._mpz_p) + return result + + def __or__(self, term): + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + term = IntegerGMP(term) + _gmp.mpz_ior(result._mpz_p, + self._mpz_p, + term._mpz_p) + return result + + def __rshift__(self, pos): + result = IntegerGMP(0) + if pos < 0: + raise ValueError("negative shift count") + if pos > 65536: + if self < 0: + return -1 + else: + return 0 + _gmp.mpz_tdiv_q_2exp(result._mpz_p, + self._mpz_p, + c_ulong(int(pos))) + return result + + def __irshift__(self, pos): + if pos < 0: + raise ValueError("negative shift count") + if pos > 65536: + if self < 0: + return -1 + else: + return 0 + _gmp.mpz_tdiv_q_2exp(self._mpz_p, + self._mpz_p, + c_ulong(int(pos))) + return self + + def __lshift__(self, pos): + result = IntegerGMP(0) + if not 0 <= pos < 65536: + raise ValueError("Incorrect shift count") + _gmp.mpz_mul_2exp(result._mpz_p, + self._mpz_p, + c_ulong(int(pos))) + return result + + def __ilshift__(self, pos): + if not 0 <= pos < 65536: + raise ValueError("Incorrect shift count") + _gmp.mpz_mul_2exp(self._mpz_p, + self._mpz_p, + c_ulong(int(pos))) + return self + + def get_bit(self, n): + """Return True if the n-th bit is set to 1. 
+ Bit 0 is the least significant.""" + + if self < 0: + raise ValueError("no bit representation for negative values") + if n < 0: + raise ValueError("negative bit count") + if n > 65536: + return 0 + return bool(_gmp.mpz_tstbit(self._mpz_p, + c_ulong(int(n)))) + + # Extra + def is_odd(self): + return _gmp.mpz_tstbit(self._mpz_p, 0) == 1 + + def is_even(self): + return _gmp.mpz_tstbit(self._mpz_p, 0) == 0 + + def size_in_bits(self): + """Return the minimum number of bits that can encode the number.""" + + if self < 0: + raise ValueError("Conversion only valid for non-negative numbers") + return _gmp.mpz_sizeinbase(self._mpz_p, 2) + + def size_in_bytes(self): + """Return the minimum number of bytes that can encode the number.""" + return (self.size_in_bits() - 1) // 8 + 1 + + def is_perfect_square(self): + return _gmp.mpz_perfect_square_p(self._mpz_p) != 0 + + def fail_if_divisible_by(self, small_prime): + """Raise an exception if the small prime is a divisor.""" + + if is_native_int(small_prime): + if 0 < small_prime < 65536: + if _gmp.mpz_divisible_ui_p(self._mpz_p, + c_ulong(small_prime)): + raise ValueError("The value is composite") + return + small_prime = IntegerGMP(small_prime) + if _gmp.mpz_divisible_p(self._mpz_p, + small_prime._mpz_p): + raise ValueError("The value is composite") + + def multiply_accumulate(self, a, b): + """Increment the number by the product of a and b.""" + + if not isinstance(a, IntegerGMP): + a = IntegerGMP(a) + if is_native_int(b): + if 0 < b < 65536: + _gmp.mpz_addmul_ui(self._mpz_p, + a._mpz_p, + c_ulong(b)) + return self + if -65535 < b < 0: + _gmp.mpz_submul_ui(self._mpz_p, + a._mpz_p, + c_ulong(-b)) + return self + b = IntegerGMP(b) + _gmp.mpz_addmul(self._mpz_p, + a._mpz_p, + b._mpz_p) + return self + + def set(self, source): + """Set the Integer to have the given value""" + + if not isinstance(source, IntegerGMP): + source = IntegerGMP(source) + _gmp.mpz_set(self._mpz_p, + source._mpz_p) + return self + + def 
inplace_inverse(self, modulus): + """Compute the inverse of this number in the ring of + modulo integers. + + Raise an exception if no inverse exists. + """ + + if not isinstance(modulus, IntegerGMP): + modulus = IntegerGMP(modulus) + + comp = _gmp.mpz_cmp(modulus._mpz_p, + self._zero_mpz_p) + if comp == 0: + raise ZeroDivisionError("Modulus cannot be zero") + if comp < 0: + raise ValueError("Modulus must be positive") + + result = _gmp.mpz_invert(self._mpz_p, + self._mpz_p, + modulus._mpz_p) + if not result: + raise ValueError("No inverse value can be computed") + return self + + def inverse(self, modulus): + result = IntegerGMP(self) + result.inplace_inverse(modulus) + return result + + def gcd(self, term): + """Compute the greatest common denominator between this + number and another term.""" + + result = IntegerGMP(0) + if is_native_int(term): + if 0 < term < 65535: + _gmp.mpz_gcd_ui(result._mpz_p, + self._mpz_p, + c_ulong(term)) + return result + term = IntegerGMP(term) + _gmp.mpz_gcd(result._mpz_p, self._mpz_p, term._mpz_p) + return result + + def lcm(self, term): + """Compute the least common multiplier between this + number and another term.""" + + result = IntegerGMP(0) + if not isinstance(term, IntegerGMP): + term = IntegerGMP(term) + _gmp.mpz_lcm(result._mpz_p, self._mpz_p, term._mpz_p) + return result + + @staticmethod + def jacobi_symbol(a, n): + """Compute the Jacobi symbol""" + + if not isinstance(a, IntegerGMP): + a = IntegerGMP(a) + if not isinstance(n, IntegerGMP): + n = IntegerGMP(n) + if n <= 0 or n.is_even(): + raise ValueError("n must be positive odd for the Jacobi symbol") + return _gmp.mpz_jacobi(a._mpz_p, n._mpz_p) + + @staticmethod + def _mult_modulo_bytes(term1, term2, modulus): + if not isinstance(term1, IntegerGMP): + term1 = IntegerGMP(term1) + if not isinstance(term2, IntegerGMP): + term2 = IntegerGMP(term2) + if not isinstance(modulus, IntegerGMP): + modulus = IntegerGMP(modulus) + + if modulus < 0: + raise ValueError("Modulus must 
be positive") + if modulus == 0: + raise ZeroDivisionError("Modulus cannot be zero") + if (modulus & 1) == 0: + raise ValueError("Odd modulus is required") + + product = (term1 * term2) % modulus + return product.to_bytes(modulus.size_in_bytes()) + + # Clean-up + def __del__(self): + + try: + if self._mpz_p is not None: + if self._initialized: + _gmp.mpz_clear(self._mpz_p) + + self._mpz_p = None + except AttributeError: + pass diff --git a/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.pyi b/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2181b470c76781267ea957a30fb06a8c6f308702 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/Crypto/Math/_IntegerGMP.pyi @@ -0,0 +1,3 @@ +from ._IntegerBase import IntegerBase +class IntegerGMP(IntegerBase): + pass diff --git a/wemm/lib/python3.10/site-packages/Crypto/Math/__init__.py b/wemm/lib/python3.10/site-packages/Crypto/Math/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/wemm/lib/python3.10/site-packages/Crypto/Signature/PKCS1_PSS.pyi b/wemm/lib/python3.10/site-packages/Crypto/Signature/PKCS1_PSS.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1371e69a4292405f47be36ad8628f9e2741e61e4 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/Crypto/Signature/PKCS1_PSS.pyi @@ -0,0 +1,28 @@ +from typing import Union, Callable, Optional +from typing_extensions import Protocol + +from Crypto.PublicKey.RSA import RsaKey + + +class Hash(Protocol): + def digest(self) -> bytes: ... + def update(self, bytes) -> None: ... + + +class HashModule(Protocol): + @staticmethod + def new(data: Optional[bytes]) -> Hash: ... 
+ + +MaskFunction = Callable[[bytes, int, Union[Hash, HashModule]], bytes] +RndFunction = Callable[[int], bytes] + +class PSS_SigScheme: + def __init__(self, key: RsaKey, mgfunc: MaskFunction, saltLen: int, randfunc: RndFunction) -> None: ... + def can_sign(self) -> bool: ... + def sign(self, msg_hash: Hash) -> bytes: ... + def verify(self, msg_hash: Hash, signature: bytes) -> bool: ... + + + +def new(rsa_key: RsaKey, mgfunc: Optional[MaskFunction]=None, saltLen: Optional[int]=None, randfunc: Optional[RndFunction]=None) -> PSS_SigScheme: ... diff --git a/wemm/lib/python3.10/site-packages/Crypto/Signature/pss.pyi b/wemm/lib/python3.10/site-packages/Crypto/Signature/pss.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d4088e166592c0662f380e2f79866af690e959f9 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/Crypto/Signature/pss.pyi @@ -0,0 +1,30 @@ +from typing import Union, Callable, Optional +from typing_extensions import Protocol + +from Crypto.PublicKey.RSA import RsaKey + + +class Hash(Protocol): + def digest(self) -> bytes: ... + def update(self, bytes) -> None: ... + + +class HashModule(Protocol): + @staticmethod + def new(data: Optional[bytes]) -> Hash: ... + + +MaskFunction = Callable[[bytes, int, Union[Hash, HashModule]], bytes] +RndFunction = Callable[[int], bytes] + +class PSS_SigScheme: + def __init__(self, key: RsaKey, mgfunc: MaskFunction, saltLen: int, randfunc: RndFunction) -> None: ... + def can_sign(self) -> bool: ... + def sign(self, msg_hash: Hash) -> bytes: ... + def verify(self, msg_hash: Hash, signature: bytes) -> None: ... + + +MGF1 : MaskFunction +def _EMSA_PSS_ENCODE(mhash: Hash, emBits: int, randFunc: RndFunction, mgf:MaskFunction, sLen: int) -> str: ... +def _EMSA_PSS_VERIFY(mhash: Hash, em: str, emBits: int, mgf: MaskFunction, sLen: int) -> None: ... +def new(rsa_key: RsaKey, **kwargs: Union[MaskFunction, RndFunction, int]) -> PSS_SigScheme: ... 
diff --git a/wemm/lib/python3.10/site-packages/Crypto/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/Crypto/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62919b236dca9e432d5cd03e3ad9b2cf38814ccc Binary files /dev/null and b/wemm/lib/python3.10/site-packages/Crypto/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d54aca4bc65f7a328b06f0084b0249bf87aa7dfe Binary files /dev/null and b/wemm/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER b/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE b/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2bd6453d255e19b973f19b128596a8b6dd65b2c3 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the 
Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz b/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a4ce808d4cf93286d25fe86aa30c0b65c7bde613 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a06b3ceba5eaeb6c6f1759e44ec4eb1d041492dbeb669d033a0f92a05fe513a2 +size 1839 diff --git a/wemm/lib/python3.10/site-packages/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json.gz b/wemm/lib/python3.10/site-packages/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..5378a153ac0e57f03b808c9464eab1f050c0423f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:190a177a61ada13dd88dabd5c81eaf6885cc2c6823df30485ce2262e4d941118 +size 1153 diff --git a/wemm/lib/python3.10/site-packages/tokenizers/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/__init__.pyi 
new file mode 100644 index 0000000000000000000000000000000000000000..5dbc665dcf67fa37034de75619eedb9f346e955e --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/__init__.pyi @@ -0,0 +1,1200 @@ +# Generated content DO NOT EDIT +class AddedToken: + """ + Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. + It can have special options that defines the way it should behave. + + Args: + content (:obj:`str`): The content of the token + + single_word (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should only match single words. If :obj:`True`, this + token will never match inside of a word. For example the token ``ing`` would match + on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. + The notion of "`inside of a word`" is defined by the word boundaries pattern in + regular expressions (ie. the token should start and end with word boundaries). + + lstrip (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should strip all potential whitespaces on its left side. + If :obj:`True`, this token will greedily match any whitespace on its left. For + example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text + ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). + + rstrip (:obj:`bool`, defaults to :obj:`False`): + Defines whether this token should strip all potential whitespaces on its right + side. If :obj:`True`, this token will greedily match any whitespace on its right. + It works just like :obj:`lstrip` but on the right. + + normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): + Defines whether this token should match against the normalized version of the input + text. 
For example, with the added token ``"yesterday"``, and a normalizer in charge of + lowercasing the text, the token could be extract from the input ``"I saw a lion + Yesterday"``. + special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): + Defines whether this token should be skipped when decoding. + + """ + def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False): + pass + + @property + def content(self): + """ + Get the content of this :obj:`AddedToken` + """ + pass + + @property + def lstrip(self): + """ + Get the value of the :obj:`lstrip` option + """ + pass + + @property + def normalized(self): + """ + Get the value of the :obj:`normalized` option + """ + pass + + @property + def rstrip(self): + """ + Get the value of the :obj:`rstrip` option + """ + pass + + @property + def single_word(self): + """ + Get the value of the :obj:`single_word` option + """ + pass + + @property + def special(self): + """ + Get the value of the :obj:`special` option + """ + pass + +class Encoding: + """ + The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. + """ + @property + def attention_mask(self): + """ + The attention mask + + This indicates to the LM which tokens should be attended to, and which should not. + This is especially important when batching sequences, where we need to applying + padding. + + Returns: + :obj:`List[int]`: The attention mask + """ + pass + + def char_to_token(self, char_pos, sequence_index=0): + """ + Get the token that contains the char at the given position in the input sequence. 
+ + Args: + char_pos (:obj:`int`): + The position of a char in the input string + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target char + + Returns: + :obj:`int`: The index of the token that contains this char in the encoded sequence + """ + pass + + def char_to_word(self, char_pos, sequence_index=0): + """ + Get the word that contains the char at the given position in the input sequence. + + Args: + char_pos (:obj:`int`): + The position of a char in the input string + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target char + + Returns: + :obj:`int`: The index of the word that contains this char in the input sequence + """ + pass + + @property + def ids(self): + """ + The generated IDs + + The IDs are the main input to a Language Model. They are the token indices, + the numerical representations that a LM understands. + + Returns: + :obj:`List[int]`: The list of IDs + """ + pass + + @staticmethod + def merge(encodings, growing_offsets=True): + """ + Merge the list of encodings into one final :class:`~tokenizers.Encoding` + + Args: + encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): + The list of encodings that should be merged in one + + growing_offsets (:obj:`bool`, defaults to :obj:`True`): + Whether the offsets should accumulate while merging + + Returns: + :class:`~tokenizers.Encoding`: The resulting Encoding + """ + pass + + @property + def n_sequences(self): + """ + The number of sequences represented + + Returns: + :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` + """ + pass + + @property + def offsets(self): + """ + The offsets associated to each token + + These offsets let's you slice the input string, and thus retrieve the original + part that led to producing the corresponding token. 
+ + Returns: + A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets + """ + pass + + @property + def overflowing(self): + """ + A :obj:`List` of overflowing :class:`~tokenizers.Encoding` + + When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting + the output into as many pieces as required to match the specified maximum length. + This field lets you retrieve all the subsequent pieces. + + When you use pairs of sequences, the overflowing pieces will contain enough + variations to cover all the possible combinations, while respecting the provided + maximum length. + """ + pass + + def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): + """ + Pad the :class:`~tokenizers.Encoding` at the given length + + Args: + length (:obj:`int`): + The desired length + + direction: (:obj:`str`, defaults to :obj:`right`): + The expected padding direction. Can be either :obj:`right` or :obj:`left` + + pad_id (:obj:`int`, defaults to :obj:`0`): + The ID corresponding to the padding token + + pad_type_id (:obj:`int`, defaults to :obj:`0`): + The type ID corresponding to the padding token + + pad_token (:obj:`str`, defaults to `[PAD]`): + The pad token to use + """ + pass + + @property + def sequence_ids(self): + """ + The generated sequence indices. + + They represent the index of the input sequence associated to each token. + The sequence id can be None if the token is not related to any input sequence, + like for example with special tokens. + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. + """ + pass + + def set_sequence_id(self, sequence_id): + """ + Set the given sequence index + + Set the given sequence index for the whole range of tokens contained in this + :class:`~tokenizers.Encoding`. + """ + pass + + @property + def special_tokens_mask(self): + """ + The special token mask + + This indicates which tokens are special tokens, and which are not. 
+ + Returns: + :obj:`List[int]`: The special tokens mask + """ + pass + + def token_to_chars(self, token_index): + """ + Get the offsets of the token at the given index. + + The returned offsets are related to the input sequence that contains the + token. In order to determine in which input sequence it belongs, you + must call :meth:`~tokenizers.Encoding.token_to_sequence()`. + + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` + """ + pass + + def token_to_sequence(self, token_index): + """ + Get the index of the sequence represented by the given token. + + In the general use case, this method returns :obj:`0` for a single sequence or + the first sequence of a pair, and :obj:`1` for the second sequence of a pair + + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`int`: The sequence id of the given token + """ + pass + + def token_to_word(self, token_index): + """ + Get the index of the word that contains the token in one of the input sequences. + + The returned word index is related to the input sequence that contains + the token. In order to determine in which input sequence it belongs, you + must call :meth:`~tokenizers.Encoding.token_to_sequence()`. + + Args: + token_index (:obj:`int`): + The index of a token in the encoded sequence. + + Returns: + :obj:`int`: The index of the word in the relevant input sequence. + """ + pass + + @property + def tokens(self): + """ + The generated tokens + + They are the string representation of the IDs. + + Returns: + :obj:`List[str]`: The list of tokens + """ + pass + + def truncate(self, max_length, stride=0, direction="right"): + """ + Truncate the :class:`~tokenizers.Encoding` at the given length + + If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating + this information is lost. 
It will be considered as representing a single sequence. + + Args: + max_length (:obj:`int`): + The desired length + + stride (:obj:`int`, defaults to :obj:`0`): + The length of previous content to be included in each overflowing piece + + direction (:obj:`str`, defaults to :obj:`right`): + Truncate direction + """ + pass + + @property + def type_ids(self): + """ + The generated type IDs + + Generally used for tasks like sequence classification or question answering, + these tokens let the LM know which input sequence corresponds to each tokens. + + Returns: + :obj:`List[int]`: The list of type ids + """ + pass + + @property + def word_ids(self): + """ + The generated word indices. + + They represent the index of the word associated to each token. + When the input is pre-tokenized, they correspond to the ID of the given input label, + otherwise they correspond to the words indices as defined by the + :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. + + For special tokens and such (any token that was generated from something that was + not part of the input), the output is :obj:`None` + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. + """ + pass + + def word_to_chars(self, word_index, sequence_index=0): + """ + Get the offsets of the word at the given index in one of the input sequences. + + Args: + word_index (:obj:`int`): + The index of a word in one of the input sequences. + sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target word + + Returns: + :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` + """ + pass + + def word_to_tokens(self, word_index, sequence_index=0): + """ + Get the encoded tokens corresponding to the word at the given index + in one of the input sequences. + + Args: + word_index (:obj:`int`): + The index of a word in one of the input sequences. 
+ sequence_index (:obj:`int`, defaults to :obj:`0`): + The index of the sequence that contains the target word + + Returns: + :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` + """ + pass + + @property + def words(self): + """ + The generated word indices. + + .. warning:: + This is deprecated and will be removed in a future version. + Please use :obj:`~tokenizers.Encoding.word_ids` instead. + + They represent the index of the word associated to each token. + When the input is pre-tokenized, they correspond to the ID of the given input label, + otherwise they correspond to the words indices as defined by the + :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. + + For special tokens and such (any token that was generated from something that was + not part of the input), the output is :obj:`None` + + Returns: + A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. + """ + pass + +class NormalizedString: + """ + NormalizedString + + A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. + While making all the requested modifications, it keeps track of the alignment information + between the two versions of the string. + + Args: + sequence: str: + The string sequence used to initialize this NormalizedString + """ + def append(self, s): + """ + Append the given sequence to the string + """ + pass + + def clear(self): + """ + Clears the string + """ + pass + + def filter(self, func): + """ + Filter each character of the string using the given func + """ + pass + + def for_each(self, func): + """ + Calls the given function for each character of the string + """ + pass + + def lowercase(self): + """ + Lowercase the string + """ + pass + + def lstrip(self): + """ + Strip the left of the string + """ + pass + + def map(self, func): + """ + Calls the given function for each character of the string + + Replaces each character of the string using the returned value. 
Each + returned value **must** be a str of length 1 (ie a character). + """ + pass + + def nfc(self): + """ + Runs the NFC normalization + """ + pass + + def nfd(self): + """ + Runs the NFD normalization + """ + pass + + def nfkc(self): + """ + Runs the NFKC normalization + """ + pass + + def nfkd(self): + """ + Runs the NFKD normalization + """ + pass + + @property + def normalized(self): + """ + The normalized part of the string + """ + pass + + def prepend(self, s): + """ + Prepend the given sequence to the string + """ + pass + + def replace(self, pattern, content): + """ + Replace the content of the given pattern with the provided content + + Args: + pattern: Pattern: + A pattern used to match the string. Usually a string or a Regex + + content: str: + The content to be used as replacement + """ + pass + + def rstrip(self): + """ + Strip the right of the string + """ + pass + + def slice(self, range): + """ + Slice the string using the given range + """ + pass + + def split(self, pattern, behavior): + """ + Split the NormalizedString using the given pattern and the specified behavior + + Args: + pattern: Pattern: + A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` + + behavior: SplitDelimiterBehavior: + The behavior to use when splitting. + Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", + "contiguous" + + Returns: + A list of NormalizedString, representing each split + """ + pass + + def strip(self): + """ + Strip both ends of the string + """ + pass + + def uppercase(self): + """ + Uppercase the string + """ + pass + +class PreTokenizedString: + """ + PreTokenizedString + + Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the + underlying string, while keeping track of the alignment information (offsets). + + The PreTokenizedString manages what we call `splits`. 
Each split represents a substring + which is a subpart of the original string, with the relevant offsets and tokens. + + When calling one of the methods used to modify the PreTokenizedString (namely one of + `split`, `normalize` or `tokenize), only the `splits` that don't have any associated + tokens will get modified. + + Args: + sequence: str: + The string sequence used to initialize this PreTokenizedString + """ + def __init__(self, sequence): + pass + + def get_splits(self, offset_referential="original", offset_type="char"): + """ + Get the splits currently managed by the PreTokenizedString + + Args: + offset_referential: :obj:`str` + Whether the returned splits should have offsets expressed relative + to the original string, or the normalized one. choices: "original", "normalized". + + offset_type: :obj:`str` + Whether the returned splits should have offsets expressed in bytes or chars. + When slicing an str, we usually want to use chars, which is the default value. + Now in some cases it might be interesting to get these offsets expressed in bytes, + so it is possible to change this here. + choices: "char", "bytes" + + Returns + A list of splits + """ + pass + + def normalize(self, func): + """ + Normalize each split of the `PreTokenizedString` using the given `func` + + Args: + func: Callable[[NormalizedString], None]: + The function used to normalize each underlying split. This function + does not need to return anything, just calling the methods on the provided + NormalizedString allow its modification. + """ + pass + + def split(self, func): + """ + Split the PreTokenizedString using the given `func` + + Args: + func: Callable[[index, NormalizedString], List[NormalizedString]]: + The function used to split each underlying split. + It is expected to return a list of `NormalizedString`, that represent the new + splits. If the given `NormalizedString` does not need any splitting, we can + just return it directly. 
+ In order for the offsets to be tracked accurately, any returned `NormalizedString` + should come from calling either `.split` or `.slice` on the received one. + """ + pass + + def to_encoding(self, type_id=0, word_idx=None): + """ + Return an Encoding generated from this PreTokenizedString + + Args: + type_id: int = 0: + The type_id to be used on the generated Encoding. + + word_idx: Optional[int] = None: + An optional word index to be used for each token of this Encoding. If provided, + all the word indices in the generated Encoding will use this value, instead + of the one automatically tracked during pre-tokenization. + + Returns: + An Encoding + """ + pass + + def tokenize(self, func): + """ + Tokenize each split of the `PreTokenizedString` using the given `func` + + Args: + func: Callable[[str], List[Token]]: + The function used to tokenize each underlying split. This function must return + a list of Token generated from the input str. + """ + pass + +class Regex: + """ + Instantiate a new Regex with the given pattern + """ + def __init__(self, pattern): + pass + +class Token: + pass + +class Tokenizer: + """ + A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input + and outputs an :class:`~tokenizers.Encoding`. + + Args: + model (:class:`~tokenizers.models.Model`): + The core algorithm that this :obj:`Tokenizer` should be using. + + """ + def __init__(self, model): + pass + + def add_special_tokens(self, tokens): + """ + Add the given special tokens to the Tokenizer. + + If these tokens are already part of the vocabulary, it just let the Tokenizer know about + them. If they don't exist, the Tokenizer creates them, giving them a new id. + + These special tokens will never be processed by the model (ie won't be split into + multiple tokens), and they can be removed from the output when decoding. 
+ + Args: + tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): + The list of special tokens we want to add to the vocabulary. Each token can either + be a string or an instance of :class:`~tokenizers.AddedToken` for more + customization. + + Returns: + :obj:`int`: The number of tokens that were created in the vocabulary + """ + pass + + def add_tokens(self, tokens): + """ + Add the given tokens to the vocabulary + + The given tokens are added only if they don't already exist in the vocabulary. + Each token then gets a new attributed id. + + Args: + tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): + The list of tokens we want to add to the vocabulary. Each token can be either a + string or an instance of :class:`~tokenizers.AddedToken` for more customization. + + Returns: + :obj:`int`: The number of tokens that were created in the vocabulary + """ + pass + + def decode(self, ids, skip_special_tokens=True): + """ + Decode the given list of ids back to a string + + This is used to decode anything coming back from a Language Model + + Args: + ids (A :obj:`List/Tuple` of :obj:`int`): + The list of ids that we want to decode + + skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether the special tokens should be removed from the decoded string + + Returns: + :obj:`str`: The decoded string + """ + pass + + def decode_batch(self, sequences, skip_special_tokens=True): + """ + Decode a batch of ids back to their corresponding string + + Args: + sequences (:obj:`List` of :obj:`List[int]`): + The batch of sequences we want to decode + + skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether the special tokens should be removed from the decoded strings + + Returns: + :obj:`List[str]`: A list of decoded strings + """ + pass + + @property + def decoder(self): + """ + The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer + """ + pass + + def enable_padding( + self, direction="right", 
pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None + ): + """ + Enable the padding + + Args: + direction (:obj:`str`, `optional`, defaults to :obj:`right`): + The direction in which to pad. Can be either ``right`` or ``left`` + + pad_to_multiple_of (:obj:`int`, `optional`): + If specified, the padding length should always snap to the next multiple of the + given value. For example if we were going to pad witha length of 250 but + ``pad_to_multiple_of=8`` then we will pad to 256. + + pad_id (:obj:`int`, defaults to 0): + The id to be used when padding + + pad_type_id (:obj:`int`, defaults to 0): + The type id to be used when padding + + pad_token (:obj:`str`, defaults to :obj:`[PAD]`): + The pad token to be used when padding + + length (:obj:`int`, `optional`): + If specified, the length at which to pad. If not specified we pad using the size of + the longest sequence in a batch. + """ + pass + + def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"): + """ + Enable truncation + + Args: + max_length (:obj:`int`): + The max length at which to truncate + + stride (:obj:`int`, `optional`): + The length of the previous first sequence to be included in the overflowing + sequence + + strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): + The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or + ``only_second``. + + direction (:obj:`str`, defaults to :obj:`right`): + Truncate direction + """ + pass + + def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): + """ + Encode the given sequence and pair. This method can process raw text sequences + as well as already pre-tokenized sequences. 
+ + Example: + Here are some examples of the inputs that are accepted:: + + encode("A single sequence")` + encode("A sequence", "And its pair")` + encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` + encode( + [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], + is_pretokenized=True + ) + + Args: + sequence (:obj:`~tokenizers.InputSequence`): + The main input sequence we want to encode. This sequence can be either raw + text or pre-tokenized, according to the ``is_pretokenized`` argument: + + - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` + - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` + + pair (:obj:`~tokenizers.InputSequence`, `optional`): + An optional input sequence. The expected format is the same that for ``sequence``. + + is_pretokenized (:obj:`bool`, defaults to :obj:`False`): + Whether the input is already pre-tokenized + + add_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to add the special tokens + + Returns: + :class:`~tokenizers.Encoding`: The encoded result + + """ + pass + + def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): + """ + Encode the given batch of inputs. This method accept both raw text sequences + as well as already pre-tokenized sequences. + + Example: + Here are some examples of the inputs that are accepted:: + + encode_batch([ + "A single sequence", + ("A tuple with a sequence", "And its pair"), + [ "A", "pre", "tokenized", "sequence" ], + ([ "A", "pre", "tokenized", "sequence" ], "And its pair") + ]) + + Args: + input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): + A list of single sequences or pair sequences to encode. 
Each sequence + can be either raw text or pre-tokenized, according to the ``is_pretokenized`` + argument: + + - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` + - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` + + is_pretokenized (:obj:`bool`, defaults to :obj:`False`): + Whether the input is already pre-tokenized + + add_special_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to add the special tokens + + Returns: + A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch + + """ + pass + + @property + def encode_special_tokens(self): + """ + Modifies the tokenizer in order to use or not the special tokens + during encoding. + + Args: + value (:obj:`bool`): + Whether to use the special tokens or not + + """ + pass + + @staticmethod + def from_buffer(buffer): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. + + Args: + buffer (:obj:`bytes`): + A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_file(path): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. + + Args: + path (:obj:`str`): + A path to a local JSON file representing a previously serialized + :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_pretrained(identifier, revision="main", auth_token=None): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the + Hugging Face Hub. 
+ + Args: + identifier (:obj:`str`): + The identifier of a Model on the Hugging Face Hub, that contains + a tokenizer.json file + revision (:obj:`str`, defaults to `main`): + A branch or commit id + auth_token (:obj:`str`, `optional`, defaults to `None`): + An optional auth token used to access private repositories on the + Hugging Face Hub + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + @staticmethod + def from_str(json): + """ + Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. + + Args: + json (:obj:`str`): + A valid JSON string representing a previously serialized + :class:`~tokenizers.Tokenizer` + + Returns: + :class:`~tokenizers.Tokenizer`: The new tokenizer + """ + pass + + def get_added_tokens_decoder(self): + """ + Get the underlying vocabulary + + Returns: + :obj:`Dict[int, AddedToken]`: The vocabulary + """ + pass + + def get_vocab(self, with_added_tokens=True): + """ + Get the underlying vocabulary + + Args: + with_added_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to include the added tokens + + Returns: + :obj:`Dict[str, int]`: The vocabulary + """ + pass + + def get_vocab_size(self, with_added_tokens=True): + """ + Get the size of the underlying vocabulary + + Args: + with_added_tokens (:obj:`bool`, defaults to :obj:`True`): + Whether to include the added tokens + + Returns: + :obj:`int`: The size of the vocabulary + """ + pass + + def id_to_token(self, id): + """ + Convert the given id to its corresponding token if it exists + + Args: + id (:obj:`int`): + The id to convert + + Returns: + :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary + """ + pass + + @property + def model(self): + """ + The :class:`~tokenizers.models.Model` in use by the Tokenizer + """ + pass + + def no_padding(self): + """ + Disable padding + """ + pass + + def no_truncation(self): + """ + Disable truncation + """ + pass + + @property + def normalizer(self): + """ + The `optional` 
:class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer + """ + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + :param is_pair: Boolean indicating if the input would be a single sentence or a pair + :return: + """ + pass + + @property + def padding(self): + """ + Get the current padding parameters + + `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` + + Returns: + (:obj:`dict`, `optional`): + A dict with the current padding parameters if padding is enabled + """ + pass + + def post_process(self, encoding, pair=None, add_special_tokens=True): + """ + Apply all the post-processing steps to the given encodings. + + The various steps are: + + 1. Truncate according to the set truncation params (provided with + :meth:`~tokenizers.Tokenizer.enable_truncation`) + 2. Apply the :class:`~tokenizers.processors.PostProcessor` + 3. Pad according to the set padding params (provided with + :meth:`~tokenizers.Tokenizer.enable_padding`) + + Args: + encoding (:class:`~tokenizers.Encoding`): + The :class:`~tokenizers.Encoding` corresponding to the main sequence. + + pair (:class:`~tokenizers.Encoding`, `optional`): + An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Returns: + :class:`~tokenizers.Encoding`: The final post-processed encoding + """ + pass + + @property + def post_processor(self): + """ + The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer + """ + pass + + @property + def pre_tokenizer(self): + """ + The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer + """ + pass + + def save(self, path, pretty=True): + """ + Save the :class:`~tokenizers.Tokenizer` to the file at the given path. 
+ + Args: + path (:obj:`str`): + A path to a file in which to save the serialized tokenizer. + + pretty (:obj:`bool`, defaults to :obj:`True`): + Whether the JSON file should be pretty formatted. + """ + pass + + def to_str(self, pretty=False): + """ + Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. + + Args: + pretty (:obj:`bool`, defaults to :obj:`False`): + Whether the JSON string should be pretty formatted. + + Returns: + :obj:`str`: A string representing the serialized Tokenizer + """ + pass + + def token_to_id(self, token): + """ + Convert the given token to its corresponding id if it exists + + Args: + token (:obj:`str`): + The token to convert + + Returns: + :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary + """ + pass + + def train(self, files, trainer=None): + """ + Train the Tokenizer using the given files. + + Reads the files line by line, while keeping all the whitespace, even new lines. + If you want to train from data store in-memory, you can check + :meth:`~tokenizers.Tokenizer.train_from_iterator` + + Args: + files (:obj:`List[str]`): + A list of path to the files that we should use for training + + trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): + An optional trainer that should be used to train our Model + """ + pass + + def train_from_iterator(self, iterator, trainer=None, length=None): + """ + Train the Tokenizer using the provided iterator. + + You can provide anything that is a Python Iterator + + * A list of sequences :obj:`List[str]` + * A generator that yields :obj:`str` or :obj:`List[str]` + * A Numpy array of strings + * ... + + Args: + iterator (:obj:`Iterator`): + Any iterator over strings or list of strings + + trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): + An optional trainer that should be used to train our Model + + length (:obj:`int`, `optional`): + The total number of sequences in the iterator. 
This is used to + provide meaningful progress tracking + """ + pass + + @property + def truncation(self): + """ + Get the currently set truncation parameters + + `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` + + Returns: + (:obj:`dict`, `optional`): + A dict with the current truncation parameters if truncation is enabled + """ + pass diff --git a/wemm/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5293e6b680f26ec17509f0928116ec96570b4071 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b967fbd141802c496e3d94985732c3649492ac49 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi @@ -0,0 +1,271 @@ +# Generated content DO NOT EDIT +class Decoder: + """ + Base class for all decoders + + This class is not supposed to be instantiated directly. Instead, any implementation of + a Decoder will return an instance of this class when instantiated. + """ + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class BPEDecoder(Decoder): + """ + BPEDecoder Decoder + + Args: + suffix (:obj:`str`, `optional`, defaults to :obj:``): + The suffix that was used to caracterize an end-of-word. 
This suffix will + be replaced by whitespaces during the decoding + """ + def __init__(self, suffix=""): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class ByteFallback(Decoder): + """ + ByteFallback Decoder + ByteFallback is a simple trick which converts tokens looking like `<0x61>` + to pure bytes, and attempts to make them into a string. If the tokens + cannot be decoded you will get � instead for each inconvertable byte token + + """ + def __init__(self): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class ByteLevel(Decoder): + """ + ByteLevel Decoder + + This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` + :class:`~tokenizers.pre_tokenizers.PreTokenizer`. + """ + def __init__(self): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class CTC(Decoder): + """ + CTC Decoder + + Args: + pad_token (:obj:`str`, `optional`, defaults to :obj:``): + The pad token used by CTC to delimit a new token. + word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): + The word delimiter token. It will be replaced by a + cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to cleanup some tokenization artifacts. + Mainly spaces before punctuation, and some abbreviated english forms. 
+ """ + def __init__(self, pad_token="", word_delimiter_token="|", cleanup=True): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class Fuse(Decoder): + """ + Fuse Decoder + Fuse simply fuses every token into a single string. + This is the last step of decoding, this decoder exists only if + there is need to add other decoders *after* the fusion + """ + def __init__(self): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class Metaspace(Decoder): + """ + Metaspace Decoder + + Args: + replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): + The replacement character. Must be exactly one character. By default we + use the `▁` (U+2581) meta symbol (Same as in SentencePiece). + + prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`): + Whether to add a space to the first word if there isn't already one. This + lets us treat `hello` exactly like `say hello`. + Choices: "always", "never", "first". First means the space is only added on the first + token (relevant when special tokens are used or other pre_tokenizer are used). + """ + def __init__(self, replacement="▁", prepend_scheme="always", split=True): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class Replace(Decoder): + """ + Replace Decoder + + This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` + :class:`~tokenizers.pre_tokenizers.PreTokenizer`. 
+ """ + def __init__(self, pattern, content): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class Sequence(Decoder): + """ + Sequence Decoder + + Args: + decoders (:obj:`List[Decoder]`) + The decoders that need to be chained + """ + def __init__(self, decoders): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class Strip(Decoder): + """ + Strip normalizer + Strips n left characters of each token, or n right characters of each token + """ + def __init__(self, content, left=0, right=0): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass + +class WordPiece(Decoder): + """ + WordPiece Decoder + + Args: + prefix (:obj:`str`, `optional`, defaults to :obj:`##`): + The prefix to use for subwords that are not a beginning-of-word + + cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, + and some abbreviated english forms. 
+ """ + def __init__(self, prefix="##", cleanup=True): + pass + + def decode(self, tokens): + """ + Decode the given list of tokens to a final string + + Args: + tokens (:obj:`List[str]`): + The list of tokens to decode + + Returns: + :obj:`str`: The decoded string + """ + pass diff --git a/wemm/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd84b327d9b3e3f8450d75108e583b8274514b4c Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0f96aa6ac9abc005866fd0e676c781b66734fe3 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6604ba1b9be28ee9dbb754e5f58f9b2f1a22c5e7 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ea350f2d64b421d66c204b7c979cd3779c24d5e Binary 
files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py b/wemm/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py new file mode 100644 index 0000000000000000000000000000000000000000..c7e3dbc466259795ed9d168f57d8fcabe947e96e --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py @@ -0,0 +1,122 @@ +from typing import Dict, Iterator, List, Optional, Tuple, Union + +from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers +from tokenizers.models import BPE +from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str + +from .base_tokenizer import BaseTokenizer + + +class ByteLevelBPETokenizer(BaseTokenizer): + """ByteLevelBPETokenizer + + Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model + """ + + def __init__( + self, + vocab: Optional[Union[str, Dict[str, int]]] = None, + merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, + add_prefix_space: bool = False, + lowercase: bool = False, + dropout: Optional[float] = None, + unicode_normalizer: Optional[str] = None, + continuing_subword_prefix: Optional[str] = None, + end_of_word_suffix: Optional[str] = None, + trim_offsets: bool = False, + ): + if vocab is not None and merges is not None: + tokenizer = Tokenizer( + BPE( + vocab, + merges, + dropout=dropout, + continuing_subword_prefix=continuing_subword_prefix or "", + end_of_word_suffix=end_of_word_suffix or "", + ) + ) + else: + tokenizer = Tokenizer(BPE()) + + # Check for Unicode normalization first (before everything else) + normalizers = [] + + if unicode_normalizer: + normalizers += [unicode_normalizer_from_str(unicode_normalizer)] + + if lowercase: + normalizers += [Lowercase()] + + # Create the normalizer structure + if 
len(normalizers) > 0: + if len(normalizers) > 1: + tokenizer.normalizer = Sequence(normalizers) + else: + tokenizer.normalizer = normalizers[0] + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) + + parameters = { + "model": "ByteLevelBPE", + "add_prefix_space": add_prefix_space, + "lowercase": lowercase, + "dropout": dropout, + "unicode_normalizer": unicode_normalizer, + "continuing_subword_prefix": continuing_subword_prefix, + "end_of_word_suffix": end_of_word_suffix, + "trim_offsets": trim_offsets, + } + + super().__init__(tokenizer, parameters) + + @staticmethod + def from_file(vocab_filename: str, merges_filename: str, **kwargs): + vocab, merges = BPE.read_file(vocab_filename, merges_filename) + return ByteLevelBPETokenizer(vocab, merges, **kwargs) + + def train( + self, + files: Union[str, List[str]], + vocab_size: int = 30000, + min_frequency: int = 2, + show_progress: bool = True, + special_tokens: List[Union[str, AddedToken]] = [], + ): + """Train the model using the given files""" + + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + show_progress=show_progress, + special_tokens=special_tokens, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), + ) + if isinstance(files, str): + files = [files] + self._tokenizer.train(files, trainer=trainer) + + def train_from_iterator( + self, + iterator: Union[Iterator[str], Iterator[Iterator[str]]], + vocab_size: int = 30000, + min_frequency: int = 2, + show_progress: bool = True, + special_tokens: List[Union[str, AddedToken]] = [], + length: Optional[int] = None, + ): + """Train the model using the given iterator""" + + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + show_progress=show_progress, + special_tokens=special_tokens, + 
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), + ) + self._tokenizer.train_from_iterator( + iterator, + trainer=trainer, + length=length, + ) diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py b/wemm/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py new file mode 100644 index 0000000000000000000000000000000000000000..29ca5977d389d6ff4788fe263d65957e9c4e55fa --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py @@ -0,0 +1,150 @@ +from typing import Dict, Iterator, List, Optional, Tuple, Union + +from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers +from ..models import BPE +from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str +from .base_tokenizer import BaseTokenizer + + +class CharBPETokenizer(BaseTokenizer): + """Original BPE Tokenizer + + Represents the BPE algorithm, as introduced by Rico Sennrich + (https://arxiv.org/abs/1508.07909) + + The defaults settings corresponds to OpenAI GPT BPE tokenizers and differs from the original + Sennrich subword-nmt implementation by the following options that you can deactivate: + - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by: + * removing any control characters and replacing all whitespaces by the classic one. + * handle chinese chars by putting spaces around them. + * strip all accents. 
+ - spitting on punctuation in addition to whitespaces (deactivate it with + `split_on_whitespace_only=True`) + """ + + def __init__( + self, + vocab: Optional[Union[str, Dict[str, int]]] = None, + merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, + unk_token: Union[str, AddedToken] = "", + suffix: str = "", + dropout: Optional[float] = None, + lowercase: bool = False, + unicode_normalizer: Optional[str] = None, + bert_normalizer: bool = True, + split_on_whitespace_only: bool = False, + ): + if vocab is not None and merges is not None: + tokenizer = Tokenizer( + BPE( + vocab, + merges, + dropout=dropout, + unk_token=str(unk_token), + end_of_word_suffix=suffix, + ) + ) + else: + tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix)) + + if tokenizer.token_to_id(str(unk_token)) is not None: + tokenizer.add_special_tokens([str(unk_token)]) + + # Check for Unicode normalization first (before everything else) + normalizers = [] + + if unicode_normalizer: + normalizers += [unicode_normalizer_from_str(unicode_normalizer)] + + if bert_normalizer: + normalizers += [BertNormalizer(lowercase=False)] + + if lowercase: + normalizers += [Lowercase()] + + # Create the normalizer structure + if len(normalizers) > 0: + if len(normalizers) > 1: + tokenizer.normalizer = Sequence(normalizers) + else: + tokenizer.normalizer = normalizers[0] + + if split_on_whitespace_only: + tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit() + else: + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + tokenizer.decoder = decoders.BPEDecoder(suffix=suffix) + + parameters = { + "model": "BPE", + "unk_token": unk_token, + "suffix": suffix, + "dropout": dropout, + "lowercase": lowercase, + "unicode_normalizer": unicode_normalizer, + "bert_normalizer": bert_normalizer, + "split_on_whitespace_only": split_on_whitespace_only, + } + + super().__init__(tokenizer, parameters) + + @staticmethod + def 
from_file(vocab_filename: str, merges_filename: str, **kwargs): + vocab, merges = BPE.read_file(vocab_filename, merges_filename) + return CharBPETokenizer(vocab, merges, **kwargs) + + def train( + self, + files: Union[str, List[str]], + vocab_size: int = 30000, + min_frequency: int = 2, + special_tokens: List[Union[str, AddedToken]] = [""], + limit_alphabet: int = 1000, + initial_alphabet: List[str] = [], + suffix: Optional[str] = "", + show_progress: bool = True, + ): + """Train the model using the given files""" + + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + special_tokens=special_tokens, + limit_alphabet=limit_alphabet, + initial_alphabet=initial_alphabet, + end_of_word_suffix=suffix, + show_progress=show_progress, + ) + if isinstance(files, str): + files = [files] + self._tokenizer.train(files, trainer=trainer) + + def train_from_iterator( + self, + iterator: Union[Iterator[str], Iterator[Iterator[str]]], + vocab_size: int = 30000, + min_frequency: int = 2, + special_tokens: List[Union[str, AddedToken]] = [""], + limit_alphabet: int = 1000, + initial_alphabet: List[str] = [], + suffix: Optional[str] = "", + show_progress: bool = True, + length: Optional[int] = None, + ): + """Train the model using the given iterator""" + + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + min_frequency=min_frequency, + special_tokens=special_tokens, + limit_alphabet=limit_alphabet, + initial_alphabet=initial_alphabet, + end_of_word_suffix=suffix, + show_progress=show_progress, + ) + self._tokenizer.train_from_iterator( + iterator, + trainer=trainer, + length=length, + ) diff --git a/wemm/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py b/wemm/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py new file mode 100644 index 0000000000000000000000000000000000000000..1237e85eb688c02f480e9aa968f476a7401f6067 --- /dev/null +++ 
b/wemm/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py @@ -0,0 +1,196 @@ +import json +import os +from typing import Iterator, List, Optional, Union, Tuple + +from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers +from tokenizers.models import Unigram + +from .base_tokenizer import BaseTokenizer + + +class SentencePieceUnigramTokenizer(BaseTokenizer): + """SentencePiece Unigram Tokenizer + + Represents the Unigram algorithm, with the pretokenization used by SentencePiece + """ + + def __init__( + self, + vocab: Optional[List[Tuple[str, float]]] = None, + replacement: str = "▁", + add_prefix_space: bool = True, + ): + if vocab is not None: + # Let Unigram(..) fail if only one of them is None + tokenizer = Tokenizer(Unigram(vocab)) + else: + tokenizer = Tokenizer(Unigram()) + + tokenizer.normalizer = normalizers.Sequence( + [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] + ) + prepend_scheme = "always" if add_prefix_space else "never" + tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + parameters = { + "model": "SentencePieceUnigram", + "replacement": replacement, + "add_prefix_space": add_prefix_space, + } + + super().__init__(tokenizer, parameters) + + def train( + self, + files: Union[str, List[str]], + vocab_size: int = 8000, + show_progress: bool = True, + special_tokens: Optional[List[Union[str, AddedToken]]] = None, + initial_alphabet: Optional[List[str]] = None, + unk_token: Optional[str] = None, + ): + """ + Train the model using the given files + + Args: + files (:obj:`List[str]`): + A list of path to the files that we should use for training + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. 
+ show_progress (:obj:`bool`): + Whether to show progress bars while training. + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + """ + + if special_tokens is None: + special_tokens = [] + + if initial_alphabet is None: + initial_alphabet = [] + + trainer = trainers.UnigramTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + show_progress=show_progress, + initial_alphabet=initial_alphabet, + unk_token=unk_token, + ) + + if isinstance(files, str): + files = [files] + self._tokenizer.train(files, trainer=trainer) + + def train_from_iterator( + self, + iterator: Union[Iterator[str], Iterator[Iterator[str]]], + vocab_size: int = 8000, + show_progress: bool = True, + special_tokens: Optional[List[Union[str, AddedToken]]] = None, + initial_alphabet: Optional[List[str]] = None, + unk_token: Optional[str] = None, + length: Optional[int] = None, + ): + """ + Train the model using the given iterator + + Args: + iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): + Any iterator over strings or list of strings + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. + show_progress (:obj:`bool`): + Whether to show progress bars while training. + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. 
+ unk_token (:obj:`str`, `optional`): + The unknown token to be used by the model. + length (:obj:`int`, `optional`): + The total number of sequences in the iterator. This is used to + provide meaningful progress tracking + """ + + if special_tokens is None: + special_tokens = [] + + if initial_alphabet is None: + initial_alphabet = [] + + trainer = trainers.UnigramTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + show_progress=show_progress, + initial_alphabet=initial_alphabet, + unk_token=unk_token, + ) + + self._tokenizer.train_from_iterator( + iterator, + trainer=trainer, + length=length, + ) + + @staticmethod + def from_spm(filename: str): + try: + import sys + + sys.path.append(".") + + import sentencepiece_model_pb2 as model + except Exception: + raise Exception( + "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." 
+ ) + + m = model.ModelProto() + m.ParseFromString(open(filename, "rb").read()) + + precompiled_charsmap = m.normalizer_spec.precompiled_charsmap + vocab = [(piece.piece, piece.score) for piece in m.pieces] + unk_id = m.trainer_spec.unk_id + model_type = m.trainer_spec.model_type + byte_fallback = m.trainer_spec.byte_fallback + if model_type != 1: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + + replacement = "▁" + add_prefix_space = True + + tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) + + if precompiled_charsmap: + tokenizer.normalizer = normalizers.Sequence( + [ + normalizers.Precompiled(precompiled_charsmap), + normalizers.Replace(Regex(" {2,}"), " "), + ] + ) + else: + tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")]) + prepend_scheme = "always" if add_prefix_space else "never" + tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + parameters = { + "model": "SentencePieceUnigram", + } + + obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) + BaseTokenizer.__init__(obj, tokenizer, parameters) + return obj diff --git a/wemm/lib/python3.10/site-packages/tokenizers/models/__init__.py b/wemm/lib/python3.10/site-packages/tokenizers/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..68ac211aa8032249db6b929ca64f9130c358d40b --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/models/__init__.py @@ -0,0 +1,8 @@ +# Generated content DO NOT EDIT +from .. 
import models + +Model = models.Model +BPE = models.BPE +Unigram = models.Unigram +WordLevel = models.WordLevel +WordPiece = models.WordPiece diff --git a/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..507d4473163f7b48af6665af8534b512cf456a7a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi @@ -0,0 +1,595 @@ +# Generated content DO NOT EDIT +class Normalizer: + """ + Base class for all normalizers + + This class is not supposed to be instantiated directly. Instead, any implementation of a + Normalizer will return an instance of this class when instantiated. + """ + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class BertNormalizer(Normalizer): + """ + BertNormalizer + + Takes care of normalizing raw text before giving it to a Bert model. 
+ This includes cleaning the text, handling accents, chinese chars and lowercasing + + Args: + clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to clean the text, by removing any control characters + and replacing all whitespaces by the classic one. + + handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to handle chinese chars by putting spaces around them. + + strip_accents (:obj:`bool`, `optional`): + Whether to strip all accents. If this option is not specified (ie == None), + then it will be determined by the value for `lowercase` (as in the original Bert). + + lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to lowercase. + """ + def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Lowercase(Normalizer): + """ + Lowercase Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFC(Normalizer): + """ + NFC Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFD(Normalizer): + """ + NFD Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFKC(Normalizer): + """ + NFKC Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class NFKD(Normalizer): + """ + NFKD Unicode Normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Nmt(Normalizer): + """ + Nmt normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Precompiled(Normalizer): + """ + Precompiled normalizer + Don't use manually it is used for compatiblity for SentencePiece. + """ + def __init__(self, precompiled_charsmap): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Prepend(Normalizer): + """ + Prepend normalizer + """ + def __init__(self, prepend): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Replace(Normalizer): + """ + Replace normalizer + """ + def __init__(self, pattern, content): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Sequence(Normalizer): + """ + Allows concatenating multiple other Normalizer as a Sequence. + All the normalizers run in sequence in the given order + + Args: + normalizers (:obj:`List[Normalizer]`): + A list of Normalizer to be run as a sequence + """ + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class Strip(Normalizer): + """ + Strip normalizer + """ + def __init__(self, left=True, right=True): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. 
If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass + +class StripAccents(Normalizer): + """ + StripAccents normalizer + """ + def __init__(self): + pass + + def normalize(self, normalized): + """ + Normalize a :class:`~tokenizers.NormalizedString` in-place + + This method allows to modify a :class:`~tokenizers.NormalizedString` to + keep track of the alignment information. If you just want to see the result + of the normalization on a raw string, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize_str` + + Args: + normalized (:class:`~tokenizers.NormalizedString`): + The normalized string on which to apply this + :class:`~tokenizers.normalizers.Normalizer` + """ + pass + + def normalize_str(self, sequence): + """ + Normalize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment + information. 
If you need to get/convert offsets, you can use + :meth:`~tokenizers.normalizers.Normalizer.normalize` + + Args: + sequence (:obj:`str`): + A string to normalize + + Returns: + :obj:`str`: A string after normalization + """ + pass diff --git a/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abb0ca2e3e53075dc2ef654652fe5878f24babfe Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d81d3802bfccbf822334522cac268e6ef0ab760f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi @@ -0,0 +1,607 @@ +# Generated content DO NOT EDIT +class PreTokenizer: + """ + Base class for all pre-tokenizers + + This class is not supposed to be instantiated directly. Instead, any implementation of a + PreTokenizer will return an instance of this class when instantiated. + """ + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class BertPreTokenizer(PreTokenizer): + """ + BertPreTokenizer + + This pre-tokenizer splits tokens on spaces, and also on punctuation. + Each occurence of a punctuation character will be treated separately. + """ + def __init__(self): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class ByteLevel(PreTokenizer): + """ + ByteLevel PreTokenizer + + This pre-tokenizer takes care of replacing all bytes of the given string + with a corresponding representation, as well as splitting into words. + + Args: + add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to add a space to the first word if there isn't already one. This + lets us treat `hello` exactly like `say hello`. + use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): + Set this to :obj:`False` to prevent this `pre_tokenizer` from using + the GPT2 specific regexp for spliting on whitespace. + """ + def __init__(self, add_prefix_space=True, use_regex=True): + pass + + @staticmethod + def alphabet(): + """ + Returns the alphabet used by this PreTokenizer. + + Since the ByteLevel works as its name suggests, at the byte level, it + encodes each byte value to a unique visible character. This means that there is a + total of 256 different characters composing this alphabet. 
+ + Returns: + :obj:`List[str]`: A list of characters that compose the alphabet + """ + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class CharDelimiterSplit(PreTokenizer): + """ + This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` + + Args: + delimiter: str: + The delimiter char that will be used to split input + """ + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Digits(PreTokenizer): + """ + This pre-tokenizer simply splits using the digits in separate tokens + + Args: + individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): + If set to True, digits will each be separated as follows:: + + "Call 123 please" -> "Call ", "1", "2", "3", " please" + + If set to False, digits will grouped as follows:: + + "Call 123 please" -> "Call ", "123", " please" + """ + def __init__(self, individual_digits=False): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Metaspace(PreTokenizer): + """ + Metaspace pre-tokenizer + + This pre-tokenizer replaces any whitespace by the provided replacement character. + It then tries to split on these spaces. + + Args: + replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): + The replacement character. Must be exactly one character. By default we + use the `▁` (U+2581) meta symbol (Same as in SentencePiece). + + prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`): + Whether to add a space to the first word if there isn't already one. This + lets us treat `hello` exactly like `say hello`. + Choices: "always", "never", "first". First means the space is only added on the first + token (relevant when special tokens are used or other pre_tokenizer are used). 
+ + """ + def __init__(self, replacement="_", prepend_scheme="always", split=True): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Punctuation(PreTokenizer): + """ + This pre-tokenizer simply splits on punctuation as individual characters. + + Args: + behavior (:class:`~tokenizers.SplitDelimiterBehavior`): + The behavior to use when splitting. 
+ Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", + "contiguous" + """ + def __init__(self, behavior="isolated"): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Sequence(PreTokenizer): + """ + This pre-tokenizer composes other pre_tokenizers and applies them in sequence + """ + def __init__(self, pretokenizers): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Split(PreTokenizer): + """ + Split PreTokenizer + + This versatile pre-tokenizer splits using the provided pattern and + according to the provided behavior. The pattern can be inverted by + making use of the invert flag. + + Args: + pattern (:obj:`str` or :class:`~tokenizers.Regex`): + A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex` + + behavior (:class:`~tokenizers.SplitDelimiterBehavior`): + The behavior to use when splitting. + Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", + "contiguous" + + invert (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether to invert the pattern. 
+ """ + def __init__(self, pattern, behavior, invert=False): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class UnicodeScripts(PreTokenizer): + """ + This pre-tokenizer splits on characters that belong to different language family + It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt + Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. + This mimicks SentencePiece Unigram implementation. 
+ """ + def __init__(self): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class Whitespace(PreTokenizer): + """ + This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` + """ + def __init__(self): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass + +class WhitespaceSplit(PreTokenizer): + """ + This pre-tokenizer simply splits on the whitespace. Works like `.split()` + """ + def __init__(self): + pass + + def pre_tokenize(self, pretok): + """ + Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place + + This method allows to modify a :class:`~tokenizers.PreTokenizedString` to + keep track of the pre-tokenization, and leverage the capabilities of the + :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of + the pre-tokenization of a raw string, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` + + Args: + pretok (:class:`~tokenizers.PreTokenizedString): + The pre-tokenized string on which to apply this + :class:`~tokenizers.pre_tokenizers.PreTokenizer` + """ + pass + + def pre_tokenize_str(self, sequence): + """ + Pre tokenize the given string + + This method provides a way to visualize the effect of a + :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the + alignment, nor does it provide all the capabilities of the + :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use + :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` + + Args: + sequence (:obj:`str`): + A string to pre-tokeize + + Returns: + :obj:`List[Tuple[str, Offsets]]`: + A list of tuple with the pre-tokenized parts and their offsets + """ + pass diff --git a/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2d26e87b1f1a7a0cbea2ed928fa4b92c6325089 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.py b/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..06d124037b6d932615fa0d31b02f8ac82ac0b5fc --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.py @@ -0,0 +1,9 @@ +# Generated content DO NOT EDIT +from .. 
import processors + +PostProcessor = processors.PostProcessor +BertProcessing = processors.BertProcessing +ByteLevel = processors.ByteLevel +RobertaProcessing = processors.RobertaProcessing +Sequence = processors.Sequence +TemplateProcessing = processors.TemplateProcessing diff --git a/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5136d02bbc4d391eba1b2feb4882c1f563db92f3 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi @@ -0,0 +1,342 @@ +# Generated content DO NOT EDIT +class PostProcessor: + """ + Base class for all post-processors + + This class is not supposed to be instantiated directly. Instead, any implementation of + a PostProcessor will return an instance of this class when instantiated. + """ + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class BertProcessing(PostProcessor): + """ + This post-processor takes care of adding the special tokens needed by + a Bert model: + + - a SEP token + - a CLS token + + Args: + sep (:obj:`Tuple[str, int]`): + A tuple with the string representation of the SEP token, and its id + + cls (:obj:`Tuple[str, int]`): + A tuple with the string representation of the CLS token, and its id + """ + def __init__(self, sep, cls): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class ByteLevel(PostProcessor): + """ + This post-processor takes care of trimming the offsets. + + By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't + want the offsets to include these whitespaces, then this PostProcessor must be used. + + Args: + trim_offsets (:obj:`bool`): + Whether to trim the whitespaces from the produced offsets. + """ + def __init__(self, trim_offsets=True): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class RobertaProcessing(PostProcessor): + """ + This post-processor takes care of adding the special tokens needed by + a Roberta model: + + - a SEP token + - a CLS token + + It also takes care of trimming the offsets. + By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't + want the offsets to include these whitespaces, then this PostProcessor should be initialized + with :obj:`trim_offsets=True` + + Args: + sep (:obj:`Tuple[str, int]`): + A tuple with the string representation of the SEP token, and its id + + cls (:obj:`Tuple[str, int]`): + A tuple with the string representation of the CLS token, and its id + + trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to trim the whitespaces from the produced offsets. + + add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether the add_prefix_space option was enabled during pre-tokenization. 
This + is relevant because it defines the way the offsets are trimmed out. + """ + def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. + + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class Sequence(PostProcessor): + """ + Sequence Processor + + Args: + processors (:obj:`List[PostProcessor]`) + The processors that need to be chained + """ + def __init__(self, processors): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass + +class TemplateProcessing(PostProcessor): + """ + Provides a way to specify templates in order to add the special tokens to each + input sequence as relevant. + + Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to + delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first + sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair + sequences. The final result looks like this: + + - Single sequence: :obj:`[CLS] Hello there [SEP]` + - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` + + With the type ids as following:: + + [CLS] ... [SEP] ... [SEP] + 0 0 0 1 1 + + You can achieve such behavior using a TemplateProcessing:: + + TemplateProcessing( + single="[CLS] $0 [SEP]", + pair="[CLS] $A [SEP] $B:1 [SEP]:1", + special_tokens=[("[CLS]", 1), ("[SEP]", 0)], + ) + + In this example, each input sequence is identified using a ``$`` construct. This identifier + lets us specify each input sequence, and the type_id to use. When nothing is specified, + it uses the default values. Here are the different ways to specify it: + + - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` + - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... + - Specifying both: ``$A:0``, ``$B:1``, ... 
+ + The same construct is used for special tokens: ``(:)?``. + + **Warning**: You must ensure that you are giving the correct tokens/ids as these + will be added to the Encoding without any further check. If the given ids correspond + to something totally different in a `Tokenizer` using this `PostProcessor`, it + might lead to unexpected results. + + Args: + single (:obj:`Template`): + The template used for single sequences + + pair (:obj:`Template`): + The template used when both sequences are specified + + special_tokens (:obj:`Tokens`): + The list of special tokens used in each sequences + + Types: + + Template (:obj:`str` or :obj:`List`): + - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens + - If a :obj:`List[str]` is provided, a list of tokens + + Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): + - A :obj:`Tuple` with both a token and its associated ID, in any order + - A :obj:`dict` with the following keys: + - "id": :obj:`str` => The special token id, as specified in the Template + - "ids": :obj:`List[int]` => The associated IDs + - "tokens": :obj:`List[str]` => The associated tokens + + The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have + the same length. + """ + def __init__(self, single, pair, special_tokens): + pass + + def num_special_tokens_to_add(self, is_pair): + """ + Return the number of special tokens that would be added for single/pair sentences. 
+ + Args: + is_pair (:obj:`bool`): + Whether the input would be a pair of sequences + + Returns: + :obj:`int`: The number of tokens to add + """ + pass + + def process(self, encoding, pair=None, add_special_tokens=True): + """ + Post-process the given encodings, generating the final one + + Args: + encoding (:class:`~tokenizers.Encoding`): + The encoding for the first sequence + + pair (:class:`~tokenizers.Encoding`, `optional`): + The encoding for the pair sequence + + add_special_tokens (:obj:`bool`): + Whether to add the special tokens + + Return: + :class:`~tokenizers.Encoding`: The final encoding + """ + pass diff --git a/wemm/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..981a5ae0b36060db3346fb1acdc4905f295a7296 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so b/wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..44045565e87ba72e92e03ecbbb72572d4746c019 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4732f10ce97c9f5e903774dcb953c0f1518f40b27e287992075311835cefce +size 11815960 diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tools/__init__.py b/wemm/lib/python3.10/site-packages/tokenizers/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f941e2ed39c7d69fa14abff7dcf973d93843ea06 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/tools/__init__.py @@ -0,0 +1 @@ +from .visualizer import 
Annotation, EncodingVisualizer diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c56e3947685cbb192e8d3f3c86de0cfbe5df0125 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc b/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22ebaa8105c053caecfccf79ed0c2b0853d84aa0 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css b/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css new file mode 100644 index 0000000000000000000000000000000000000000..f54fde45ada66c902c0b41969d0f40d51c9717da --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css @@ -0,0 +1,170 @@ +.tokenized-text { + width:100%; + padding:2rem; + max-height: 400px; + overflow-y: auto; + box-sizing:border-box; + line-height:4rem; /* Lots of space between lines */ + font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; + box-shadow: 2px 2px 2px rgba(0,0,0,0.2); + background-color: rgba(0,0,0,0.01); + letter-spacing:2px; /* Give some extra separation between chars */ +} +.non-token{ + /* White space and other things the tokenizer ignores*/ + white-space: pre; + letter-spacing:4px; + border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ + border-bottom:1px solid #A0A0A0; + line-height: 1rem; + height: calc(100% - 2px); +} + +.token { + white-space: pre; + 
position:relative; + color:black; + letter-spacing:2px; +} + +.annotation{ + white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ + border-radius:4px; + position:relative; + width:fit-content; +} +.annotation:before { + /*The before holds the text and the after holds the background*/ + z-index:1000; /* Make sure this is above the background */ + content:attr(data-label); /* The annotations label is on a data attribute */ + color:white; + position:absolute; + font-size:1rem; + text-align:center; + font-weight:bold; + + top:1.75rem; + line-height:0; + left:0; + width:100%; + padding:0.5rem 0; + /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ + overflow: hidden; + white-space: nowrap; + text-overflow:ellipsis; +} + +.annotation:after { + content:attr(data-label); /* The content defines the width of the annotation*/ + position:absolute; + font-size:0.75rem; + text-align:center; + font-weight:bold; + text-overflow:ellipsis; + top:1.75rem; + line-height:0; + overflow: hidden; + white-space: nowrap; + + left:0; + width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ + + padding:0.5rem 0; + /* Nast hack below: + We set the annotations color in code because we don't know the colors at css time. + But you can't pass a color as a data attribute to get it into the pseudo element (this thing) + So to get around that, annotations have the color set on them with a style attribute and then we + can get the color with currentColor. 
+ Annotations wrap tokens and tokens set the color back to black + */ + background-color: currentColor; +} +.annotation:hover::after, .annotation:hover::before{ + /* When the user hovers over an annotation expand the label to display in full + */ + min-width: fit-content; +} + +.annotation:hover{ + /* Emphasize the annotation start end with a border on hover*/ + border-color: currentColor; + border: 2px solid; +} +.special-token:not(:empty){ + /* + A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) + */ + position:relative; +} +.special-token:empty::before{ + /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ + content:attr(data-stok); + background:#202020; + font-size:0.75rem; + color:white; + margin: 0 0.25rem; + padding: 0.25rem; + border-radius:4px +} + +.special-token:not(:empty):before { + /* Special tokens that have text (UNK) are displayed above the actual text*/ + content:attr(data-stok); + position:absolute; + bottom:1.75rem; + min-width:100%; + width:100%; + height:1rem; + line-height:1rem; + font-size:1rem; + text-align:center; + color:white; + font-weight:bold; + background:#202020; + border-radius:10%; +} +/* +We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations +instead we apply even and odd class at generation time and color them that way + */ +.even-token{ + background:#DCDCDC ; + border: 1px solid #DCDCDC; +} +.odd-token{ + background:#A0A0A0; + border: 1px solid #A0A0A0; +} +.even-token.multi-token,.odd-token.multi-token{ + background: repeating-linear-gradient( + 45deg, + transparent, + transparent 1px, + #ccc 1px, + #ccc 1px + ), + /* on "bottom" */ + linear-gradient( + to bottom, + #FFB6C1, + #999 + ); +} + +.multi-token:hover::after { + content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ + color:white; + background-color: 
black; + position:absolute; + font-size:0.75rem; + text-align:center; + font-weight:bold; + text-overflow:ellipsis; + top:1.75rem; + line-height:0; + overflow: hidden; + white-space: nowrap; + left:0; + width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ + padding:0.5rem 0; +} diff --git a/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer.py b/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c988a6481fd167e0013aa18cb4ff16067b704245 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/tools/visualizer.py @@ -0,0 +1,403 @@ +import itertools +import os +import re +from string import Template +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple + +from tokenizers import Encoding, Tokenizer + + +dirname = os.path.dirname(__file__) +css_filename = os.path.join(dirname, "visualizer-styles.css") +with open(css_filename) as f: + css = f.read() + + +class Annotation: + start: int + end: int + label: int + + def __init__(self, start: int, end: int, label: str): + self.start = start + self.end = end + self.label = label + + +AnnotationList = List[Annotation] +PartialIntList = List[Optional[int]] + + +class CharStateKey(NamedTuple): + token_ix: Optional[int] + anno_ix: Optional[int] + + +class CharState: + char_ix: Optional[int] + + def __init__(self, char_ix): + self.char_ix = char_ix + + self.anno_ix: Optional[int] = None + self.tokens: List[int] = [] + + @property + def token_ix(self): + return self.tokens[0] if len(self.tokens) > 0 else None + + @property + def is_multitoken(self): + """ + BPE tokenizers can output more than one token for a char + """ + return len(self.tokens) > 1 + + def partition_key(self) -> CharStateKey: + return CharStateKey( + token_ix=self.token_ix, + anno_ix=self.anno_ix, + ) + + +class Aligned: + pass + + +class EncodingVisualizer: + """ + Build an EncodingVisualizer + 
+ Args: + + tokenizer (:class:`~tokenizers.Tokenizer`): + A tokenizer instance + + default_to_notebook (:obj:`bool`): + Whether to render html output in a notebook by default + + annotation_converter (:obj:`Callable`, `optional`): + An optional (lambda) function that takes an annotation in any format and returns + an Annotation object + """ + + unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE) + + def __init__( + self, + tokenizer: Tokenizer, + default_to_notebook: bool = True, + annotation_converter: Optional[Callable[[Any], Annotation]] = None, + ): + if default_to_notebook: + try: + from IPython.core.display import HTML, display + except ImportError: + raise Exception( + """We couldn't import IPython utils for html display. + Are you running in a notebook? + You can also pass `default_to_notebook=False` to get back raw HTML + """ + ) + + self.tokenizer = tokenizer + self.default_to_notebook = default_to_notebook + self.annotation_coverter = annotation_converter + pass + + def __call__( + self, + text: str, + annotations: AnnotationList = [], + default_to_notebook: Optional[bool] = None, + ) -> Optional[str]: + """ + Build a visualization of the given text + + Args: + text (:obj:`str`): + The text to tokenize + + annotations (:obj:`List[Annotation]`, `optional`): + An optional list of annotations of the text. The can either be an annotation class + or anything else if you instantiated the visualizer with a converter function + + default_to_notebook (:obj:`bool`, `optional`, defaults to `False`): + If True, will render the html in a notebook. Otherwise returns an html string. 
+ + Returns: + The HTML string if default_to_notebook is False, otherwise (default) returns None and + renders the HTML in the notebook + + """ + final_default_to_notebook = self.default_to_notebook + if default_to_notebook is not None: + final_default_to_notebook = default_to_notebook + if final_default_to_notebook: + try: + from IPython.core.display import HTML, display + except ImportError: + raise Exception( + """We couldn't import IPython utils for html display. + Are you running in a notebook?""" + ) + if self.annotation_coverter is not None: + annotations = list(map(self.annotation_coverter, annotations)) + encoding = self.tokenizer.encode(text) + html = EncodingVisualizer.__make_html(text, encoding, annotations) + if final_default_to_notebook: + display(HTML(html)) + else: + return html + + @staticmethod + def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: + """ + Generates a color palette for all the labels in a given set of annotations + + Args: + annotations (:obj:`Annotation`): + A list of annotations + + Returns: + :obj:`dict`: A dictionary mapping labels to colors in HSL format + """ + if len(annotations) == 0: + return {} + labels = set(map(lambda x: x.label, annotations)) + num_labels = len(labels) + h_step = int(255 / num_labels) + if h_step < 20: + h_step = 20 + s = 32 + l = 64 # noqa: E741 + h = 10 + colors = {} + + for label in sorted(labels): # sort so we always get the same colors for a given set of labels + colors[label] = f"hsl({h},{s}%,{l}%" + h += h_step + return colors + + @staticmethod + def consecutive_chars_to_html( + consecutive_chars_list: List[CharState], + text: str, + encoding: Encoding, + ): + """ + Converts a list of "consecutive chars" into a single HTML element. + Chars are consecutive if they fall under the same word, token and annotation. + The CharState class is a named tuple with a "partition_key" method that makes it easy to + compare if two chars are consecutive. 
+ + Args: + consecutive_chars_list (:obj:`List[CharState]`): + A list of CharStates that have been grouped together + + text (:obj:`str`): + The original text being processed + + encoding (:class:`~tokenizers.Encoding`): + The encoding returned from the tokenizer + + Returns: + :obj:`str`: The HTML span for a set of consecutive chars + """ + first = consecutive_chars_list[0] + if first.char_ix is None: + # its a special token + stoken = encoding.tokens[first.token_ix] + # special tokens are represented as empty spans. We use the data attribute and css + # magic to display it + return f'' + # We're not in a special token so this group has a start and end. + last = consecutive_chars_list[-1] + start = first.char_ix + end = last.char_ix + 1 + span_text = text[start:end] + css_classes = [] # What css classes will we apply on the resulting span + data_items = {} # What data attributes will we apply on the result span + if first.token_ix is not None: + # We can either be in a token or not (e.g. in white space) + css_classes.append("token") + if first.is_multitoken: + css_classes.append("multi-token") + if first.token_ix % 2: + # We use this to color alternating tokens. + # A token might be split by an annotation that ends in the middle of it, so this + # lets us visually indicate a consecutive token despite its possible splitting in + # the html markup + css_classes.append("odd-token") + else: + # Like above, but a different color so we can see the tokens alternate + css_classes.append("even-token") + if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: + # This is a special token that is in the text. probably UNK + css_classes.append("special-token") + # TODO is this the right name for the data attribute ? + data_items["stok"] = encoding.tokens[first.token_ix] + else: + # In this case we are looking at a group/single char that is not tokenized. + # e.g. 
white space + css_classes.append("non-token") + css = f'''class="{' '.join(css_classes)}"''' + data = "" + for key, val in data_items.items(): + data += f' data-{key}="{val}"' + return f"{span_text}" + + @staticmethod + def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: + char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) + current_consecutive_chars = [char_states[0]] + prev_anno_ix = char_states[0].anno_ix + spans = [] + label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) + cur_anno_ix = char_states[0].anno_ix + if cur_anno_ix is not None: + # If we started in an annotation make a span for it + anno = annotations[cur_anno_ix] + label = anno.label + color = label_colors_dict[label] + spans.append(f'') + + for cs in char_states[1:]: + cur_anno_ix = cs.anno_ix + if cur_anno_ix != prev_anno_ix: + # If we've transitioned in or out of an annotation + spans.append( + # Create a span from the current consecutive characters + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + current_consecutive_chars = [cs] + + if prev_anno_ix is not None: + # if we transitioned out of an annotation close it's span + spans.append("") + if cur_anno_ix is not None: + # If we entered a new annotation make a span for it + anno = annotations[cur_anno_ix] + label = anno.label + color = label_colors_dict[label] + spans.append(f'') + prev_anno_ix = cur_anno_ix + + if cs.partition_key() == current_consecutive_chars[0].partition_key(): + # If the current charchter is in the same "group" as the previous one + current_consecutive_chars.append(cs) + else: + # Otherwise we make a span for the previous group + spans.append( + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + # An reset the consecutive_char_list to form a new group + current_consecutive_chars = [cs] + # All that's left 
is to fill out the final span + # TODO I think there is an edge case here where an annotation's span might not close + spans.append( + EncodingVisualizer.consecutive_chars_to_html( + current_consecutive_chars, + text=text, + encoding=encoding, + ) + ) + res = HTMLBody(spans) # Send the list of spans to the body of our html + return res + + @staticmethod + def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: + """ + Args: + text (:obj:`str`): + The raw text we want to align to + + annotations (:obj:`AnnotationList`): + A (possibly empty) list of annotations + + Returns: + A list of length len(text) whose entry at index i is None if there is no annotation on + charachter i or k, the index of the annotation that covers index i where k is with + respect to the list of annotations + """ + annotation_map = [None] * len(text) + for anno_ix, a in enumerate(annotations): + for i in range(a.start, a.end): + annotation_map[i] = anno_ix + return annotation_map + + @staticmethod + def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: + """ + For each character in the original text, we emit a tuple representing it's "state": + + * which token_ix it corresponds to + * which word_ix it corresponds to + * which annotation_ix it corresponds to + + Args: + text (:obj:`str`): + The raw text we want to align to + + annotations (:obj:`List[Annotation]`): + A (possibly empty) list of annotations + + encoding: (:class:`~tokenizers.Encoding`): + The encoding returned from the tokenizer + + Returns: + :obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what + it's state is + """ + annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) + # Todo make this a dataclass or named tuple + char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] + for token_ix, token in enumerate(encoding.tokens): + offsets = encoding.token_to_chars(token_ix) + if 
offsets is not None: + start, end = offsets + for i in range(start, end): + char_states[i].tokens.append(token_ix) + for char_ix, anno_ix in enumerate(annotation_map): + char_states[char_ix].anno_ix = anno_ix + + return char_states + + +def HTMLBody(children: List[str], css_styles=css) -> str: + """ + Generates the full html with css from a list of html spans + + Args: + children (:obj:`List[str]`): + A list of strings, assumed to be html elements + + css_styles (:obj:`str`, `optional`): + Optional alternative implementation of the css + + Returns: + :obj:`str`: An HTML string with style markup + """ + children_text = "".join(children) + return f""" + + + + + +
+ {children_text} +
+ + + """ diff --git a/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.py b/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22f94c50b7cf63f0b38231ab1ecec88141a678fd --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.py @@ -0,0 +1,8 @@ +# Generated content DO NOT EDIT +from .. import trainers + +Trainer = trainers.Trainer +BpeTrainer = trainers.BpeTrainer +UnigramTrainer = trainers.UnigramTrainer +WordLevelTrainer = trainers.WordLevelTrainer +WordPieceTrainer = trainers.WordPieceTrainer diff --git a/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi b/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d6c5257188b57df682dd34be5f58237c36363c64 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi @@ -0,0 +1,156 @@ +# Generated content DO NOT EDIT +class Trainer: + """ + Base class for all trainers + + This class is not supposed to be instantiated directly. Instead, any implementation of a + Trainer will return an instance of this class when instantiated. + """ + +class BpeTrainer(Trainer): + """ + Trainer capable of training a BPE model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + + limit_alphabet (:obj:`int`, `optional`): + The maximum different characters to keep in the alphabet. 
+ + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + continuing_subword_prefix (:obj:`str`, `optional`): + A prefix to be used for every subword that is not a beginning-of-word. + + end_of_word_suffix (:obj:`str`, `optional`): + A suffix to be used for every subword that is a end-of-word. + + max_token_length (:obj:`int`, `optional`): + Prevents creating tokens longer than the specified size. + This can help with reducing polluting your vocabulary with + highly repetitive tokens like `======` for wikipedia + + """ + +class UnigramTrainer(Trainer): + """ + Trainer capable of training a Unigram model + + Args: + vocab_size (:obj:`int`): + The size of the final vocabulary, including all tokens and alphabet. + + show_progress (:obj:`bool`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`): + A list of special tokens the model should know of. + + initial_alphabet (:obj:`List[str]`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + shrinking_factor (:obj:`float`): + The shrinking factor used at each step of the training to prune the + vocabulary. + + unk_token (:obj:`str`): + The token used for out-of-vocabulary tokens. + + max_piece_length (:obj:`int`): + The maximum length of a given token. + + n_sub_iterations (:obj:`int`): + The number of iterations of the EM algorithm to perform before + pruning the vocabulary. 
+ """ + def __init__( + self, + vocab_size=8000, + show_progress=True, + special_tokens=[], + shrinking_factor=0.75, + unk_token=None, + max_piece_length=16, + n_sub_iterations=2, + ): + pass + +class WordLevelTrainer(Trainer): + """ + Trainer capable of training a WorldLevel model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`): + A list of special tokens the model should know of. + """ + +class WordPieceTrainer(Trainer): + """ + Trainer capable of training a WordPiece model + + Args: + vocab_size (:obj:`int`, `optional`): + The size of the final vocabulary, including all tokens and alphabet. + + min_frequency (:obj:`int`, `optional`): + The minimum frequency a pair should have in order to be merged. + + show_progress (:obj:`bool`, `optional`): + Whether to show progress bars while training. + + special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): + A list of special tokens the model should know of. + + limit_alphabet (:obj:`int`, `optional`): + The maximum different characters to keep in the alphabet. + + initial_alphabet (:obj:`List[str]`, `optional`): + A list of characters to include in the initial alphabet, even + if not seen in the training dataset. + If the strings contain more than one character, only the first one + is kept. + + continuing_subword_prefix (:obj:`str`, `optional`): + A prefix to be used for every subword that is not a beginning-of-word. + + end_of_word_suffix (:obj:`str`, `optional`): + A suffix to be used for every subword that is a end-of-word. 
+ """ + def __init__( + self, + vocab_size=30000, + min_frequency=0, + show_progress=True, + special_tokens=[], + limit_alphabet=None, + initial_alphabet=[], + continuing_subword_prefix="##", + end_of_word_suffix=None, + ): + pass diff --git a/wemm/lib/python3.10/site-packages/torchvision/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..590b32732acc02156fcac09b8acff6087b9947f2 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/__init__.py @@ -0,0 +1,114 @@ +import os +import warnings +from modulefinder import Module + +import torch +from torchvision import datasets, io, models, ops, transforms, utils + +from .extension import _HAS_OPS + +try: + from .version import __version__ # noqa: F401 +except ImportError: + pass + + +# Check if torchvision is being imported within the root folder +if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join( + os.path.realpath(os.getcwd()), "torchvision" +): + message = ( + "You are importing torchvision within its own root folder ({}). " + "This is not expected to work and may give errors. Please exit the " + "torchvision project source and relaunch your python interpreter." + ) + warnings.warn(message.format(os.getcwd())) + +_image_backend = "PIL" + +_video_backend = "pyav" + + +def set_image_backend(backend): + """ + Specifies the package used to load images. + + Args: + backend (string): Name of the image backend. one of {'PIL', 'accimage'}. + The :mod:`accimage` package uses the Intel IPP library. It is + generally faster than PIL, but does not support as many operations. + """ + global _image_backend + if backend not in ["PIL", "accimage"]: + raise ValueError(f"Invalid backend '{backend}'. 
Options are 'PIL' and 'accimage'") + _image_backend = backend + + +def get_image_backend(): + """ + Gets the name of the package used to load images + """ + return _image_backend + + +def set_video_backend(backend): + """ + Specifies the package used to decode videos. + + Args: + backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. + The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic + binding for the FFmpeg libraries. + The :mod:`video_reader` package includes a native C++ implementation on + top of FFMPEG libraries, and a python API of TorchScript custom operator. + It generally decodes faster than :mod:`pyav`, but is perhaps less robust. + + .. note:: + Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader' + backend, please compile torchvision from source. + """ + global _video_backend + if backend not in ["pyav", "video_reader", "cuda"]: + raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend) + if backend == "video_reader" and not io._HAS_VIDEO_OPT: + # TODO: better messages + message = "video_reader video backend is not available. Please compile torchvision from source and try again" + raise RuntimeError(message) + elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER: + # TODO: better messages + message = "cuda video backend is not available." + raise RuntimeError(message) + else: + _video_backend = backend + + +def get_video_backend(): + """ + Returns the currently active video backend used to decode videos. + + Returns: + str: Name of the video backend. one of {'pyav', 'video_reader'}. + """ + + return _video_backend + + +def _is_tracing(): + return torch._C._get_tracing_state() + + +_WARN_ABOUT_BETA_TRANSFORMS = True +_BETA_TRANSFORMS_WARNING = ( + "The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. 
" + "While we do not expect major breaking changes, some APIs may still change " + "according to user feedback. Please submit any feedback you may have in " + "this issue: https://github.com/pytorch/vision/issues/6753, and you can also " + "check out https://github.com/pytorch/vision/issues/7319 to learn more about " + "the APIs that we suspect might involve future changes. " + "You can silence this warning by calling torchvision.disable_beta_transforms_warning()." +) + + +def disable_beta_transforms_warning(): + global _WARN_ABOUT_BETA_TRANSFORMS + _WARN_ABOUT_BETA_TRANSFORMS = False diff --git a/wemm/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py b/wemm/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..18afc3ed93a8272600d73cc240047a0a49f23991 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/_internally_replaced_utils.py @@ -0,0 +1,58 @@ +import importlib.machinery +import os + +from torch.hub import _get_torch_home + + +_HOME = os.path.join(_get_torch_home(), "datasets", "vision") +_USE_SHARDED_DATASETS = False + + +def _download_file_from_remote_location(fpath: str, url: str) -> None: + pass + + +def _is_remote_location_available() -> bool: + return False + + +try: + from torch.hub import load_state_dict_from_url # noqa: 401 +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401 + + +def _get_extension_path(lib_name): + + lib_dir = os.path.dirname(__file__) + if os.name == "nt": + # Register the main torchvision library location on the default DLL path + import ctypes + import sys + + kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True) + with_load_library_flags = hasattr(kernel32, "AddDllDirectory") + prev_error_mode = kernel32.SetErrorMode(0x0001) + + if with_load_library_flags: + kernel32.AddDllDirectory.restype = ctypes.c_void_p + + if sys.version_info >= (3, 8): + 
os.add_dll_directory(lib_dir) + elif with_load_library_flags: + res = kernel32.AddDllDirectory(lib_dir) + if res is None: + err = ctypes.WinError(ctypes.get_last_error()) + err.strerror += f' Error adding "{lib_dir}" to the DLL directories.' + raise err + + kernel32.SetErrorMode(prev_error_mode) + + loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES) + + extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) + ext_specs = extfinder.find_spec(lib_name) + if ext_specs is None: + raise ImportError + + return ext_specs.origin diff --git a/wemm/lib/python3.10/site-packages/torchvision/_utils.py b/wemm/lib/python3.10/site-packages/torchvision/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b739ef0966e9b6fac4574f3d6f04051799f75a16 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/_utils.py @@ -0,0 +1,32 @@ +import enum +from typing import Sequence, Type, TypeVar + +T = TypeVar("T", bound=enum.Enum) + + +class StrEnumMeta(enum.EnumMeta): + auto = enum.auto + + def from_str(self: Type[T], member: str) -> T: # type: ignore[misc] + try: + return self[member] + except KeyError: + # TODO: use `add_suggestion` from torchvision.prototype.utils._internal to improve the error message as + # soon as it is migrated. 
+ raise ValueError(f"Unknown value '{member}' for {self.__name__}.") from None + + +class StrEnum(enum.Enum, metaclass=StrEnumMeta): + pass + + +def sequence_to_str(seq: Sequence, separate_last: str = "") -> str: + if not seq: + return "" + if len(seq) == 1: + return f"'{seq[0]}'" + + head = "'" + "', '".join([str(item) for item in seq[:-1]]) + "'" + tail = f"{'' if separate_last and len(seq) == 2 else ','} {separate_last}'{seq[-1]}'" + + return head + tail diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9343048a2a68c2220cabd1cabf9ceaf72053169 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__init__.py @@ -0,0 +1,12 @@ +from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS + +from ._bounding_box import BoundingBox, BoundingBoxFormat +from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT +from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image +from ._mask import Mask +from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video + +if _WARN_ABOUT_BETA_TRANSFORMS: + import warnings + + warnings.warn(_BETA_TRANSFORMS_WARNING) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_datapoint.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_datapoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c066832c5e8bb7f2dcd8e889f7d53e64a37aef52 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_datapoint.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_image.cpython-310.pyc 
b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68e095a8220e252ae909ad36d3a6daac03152b9e Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_image.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_mask.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1189f761faaff52916e2e21eabc7fc35e1f263ca Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datapoints/__pycache__/_mask.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/_datapoint.py b/wemm/lib/python3.10/site-packages/torchvision/datapoints/_datapoint.py new file mode 100644 index 0000000000000000000000000000000000000000..fe489d13ea094c0dec8f91b3b76ff82c45014dfb --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datapoints/_datapoint.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +from types import ModuleType +from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union + +import PIL.Image +import torch +from torch._C import DisableTorchFunctionSubclass +from torch.types import _device, _dtype, _size +from torchvision.transforms import InterpolationMode + + +D = TypeVar("D", bound="Datapoint") +_FillType = Union[int, float, Sequence[int], Sequence[float], None] +_FillTypeJIT = Optional[List[float]] + + +class Datapoint(torch.Tensor): + __F: Optional[ModuleType] = None + + @staticmethod + def _to_tensor( + data: Any, + dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str, int]] = None, + requires_grad: Optional[bool] = None, + ) -> torch.Tensor: + if requires_grad is None: + requires_grad = data.requires_grad if 
isinstance(data, torch.Tensor) else False + return torch.as_tensor(data, dtype=dtype, device=device).requires_grad_(requires_grad) + + @classmethod + def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D: + raise NotImplementedError + + _NO_WRAPPING_EXCEPTIONS = { + torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output), + torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output), + # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus + # retains the type automatically + torch.Tensor.requires_grad_: lambda cls, input, output: output, + } + + @classmethod + def __torch_function__( + cls, + func: Callable[..., torch.Tensor], + types: Tuple[Type[torch.Tensor], ...], + args: Sequence[Any] = (), + kwargs: Optional[Mapping[str, Any]] = None, + ) -> torch.Tensor: + """For general information about how the __torch_function__ protocol works, + see https://pytorch.org/docs/stable/notes/extending.html#extending-torch + + TL;DR: Every time a PyTorch operator is called, it goes through the inputs and looks for the + ``__torch_function__`` method. If one is found, it is invoked with the operator as ``func`` as well as the + ``args`` and ``kwargs`` of the original call. + + The default behavior of :class:`~torch.Tensor`'s is to retain a custom tensor type. For the :class:`Datapoint` + use case, this has two downsides: + + 1. Since some :class:`Datapoint`'s require metadata to be constructed, the default wrapping, i.e. + ``return cls(func(*args, **kwargs))``, will fail for them. + 2. For most operations, there is no way of knowing if the input type is still valid for the output. + + For these reasons, the automatic output wrapping is turned off for most operators. 
The only exceptions are + listed in :attr:`Datapoint._NO_WRAPPING_EXCEPTIONS` + """ + # Since super().__torch_function__ has no hook to prevent the coercing of the output into the input type, we + # need to reimplement the functionality. + + if not all(issubclass(cls, t) for t in types): + return NotImplemented + + with DisableTorchFunctionSubclass(): + output = func(*args, **kwargs or dict()) + + wrapper = cls._NO_WRAPPING_EXCEPTIONS.get(func) + # Apart from `func` needing to be an exception, we also require the primary operand, i.e. `args[0]`, to be + # an instance of the class that `__torch_function__` was invoked on. The __torch_function__ protocol will + # invoke this method on *all* types involved in the computation by walking the MRO upwards. For example, + # `torch.Tensor(...).to(datapoints.Image(...))` will invoke `datapoints.Image.__torch_function__` with + # `args = (torch.Tensor(), datapoints.Image())` first. Without this guard, the original `torch.Tensor` would + # be wrapped into a `datapoints.Image`. + if wrapper and isinstance(args[0], cls): + return wrapper(cls, args[0], output) + + # Inplace `func`'s, canonically identified with a trailing underscore in their name like `.add_(...)`, + # will retain the input type. Thus, we need to unwrap here. + if isinstance(output, cls): + return output.as_subclass(torch.Tensor) + + return output + + def _make_repr(self, **kwargs: Any) -> str: + # This is a poor man's implementation of the proposal in https://github.com/pytorch/pytorch/issues/76532. + # If that ever gets implemented, remove this in favor of the solution on the `torch.Tensor` class. + extra_repr = ", ".join(f"{key}={value}" for key, value in kwargs.items()) + return f"{super().__repr__()[:-1]}, {extra_repr})" + + @property + def _F(self) -> ModuleType: + # This implements a lazy import of the functional to get around the cyclic import. 
This import is deferred + # until the first time we need reference to the functional module and it's shared across all instances of + # the class. This approach avoids the DataLoader issue described at + # https://github.com/pytorch/vision/pull/6476#discussion_r953588621 + if Datapoint.__F is None: + from ..transforms.v2 import functional + + Datapoint.__F = functional + return Datapoint.__F + + # Add properties for common attributes like shape, dtype, device, ndim etc + # this way we return the result without passing into __torch_function__ + @property + def shape(self) -> _size: # type: ignore[override] + with DisableTorchFunctionSubclass(): + return super().shape + + @property + def ndim(self) -> int: # type: ignore[override] + with DisableTorchFunctionSubclass(): + return super().ndim + + @property + def device(self, *args: Any, **kwargs: Any) -> _device: # type: ignore[override] + with DisableTorchFunctionSubclass(): + return super().device + + @property + def dtype(self) -> _dtype: # type: ignore[override] + with DisableTorchFunctionSubclass(): + return super().dtype + + def horizontal_flip(self) -> Datapoint: + return self + + def vertical_flip(self) -> Datapoint: + return self + + # TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize + # https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593 + def resize( # type: ignore[override] + self, + size: List[int], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[Union[str, bool]] = "warn", + ) -> Datapoint: + return self + + def crop(self, top: int, left: int, height: int, width: int) -> Datapoint: + return self + + def center_crop(self, output_size: List[int]) -> Datapoint: + return self + + def resized_crop( + self, + top: int, + left: int, + height: int, + width: int, + size: List[int], + interpolation: 
Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + antialias: Optional[Union[str, bool]] = "warn", + ) -> Datapoint: + return self + + def pad( + self, + padding: List[int], + fill: Optional[Union[int, float, List[float]]] = None, + padding_mode: str = "constant", + ) -> Datapoint: + return self + + def rotate( + self, + angle: float, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[float]] = None, + fill: _FillTypeJIT = None, + ) -> Datapoint: + return self + + def affine( + self, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: _FillTypeJIT = None, + center: Optional[List[float]] = None, + ) -> Datapoint: + return self + + def perspective( + self, + startpoints: Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + fill: _FillTypeJIT = None, + coefficients: Optional[List[float]] = None, + ) -> Datapoint: + return self + + def elastic( + self, + displacement: torch.Tensor, + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + fill: _FillTypeJIT = None, + ) -> Datapoint: + return self + + def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint: + return self + + def adjust_brightness(self, brightness_factor: float) -> Datapoint: + return self + + def adjust_saturation(self, saturation_factor: float) -> Datapoint: + return self + + def adjust_contrast(self, contrast_factor: float) -> Datapoint: + return self + + def adjust_sharpness(self, sharpness_factor: float) -> Datapoint: + return self + + def adjust_hue(self, hue_factor: float) -> Datapoint: + return self + + def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint: + return self + + def posterize(self, bits: int) -> Datapoint: + return self + + def 
solarize(self, threshold: float) -> Datapoint: + return self + + def autocontrast(self) -> Datapoint: + return self + + def equalize(self) -> Datapoint: + return self + + def invert(self) -> Datapoint: + return self + + def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint: + return self + + +_InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint] +_InputTypeJIT = torch.Tensor diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/_image.py b/wemm/lib/python3.10/site-packages/torchvision/datapoints/_image.py new file mode 100644 index 0000000000000000000000000000000000000000..e47a6c10fc38716ae7475095a50cd435180f7050 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datapoints/_image.py @@ -0,0 +1,260 @@ +from __future__ import annotations + +from typing import Any, List, Optional, Tuple, Union + +import PIL.Image +import torch +from torchvision.transforms.functional import InterpolationMode + +from ._datapoint import _FillTypeJIT, Datapoint + + +class Image(Datapoint): + """[BETA] :class:`torch.Tensor` subclass for images. + + Args: + data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as + well as PIL images. + dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from + ``data``. + device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a + :class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU. + requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and + ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``. 
+ """ + + @classmethod + def _wrap(cls, tensor: torch.Tensor) -> Image: + image = tensor.as_subclass(cls) + return image + + def __new__( + cls, + data: Any, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str, int]] = None, + requires_grad: Optional[bool] = None, + ) -> Image: + if isinstance(data, PIL.Image.Image): + from torchvision.transforms.v2 import functional as F + + data = F.pil_to_tensor(data) + + tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) + if tensor.ndim < 2: + raise ValueError + elif tensor.ndim == 2: + tensor = tensor.unsqueeze(0) + + return cls._wrap(tensor) + + @classmethod + def wrap_like(cls, other: Image, tensor: torch.Tensor) -> Image: + return cls._wrap(tensor) + + def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override] + return self._make_repr() + + @property + def spatial_size(self) -> Tuple[int, int]: + return tuple(self.shape[-2:]) # type: ignore[return-value] + + @property + def num_channels(self) -> int: + return self.shape[-3] + + def horizontal_flip(self) -> Image: + output = self._F.horizontal_flip_image_tensor(self.as_subclass(torch.Tensor)) + return Image.wrap_like(self, output) + + def vertical_flip(self) -> Image: + output = self._F.vertical_flip_image_tensor(self.as_subclass(torch.Tensor)) + return Image.wrap_like(self, output) + + def resize( # type: ignore[override] + self, + size: List[int], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[Union[str, bool]] = "warn", + ) -> Image: + output = self._F.resize_image_tensor( + self.as_subclass(torch.Tensor), size, interpolation=interpolation, max_size=max_size, antialias=antialias + ) + return Image.wrap_like(self, output) + + def crop(self, top: int, left: int, height: int, width: int) -> Image: + output = self._F.crop_image_tensor(self.as_subclass(torch.Tensor), top, left, height, width) + 
return Image.wrap_like(self, output) + + def center_crop(self, output_size: List[int]) -> Image: + output = self._F.center_crop_image_tensor(self.as_subclass(torch.Tensor), output_size=output_size) + return Image.wrap_like(self, output) + + def resized_crop( + self, + top: int, + left: int, + height: int, + width: int, + size: List[int], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + antialias: Optional[Union[str, bool]] = "warn", + ) -> Image: + output = self._F.resized_crop_image_tensor( + self.as_subclass(torch.Tensor), + top, + left, + height, + width, + size=list(size), + interpolation=interpolation, + antialias=antialias, + ) + return Image.wrap_like(self, output) + + def pad( + self, + padding: List[int], + fill: Optional[Union[int, float, List[float]]] = None, + padding_mode: str = "constant", + ) -> Image: + output = self._F.pad_image_tensor(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode) + return Image.wrap_like(self, output) + + def rotate( + self, + angle: float, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[float]] = None, + fill: _FillTypeJIT = None, + ) -> Image: + output = self._F.rotate_image_tensor( + self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center + ) + return Image.wrap_like(self, output) + + def affine( + self, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: _FillTypeJIT = None, + center: Optional[List[float]] = None, + ) -> Image: + output = self._F.affine_image_tensor( + self.as_subclass(torch.Tensor), + angle, + translate=translate, + scale=scale, + shear=shear, + interpolation=interpolation, + fill=fill, + center=center, + ) + return Image.wrap_like(self, output) + + def perspective( + self, + startpoints: 
Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + fill: _FillTypeJIT = None, + coefficients: Optional[List[float]] = None, + ) -> Image: + output = self._F.perspective_image_tensor( + self.as_subclass(torch.Tensor), + startpoints, + endpoints, + interpolation=interpolation, + fill=fill, + coefficients=coefficients, + ) + return Image.wrap_like(self, output) + + def elastic( + self, + displacement: torch.Tensor, + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + fill: _FillTypeJIT = None, + ) -> Image: + output = self._F.elastic_image_tensor( + self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill + ) + return Image.wrap_like(self, output) + + def rgb_to_grayscale(self, num_output_channels: int = 1) -> Image: + output = self._F.rgb_to_grayscale_image_tensor( + self.as_subclass(torch.Tensor), num_output_channels=num_output_channels + ) + return Image.wrap_like(self, output) + + def adjust_brightness(self, brightness_factor: float) -> Image: + output = self._F.adjust_brightness_image_tensor( + self.as_subclass(torch.Tensor), brightness_factor=brightness_factor + ) + return Image.wrap_like(self, output) + + def adjust_saturation(self, saturation_factor: float) -> Image: + output = self._F.adjust_saturation_image_tensor( + self.as_subclass(torch.Tensor), saturation_factor=saturation_factor + ) + return Image.wrap_like(self, output) + + def adjust_contrast(self, contrast_factor: float) -> Image: + output = self._F.adjust_contrast_image_tensor(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor) + return Image.wrap_like(self, output) + + def adjust_sharpness(self, sharpness_factor: float) -> Image: + output = self._F.adjust_sharpness_image_tensor( + self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor + ) + return Image.wrap_like(self, output) + + def adjust_hue(self, hue_factor: float) -> 
Image: + output = self._F.adjust_hue_image_tensor(self.as_subclass(torch.Tensor), hue_factor=hue_factor) + return Image.wrap_like(self, output) + + def adjust_gamma(self, gamma: float, gain: float = 1) -> Image: + output = self._F.adjust_gamma_image_tensor(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain) + return Image.wrap_like(self, output) + + def posterize(self, bits: int) -> Image: + output = self._F.posterize_image_tensor(self.as_subclass(torch.Tensor), bits=bits) + return Image.wrap_like(self, output) + + def solarize(self, threshold: float) -> Image: + output = self._F.solarize_image_tensor(self.as_subclass(torch.Tensor), threshold=threshold) + return Image.wrap_like(self, output) + + def autocontrast(self) -> Image: + output = self._F.autocontrast_image_tensor(self.as_subclass(torch.Tensor)) + return Image.wrap_like(self, output) + + def equalize(self) -> Image: + output = self._F.equalize_image_tensor(self.as_subclass(torch.Tensor)) + return Image.wrap_like(self, output) + + def invert(self) -> Image: + output = self._F.invert_image_tensor(self.as_subclass(torch.Tensor)) + return Image.wrap_like(self, output) + + def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image: + output = self._F.gaussian_blur_image_tensor( + self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma + ) + return Image.wrap_like(self, output) + + def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Image: + output = self._F.normalize_image_tensor(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace) + return Image.wrap_like(self, output) + + +_ImageType = Union[torch.Tensor, PIL.Image.Image, Image] +_ImageTypeJIT = torch.Tensor +_TensorImageType = Union[torch.Tensor, Image] +_TensorImageTypeJIT = torch.Tensor diff --git a/wemm/lib/python3.10/site-packages/torchvision/datapoints/_video.py b/wemm/lib/python3.10/site-packages/torchvision/datapoints/_video.py new file mode 100644 index 
class Video(Datapoint):
    """[BETA] :class:`torch.Tensor` subclass for videos.

    Args:
        data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """

    @classmethod
    def _wrap(cls, tensor: torch.Tensor) -> Video:
        # Re-tag the tensor as a Video subclass without copying data.
        video = tensor.as_subclass(cls)
        return video

    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Video:
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        # Bug fix: validate the materialized tensor rather than ``data`` --
        # ``data`` may be any tensor-like (e.g. a nested list) without an
        # ``.ndim`` attribute, which turned this check into an AttributeError.
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least 4 dimensions (..., T, C, H, W), but got {tensor.ndim}")
        return cls._wrap(tensor)

    @classmethod
    def wrap_like(cls, other: Video, tensor: torch.Tensor) -> Video:
        # ``other`` carries no extra metadata for videos; only the subclass tag matters.
        return cls._wrap(tensor)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()

    @property
    def spatial_size(self) -> Tuple[int, int]:
        # (height, width) of each frame.
        return tuple(self.shape[-2:])  # type: ignore[return-value]

    @property
    def num_channels(self) -> int:
        return self.shape[-3]

    @property
    def num_frames(self) -> int:
        return self.shape[-4]

    # Each method below unwraps to a plain tensor, dispatches to the matching
    # kernel on ``self._F`` (the functional namespace), and re-wraps the result.

    def horizontal_flip(self) -> Video:
        output = self._F.horizontal_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def vertical_flip(self) -> Video:
        output = self._F.vertical_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def resize(  # type: ignore[override]
        self,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        max_size: Optional[int] = None,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resize_video(
            self.as_subclass(torch.Tensor),
            size,
            interpolation=interpolation,
            max_size=max_size,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def crop(self, top: int, left: int, height: int, width: int) -> Video:
        output = self._F.crop_video(self.as_subclass(torch.Tensor), top, left, height, width)
        return Video.wrap_like(self, output)

    def center_crop(self, output_size: List[int]) -> Video:
        output = self._F.center_crop_video(self.as_subclass(torch.Tensor), output_size=output_size)
        return Video.wrap_like(self, output)

    def resized_crop(
        self,
        top: int,
        left: int,
        height: int,
        width: int,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resized_crop_video(
            self.as_subclass(torch.Tensor),
            top,
            left,
            height,
            width,
            size=list(size),
            interpolation=interpolation,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def pad(
        self,
        padding: List[int],
        fill: Optional[Union[int, float, List[float]]] = None,
        padding_mode: str = "constant",
    ) -> Video:
        output = self._F.pad_video(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
        return Video.wrap_like(self, output)

    def rotate(
        self,
        angle: float,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        expand: bool = False,
        center: Optional[List[float]] = None,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.rotate_video(
            self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
        )
        return Video.wrap_like(self, output)

    def affine(
        self,
        angle: Union[int, float],
        translate: List[float],
        scale: float,
        shear: List[float],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: _FillTypeJIT = None,
        center: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.affine_video(
            self.as_subclass(torch.Tensor),
            angle,
            translate=translate,
            scale=scale,
            shear=shear,
            interpolation=interpolation,
            fill=fill,
            center=center,
        )
        return Video.wrap_like(self, output)

    def perspective(
        self,
        startpoints: Optional[List[List[int]]],
        endpoints: Optional[List[List[int]]],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
        coefficients: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.perspective_video(
            self.as_subclass(torch.Tensor),
            startpoints,
            endpoints,
            interpolation=interpolation,
            fill=fill,
            coefficients=coefficients,
        )
        return Video.wrap_like(self, output)

    def elastic(
        self,
        displacement: torch.Tensor,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.elastic_video(
            self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
        )
        return Video.wrap_like(self, output)

    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Video:
        # NOTE(review): dispatches to the *image* kernel (videos share the image
        # color-space layout on the last three dims) -- matches upstream.
        output = self._F.rgb_to_grayscale_image_tensor(
            self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
        )
        return Video.wrap_like(self, output)

    def adjust_brightness(self, brightness_factor: float) -> Video:
        output = self._F.adjust_brightness_video(self.as_subclass(torch.Tensor), brightness_factor=brightness_factor)
        return Video.wrap_like(self, output)

    def adjust_saturation(self, saturation_factor: float) -> Video:
        output = self._F.adjust_saturation_video(self.as_subclass(torch.Tensor), saturation_factor=saturation_factor)
        return Video.wrap_like(self, output)

    def adjust_contrast(self, contrast_factor: float) -> Video:
        output = self._F.adjust_contrast_video(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
        return Video.wrap_like(self, output)

    def adjust_sharpness(self, sharpness_factor: float) -> Video:
        output = self._F.adjust_sharpness_video(self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor)
        return Video.wrap_like(self, output)

    def adjust_hue(self, hue_factor: float) -> Video:
        output = self._F.adjust_hue_video(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
        return Video.wrap_like(self, output)

    def adjust_gamma(self, gamma: float, gain: float = 1) -> Video:
        output = self._F.adjust_gamma_video(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
        return Video.wrap_like(self, output)

    def posterize(self, bits: int) -> Video:
        output = self._F.posterize_video(self.as_subclass(torch.Tensor), bits=bits)
        return Video.wrap_like(self, output)

    def solarize(self, threshold: float) -> Video:
        output = self._F.solarize_video(self.as_subclass(torch.Tensor), threshold=threshold)
        return Video.wrap_like(self, output)

    def autocontrast(self) -> Video:
        output = self._F.autocontrast_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def equalize(self) -> Video:
        output = self._F.equalize_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def invert(self) -> Video:
        output = self._F.invert_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
        output = self._F.gaussian_blur_video(self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma)
        return Video.wrap_like(self, output)

    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Video:
        output = self._F.normalize_video(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
        return Video.wrap_like(self, output)


# Public/JIT-facing aliases: TorchScript cannot represent the Union, so the
# JIT variants collapse to plain ``torch.Tensor``.
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor
import _HAS_GPU_VIDEO_DECODER +except ModuleNotFoundError: + _HAS_GPU_VIDEO_DECODER = False + +from ._video_opt import ( + _HAS_VIDEO_OPT, + _probe_video_from_file, + _probe_video_from_memory, + _read_video_from_file, + _read_video_from_memory, + _read_video_timestamps_from_file, + _read_video_timestamps_from_memory, + Timebase, + VideoMetaData, +) +from .image import ( + decode_image, + decode_jpeg, + decode_png, + encode_jpeg, + encode_png, + ImageReadMode, + read_file, + read_image, + write_file, + write_jpeg, + write_png, +) +from .video import read_video, read_video_timestamps, write_video +from .video_reader import VideoReader + + +__all__ = [ + "write_video", + "read_video", + "read_video_timestamps", + "_read_video_from_file", + "_read_video_timestamps_from_file", + "_probe_video_from_file", + "_read_video_from_memory", + "_read_video_timestamps_from_memory", + "_probe_video_from_memory", + "_HAS_VIDEO_OPT", + "_HAS_GPU_VIDEO_DECODER", + "_read_video_clip_from_memory", + "_read_video_meta_data", + "VideoMetaData", + "Timebase", + "ImageReadMode", + "decode_image", + "decode_jpeg", + "decode_png", + "encode_jpeg", + "encode_png", + "read_file", + "read_image", + "write_file", + "write_jpeg", + "write_png", + "Video", + "VideoReader", +] diff --git a/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de6bedfc0c2364c344c5867690db6bf74a259a90 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73f49aa3ac22047ad1aa2c391472e2e7a01149a4 Binary files 
/dev/null and b/wemm/lib/python3.10/site-packages/torchvision/io/__pycache__/video_reader.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_box_convert.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_box_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54434da49b7fb356e9e7cdacb8863fee8a65c06f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_box_convert.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_register_onnx_ops.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_register_onnx_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d667a787299ff69dc23cdd51172bc87a72c7d19 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/_register_onnx_ops.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/feature_pyramid_network.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/feature_pyramid_network.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b65283f205d5eb6d182499593246d29864cf762 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/feature_pyramid_network.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/ps_roi_align.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/ps_roi_align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173afdc607f77490d2df6c83cb0b2081978b7d28 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/ps_roi_align.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/_utils.py 
b/wemm/lib/python3.10/site-packages/torchvision/ops/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a6ca557a98b899b7c2a11ba0dca3d64730af4268 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/ops/_utils.py @@ -0,0 +1,106 @@ +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn, Tensor + + +def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor: + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + # TODO add back the assert + # assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor: + concat_boxes = _cat([b for b in boxes], dim=0) + temp = [] + for i, b in enumerate(boxes): + temp.append(torch.full_like(b[:, :1], i)) + ids = _cat(temp, dim=0) + rois = torch.cat([ids, concat_boxes], dim=1) + return rois + + +def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]): + if isinstance(boxes, (list, tuple)): + for _tensor in boxes: + torch._assert( + _tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]" + ) + elif isinstance(boxes, torch.Tensor): + torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]") + else: + torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]") + return + + +def split_normalization_params( + model: nn.Module, norm_classes: Optional[List[type]] = None +) -> Tuple[List[Tensor], List[Tensor]]: + # Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501 + if not norm_classes: + norm_classes = [ + nn.modules.batchnorm._BatchNorm, + nn.LayerNorm, + nn.GroupNorm, + nn.modules.instancenorm._InstanceNorm, + nn.LocalResponseNorm, + ] + + for t in norm_classes: + if not issubclass(t, nn.Module): + raise ValueError(f"Class 
{t} is not a subclass of nn.Module.") + + classes = tuple(norm_classes) + + norm_params = [] + other_params = [] + for module in model.modules(): + if next(module.children(), None): + other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad) + elif isinstance(module, classes): + norm_params.extend(p for p in module.parameters() if p.requires_grad) + else: + other_params.extend(p for p in module.parameters() if p.requires_grad) + return norm_params, other_params + + +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +def _upcast_non_float(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.dtype not in (torch.float32, torch.float64): + return t.float() + return t + + +def _loss_inter_union( + boxes1: torch.Tensor, + boxes2: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsctk = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk + + return intsctk, unionk diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/feature_pyramid_network.py b/wemm/lib/python3.10/site-packages/torchvision/ops/feature_pyramid_network.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b190844ffce37824a2806d7f8c58342bf91cfe --- /dev/null +++ 
class ExtraFPNBlock(nn.Module):
    """
    Base class for the extra block in the FPN.

    Args:
        results (List[Tensor]): the result of the FPN
        x (List[Tensor]): the original feature maps
        names (List[str]): the names for each one of the
            original feature maps

    Returns:
        results (List[Tensor]): the extended set of results
            of the FPN
        names (List[str]): the extended set of names for the results
    """

    def forward(
        self,
        results: List[Tensor],
        x: List[Tensor],
        names: List[str],
    ) -> Tuple[List[Tensor], List[str]]:
        # Intentionally a no-op: concrete subclasses override this.
        pass


class FeaturePyramidNetwork(nn.Module):
    """
    Module that adds a FPN from on top of a set of feature maps. This is based on
    `"Feature Pyramid Network for Object Detection" <https://arxiv.org/abs/1612.03144>`_.

    The feature maps are currently supposed to be in increasing depth
    order.

    The input to the model is expected to be an OrderedDict[Tensor], containing
    the feature maps on top of which the FPN will be added.

    Args:
        in_channels_list (list[int]): number of channels for each feature map that
            is passed to the module
        out_channels (int): number of channels of the FPN representation
        extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
            be performed. It is expected to take the fpn features, the original
            features and the names of the original features as input, and returns
            a new list of feature maps and their corresponding names
        norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None

    Examples::

        >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)
        >>> # get some dummy data
        >>> x = OrderedDict()
        >>> x['feat0'] = torch.rand(1, 10, 64, 64)
        >>> x['feat2'] = torch.rand(1, 20, 16, 16)
        >>> x['feat3'] = torch.rand(1, 30, 8, 8)
        >>> # compute the FPN on top of x
        >>> output = m(x)
        >>> print([(k, v.shape) for k, v in output.items()])
        >>> # returns
        >>>   [('feat0', torch.Size([1, 5, 64, 64])),
        >>>    ('feat2', torch.Size([1, 5, 16, 16])),
        >>>    ('feat3', torch.Size([1, 5, 8, 8]))]

    """

    _version = 2

    def __init__(
        self,
        in_channels_list: List[int],
        out_channels: int,
        extra_blocks: Optional[ExtraFPNBlock] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ):
        super().__init__()
        _log_api_usage_once(self)
        self.inner_blocks = nn.ModuleList()
        self.layer_blocks = nn.ModuleList()
        for in_channels in in_channels_list:
            if in_channels == 0:
                raise ValueError("in_channels=0 is currently not supported")
            # 1x1 lateral projection to the common FPN width ...
            inner_block_module = Conv2dNormActivation(
                in_channels, out_channels, kernel_size=1, padding=0, norm_layer=norm_layer, activation_layer=None
            )
            # ... and a 3x3 smoothing conv applied after top-down fusion.
            layer_block_module = Conv2dNormActivation(
                out_channels, out_channels, kernel_size=3, norm_layer=norm_layer, activation_layer=None
            )
            self.inner_blocks.append(inner_block_module)
            self.layer_blocks.append(layer_block_module)

        # initialize parameters now to avoid modifying the initialization of top_blocks
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

        if extra_blocks is not None:
            if not isinstance(extra_blocks, ExtraFPNBlock):
                raise TypeError(f"extra_blocks should be of type ExtraFPNBlock not {type(extra_blocks)}")
        self.extra_blocks = extra_blocks

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Migrate pre-version-2 checkpoints where conv weights lived directly on
        the block instead of inside a ``Conv2dNormActivation`` (extra ``.0``)."""
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            num_blocks = len(self.inner_blocks)
            for block in ["inner_blocks", "layer_blocks"]:
                for i in range(num_blocks):
                    # Renamed loop variable: the original shadowed the builtin `type`.
                    for param_name in ["weight", "bias"]:
                        old_key = f"{prefix}{block}.{i}.{param_name}"
                        new_key = f"{prefix}{block}.{i}.0.{param_name}"
                        if old_key in state_dict:
                            state_dict[new_key] = state_dict.pop(old_key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:
        """
        This is equivalent to self.inner_blocks[idx](x),
        but torchscript doesn't support this yet
        """
        num_blocks = len(self.inner_blocks)
        if idx < 0:
            idx += num_blocks
        out = x
        # Linear scan instead of indexing: ModuleList indexing with a runtime
        # int is not scriptable.
        for i, module in enumerate(self.inner_blocks):
            if i == idx:
                out = module(x)
        return out

    def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:
        """
        This is equivalent to self.layer_blocks[idx](x),
        but torchscript doesn't support this yet
        """
        num_blocks = len(self.layer_blocks)
        if idx < 0:
            idx += num_blocks
        out = x
        for i, module in enumerate(self.layer_blocks):
            if i == idx:
                out = module(x)
        return out

    def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]:
        """
        Computes the FPN for a set of feature maps.

        Args:
            x (OrderedDict[Tensor]): feature maps for each feature level.

        Returns:
            results (OrderedDict[Tensor]): feature maps after FPN layers.
                They are ordered from the highest resolution first.
        """
        # unpack OrderedDict into two lists for easier handling
        names = list(x.keys())
        feature_maps = list(x.values())

        # Start from the deepest level and fuse top-down.
        last_inner = self.get_result_from_inner_blocks(feature_maps[-1], -1)
        results = [self.get_result_from_layer_blocks(last_inner, -1)]

        for idx in range(len(feature_maps) - 2, -1, -1):
            inner_lateral = self.get_result_from_inner_blocks(feature_maps[idx], idx)
            feat_shape = inner_lateral.shape[-2:]
            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest")
            last_inner = inner_lateral + inner_top_down
            results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))

        if self.extra_blocks is not None:
            results, names = self.extra_blocks(results, feature_maps, names)

        # make it back an OrderedDict (dropped the redundant list comprehension)
        out = OrderedDict(zip(names, results))

        return out


class LastLevelMaxPool(ExtraFPNBlock):
    """
    Applies a max_pool2d on top of the last feature map
    """

    def forward(
        self,
        x: List[Tensor],
        y: List[Tensor],
        names: List[str],
    ) -> Tuple[List[Tensor], List[str]]:
        names.append("pool")
        # kernel 1, stride 2, padding 0 -> plain 2x subsampling of the last map.
        x.append(F.max_pool2d(x[-1], 1, 2, 0))
        return x, names


class LastLevelP6P7(ExtraFPNBlock):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            nn.init.kaiming_uniform_(module.weight, a=1)
            nn.init.constant_(module.bias, 0)
        # When channel counts match, P6 is computed from P5 instead of C5.
        self.use_P5 = in_channels == out_channels

    def forward(
        self,
        p: List[Tensor],
        c: List[Tensor],
        names: List[str],
    ) -> Tuple[List[Tensor], List[str]]:
        p5, c5 = p[-1], c[-1]
        x = p5 if self.use_P5 else c5
        p6 = self.p6(x)
        p7 = self.p7(F.relu(p6))
        p.extend([p6, p7])
        names.extend(["p6", "p7"])
        return p, names
class RoIAlign(nn.Module):
    """
    See :func:`roi_align`.
    """

    def __init__(
        self,
        output_size: BroadcastingList2[int],
        spatial_scale: float,
        sampling_ratio: int,
        aligned: bool = False,
    ):
        super().__init__()
        _log_api_usage_once(self)
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
        self.aligned = aligned

    def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
        # Thin module wrapper: all configuration lives on the instance, the
        # computation is delegated to the functional op.
        return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)

    def __repr__(self) -> str:
        s = (
            f"{self.__class__.__name__}("
            f"output_size={self.output_size}"
            f", spatial_scale={self.spatial_scale}"
            f", sampling_ratio={self.sampling_ratio}"
            f", aligned={self.aligned}"
            f")"
        )
        return s


def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
    """
    Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
    branches of residual architectures.

    Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
            being its batch i.e. a batch with ``N`` rows.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` or ``"row"``.
            ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
            randomly selected rows from the batch.
        training: apply stochastic depth if it is ``True``. Default: ``True``

    Returns:
        Tensor[N, ...]: The randomly zeroed tensor.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(stochastic_depth)
    if p < 0.0 or p > 1.0:
        raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
    if mode not in ("batch", "row"):
        raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}")
    if not training or p == 0.0:
        return input

    survival_rate = 1.0 - p
    if mode == "row":
        # One Bernoulli draw per batch row, broadcast over the remaining dims.
        size = [input.shape[0]] + [1] * (input.ndim - 1)
    else:
        # A single draw that keeps or drops the whole batch.
        size = [1] * input.ndim
    noise = torch.empty(size, dtype=input.dtype, device=input.device)
    noise = noise.bernoulli_(survival_rate)
    if survival_rate > 0.0:
        # Rescale the surviving paths so the expected value matches eval mode.
        noise.div_(survival_rate)
    return input * noise


torch.fx.wrap("stochastic_depth")
+ """ + + def __init__(self, p: float, mode: str) -> None: + super().__init__() + _log_api_usage_once(self) + self.p = p + self.mode = mode + + def forward(self, input: Tensor) -> Tensor: + return stochastic_depth(input, self.p, self.mode, self.training) + + def __repr__(self) -> str: + s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})" + return s diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77680a14f0d0599f4004a2ce5c299c0f5e13a0d5 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/__init__.py @@ -0,0 +1,2 @@ +from .transforms import * +from .autoaugment import * diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e7c17882bc0a4d37cf730a71f9c13ec4802997 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_tensor.py @@ -0,0 +1,962 @@ +import warnings +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.nn.functional import conv2d, grid_sample, interpolate, pad as torch_pad + + +def _is_tensor_a_torch_image(x: Tensor) -> bool: + return x.ndim >= 2 + + +def _assert_image_tensor(img: Tensor) -> None: + if not _is_tensor_a_torch_image(img): + raise TypeError("Tensor is not a torch image.") + + +def get_dimensions(img: Tensor) -> List[int]: + _assert_image_tensor(img) + channels = 1 if img.ndim == 2 else img.shape[-3] + height, width = img.shape[-2:] + return [channels, height, width] + + +def get_image_size(img: Tensor) -> List[int]: + # Returns (w, h) of tensor image + _assert_image_tensor(img) + return [img.shape[-1], img.shape[-2]] + + +def get_image_num_channels(img: Tensor) -> int: 
+ _assert_image_tensor(img) + if img.ndim == 2: + return 1 + elif img.ndim > 2: + return img.shape[-3] + + raise TypeError(f"Input ndim should be 2 or more. Got {img.ndim}") + + +def _max_value(dtype: torch.dtype) -> int: + if dtype == torch.uint8: + return 255 + elif dtype == torch.int8: + return 127 + elif dtype == torch.int16: + return 32767 + elif dtype == torch.int32: + return 2147483647 + elif dtype == torch.int64: + return 9223372036854775807 + else: + # This is only here for completeness. This value is implicitly assumed in a lot of places so changing it is not + # easy. + return 1 + + +def _assert_channels(img: Tensor, permitted: List[int]) -> None: + c = get_dimensions(img)[0] + if c not in permitted: + raise TypeError(f"Input image tensor permitted channel values are {permitted}, but found {c}") + + +def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + if image.dtype == dtype: + return image + + if image.is_floating_point(): + + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + return image.to(dtype) + + # float to int + if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or ( + image.dtype == torch.float64 and dtype == torch.int64 + ): + msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely." + raise RuntimeError(msg) + + # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321 + # For data in the range 0-1, (float * 255).to(uint) is only 255 + # when float is exactly 1.0. + # `max + 1 - epsilon` provides more evenly distributed mapping of + # ranges of floats to ints. 
+ eps = 1e-3 + max_val = float(_max_value(dtype)) + result = image.mul(max_val + 1.0 - eps) + return result.to(dtype) + else: + input_max = float(_max_value(image.dtype)) + + # int to float + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + image = image.to(dtype) + return image / input_max + + output_max = float(_max_value(dtype)) + + # int to int + if input_max > output_max: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image // factor can produce different results + factor = int((input_max + 1) // (output_max + 1)) + image = torch.div(image, factor, rounding_mode="floor") + return image.to(dtype) + else: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image * factor can produce different results + factor = int((output_max + 1) // (input_max + 1)) + image = image.to(dtype) + return image * factor + + +def vflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-2) + + +def hflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-1) + + +def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + _assert_image_tensor(img) + + _, h, w = get_dimensions(img) + right = left + width + bottom = top + height + + if left < 0 or top < 0 or right > w or bottom > h: + padding_ltrb = [ + max(-left + min(0, right), 0), + max(-top + min(0, bottom), 0), + max(right - max(w, left), 0), + max(bottom - max(h, top), 0), + ] + return pad(img[..., max(top, 0) : bottom, max(left, 0) : right], padding_ltrb, fill=0) + return img[..., top:bottom, left:right] + + +def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + _assert_channels(img, [1, 3]) + + if num_output_channels not in (1, 3): + raise 
ValueError("num_output_channels should be either 1 or 3") + + if img.shape[-3] == 3: + r, g, b = img.unbind(dim=-3) + # This implementation closely follows the TF one: + # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138 + l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype) + l_img = l_img.unsqueeze(dim=-3) + else: + l_img = img.clone() + + if num_output_channels == 3: + return l_img.expand(img.shape) + + return l_img + + +def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + if brightness_factor < 0: + raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + return _blend(img, torch.zeros_like(img), brightness_factor) + + +def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + if contrast_factor < 0: + raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [3, 1]) + c = get_dimensions(img)[0] + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + if c == 3: + mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True) + else: + mean = torch.mean(img.to(dtype), dim=(-3, -2, -1), keepdim=True) + + return _blend(img, mean, contrast_factor) + + +def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") + + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor image") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + if get_dimensions(img)[0] == 1: # Match PIL behaviour + return img + + orig_dtype = img.dtype + img = convert_image_dtype(img, torch.float32) + + img = _rgb2hsv(img) + h, s, v = img.unbind(dim=-3) + h = (h + hue_factor) % 1.0 + img = torch.stack((h, s, v), dim=-3) + img_hue_adj = _hsv2rgb(img) + 
+ return convert_image_dtype(img_hue_adj, orig_dtype) + + +def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + if saturation_factor < 0: + raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + if get_dimensions(img)[0] == 1: # Match PIL behaviour + return img + + return _blend(img, rgb_to_grayscale(img), saturation_factor) + + +def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + if not isinstance(img, torch.Tensor): + raise TypeError("Input img should be a Tensor.") + + _assert_channels(img, [1, 3]) + + if gamma < 0: + raise ValueError("Gamma should be a non-negative real number") + + result = img + dtype = img.dtype + if not torch.is_floating_point(img): + result = convert_image_dtype(result, torch.float32) + + result = (gain * result**gamma).clamp(0, 1) + + result = convert_image_dtype(result, dtype) + return result + + +def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor: + ratio = float(ratio) + bound = _max_value(img1.dtype) + return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype) + + +def _rgb2hsv(img: Tensor) -> Tensor: + r, g, b = img.unbind(dim=-3) + + # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/ + # src/libImaging/Convert.c#L330 + maxc = torch.max(img, dim=-3).values + minc = torch.min(img, dim=-3).values + + # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN + # from happening in the results, because + # + S channel has division by `maxc`, which is zero only if `maxc = minc` + # + H channel has division by `(maxc - minc)`. + # + # Instead of overwriting NaN afterwards, we just prevent it from occurring, so + # we don't need to deal with it in case we save the NaN in a buffer in + # backprop, if it is ever supported, but it doesn't hurt to do so. 
+ eqc = maxc == minc + + cr = maxc - minc + # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine. + ones = torch.ones_like(maxc) + s = cr / torch.where(eqc, ones, maxc) + # Note that `eqc => maxc = minc = r = g = b`. So the following calculation + # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it + # would not matter what values `rc`, `gc`, and `bc` have here, and thus + # replacing denominator with 1 when `eqc` is fine. + cr_divisor = torch.where(eqc, ones, cr) + rc = (maxc - r) / cr_divisor + gc = (maxc - g) / cr_divisor + bc = (maxc - b) / cr_divisor + + hr = (maxc == r) * (bc - gc) + hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc) + hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc) + h = hr + hg + hb + h = torch.fmod((h / 6.0 + 1.0), 1.0) + return torch.stack((h, s, maxc), dim=-3) + + +def _hsv2rgb(img: Tensor) -> Tensor: + h, s, v = img.unbind(dim=-3) + i = torch.floor(h * 6.0) + f = (h * 6.0) - i + i = i.to(dtype=torch.int32) + + p = torch.clamp((v * (1.0 - s)), 0.0, 1.0) + q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0) + t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0) + i = i % 6 + + mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1) + + a1 = torch.stack((v, q, p, p, t, v), dim=-3) + a2 = torch.stack((t, v, v, q, p, p), dim=-3) + a3 = torch.stack((p, p, t, v, v, q), dim=-3) + a4 = torch.stack((a1, a2, a3), dim=-4) + + return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4) + + +def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor: + # padding is left, right, top, bottom + + # crop if needed + if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0: + neg_min_padding = [-min(x, 0) for x in padding] + crop_left, crop_right, crop_top, crop_bottom = neg_min_padding + img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right] + padding = [max(x, 0) for x in padding] + + in_sizes = img.size() + + 
_x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...] + left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0] + right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3] + x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device) + + _y_indices = [i for i in range(in_sizes[-2])] + top_indices = [i for i in range(padding[2] - 1, -1, -1)] + bottom_indices = [-(i + 1) for i in range(padding[3])] + y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device) + + ndim = img.ndim + if ndim == 3: + return img[:, y_indices[:, None], x_indices[None, :]] + elif ndim == 4: + return img[:, :, y_indices[:, None], x_indices[None, :]] + else: + raise RuntimeError("Symmetric padding of N-D tensors are not supported yet") + + +def _parse_pad_padding(padding: Union[int, List[int]]) -> List[int]: + if isinstance(padding, int): + if torch.jit.is_scripting(): + # This maybe unreachable + raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]") + pad_left = pad_right = pad_top = pad_bottom = padding + elif len(padding) == 1: + pad_left = pad_right = pad_top = pad_bottom = padding[0] + elif len(padding) == 2: + pad_left = pad_right = padding[0] + pad_top = pad_bottom = padding[1] + else: + pad_left = padding[0] + pad_top = padding[1] + pad_right = padding[2] + pad_bottom = padding[3] + + return [pad_left, pad_right, pad_top, pad_bottom] + + +def pad( + img: Tensor, padding: Union[int, List[int]], fill: Optional[Union[int, float]] = 0, padding_mode: str = "constant" +) -> Tensor: + _assert_image_tensor(img) + + if fill is None: + fill = 0 + + if not isinstance(padding, (int, tuple, list)): + raise TypeError("Got inappropriate padding arg") + if not isinstance(fill, (int, float)): + raise TypeError("Got inappropriate fill arg") + if not isinstance(padding_mode, str): + raise TypeError("Got inappropriate padding_mode arg") + + if isinstance(padding, 
tuple): + padding = list(padding) + + if isinstance(padding, list): + # TODO: Jit is failing on loading this op when scripted and saved + # https://github.com/pytorch/pytorch/issues/81100 + if len(padding) not in [1, 2, 4]: + raise ValueError( + f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple" + ) + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + p = _parse_pad_padding(padding) + + if padding_mode == "edge": + # remap padding_mode str + padding_mode = "replicate" + elif padding_mode == "symmetric": + # route to another implementation + return _pad_symmetric(img, p) + + need_squeeze = False + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64): + # Here we temporarily cast input tensor to float + # until pytorch issue is resolved : + # https://github.com/pytorch/pytorch/issues/40763 + need_cast = True + img = img.to(torch.float32) + + if padding_mode in ("reflect", "replicate"): + img = torch_pad(img, p, mode=padding_mode) + else: + img = torch_pad(img, p, mode=padding_mode, value=float(fill)) + + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + img = img.to(out_dtype) + + return img + + +def resize( + img: Tensor, + size: List[int], + interpolation: str = "bilinear", + # TODO: in v0.17, change the default to True. This will a private function + # by then, so we don't care about warning here. 
+ antialias: Optional[bool] = None, +) -> Tensor: + _assert_image_tensor(img) + + if isinstance(size, tuple): + size = list(size) + + if antialias is None: + antialias = False + + if antialias and interpolation not in ["bilinear", "bicubic"]: + # We manually set it to False to avoid an error downstream in interpolate() + # This behaviour is documented: the parameter is irrelevant for modes + # that are not bilinear or bicubic. We used to raise an error here, but + # now we don't as True is the default. + antialias = False + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64]) + + # Define align_corners to avoid warnings + align_corners = False if interpolation in ["bilinear", "bicubic"] else None + + img = interpolate(img, size=size, mode=interpolation, align_corners=align_corners, antialias=antialias) + + if interpolation == "bicubic" and out_dtype == torch.uint8: + img = img.clamp(min=0, max=255) + + img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype) + + return img + + +def _assert_grid_transform_inputs( + img: Tensor, + matrix: Optional[List[float]], + interpolation: str, + fill: Optional[Union[int, float, List[float]]], + supported_interpolation_modes: List[str], + coeffs: Optional[List[float]] = None, +) -> None: + + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor") + + _assert_image_tensor(img) + + if matrix is not None and not isinstance(matrix, list): + raise TypeError("Argument matrix should be a list") + + if matrix is not None and len(matrix) != 6: + raise ValueError("Argument matrix should have 6 float values") + + if coeffs is not None and len(coeffs) != 8: + raise ValueError("Argument coeffs should have 8 float values") + + if fill is not None and not isinstance(fill, (int, float, tuple, list)): + warnings.warn("Argument fill should be either int, float, tuple or list") + + # Check fill + num_channels = 
get_dimensions(img)[0] + if fill is not None and isinstance(fill, (tuple, list)) and len(fill) > 1 and len(fill) != num_channels: + msg = ( + "The number of elements in 'fill' cannot broadcast to match the number of " + "channels of the image ({} != {})" + ) + raise ValueError(msg.format(len(fill), num_channels)) + + if interpolation not in supported_interpolation_modes: + raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with Tensor input") + + +def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]: + need_squeeze = False + # make image NCHW + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if out_dtype not in req_dtypes: + need_cast = True + req_dtype = req_dtypes[0] + img = img.to(req_dtype) + return img, need_cast, need_squeeze, out_dtype + + +def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor: + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64): + # it is better to round before cast + img = torch.round(img) + img = img.to(out_dtype) + + return img + + +def _apply_grid_transform( + img: Tensor, grid: Tensor, mode: str, fill: Optional[Union[int, float, List[float]]] +) -> Tensor: + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype]) + + if img.shape[0] > 1: + # Apply same grid to a batch of images + grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]) + + # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice + if fill is not None: + mask = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device) + img = torch.cat((img, mask), dim=1) + + img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False) + + # Fill with required color + 
if fill is not None: + mask = img[:, -1:, :, :] # N * 1 * H * W + img = img[:, :-1, :, :] # N * C * H * W + mask = mask.expand_as(img) + fill_list, len_fill = (fill, len(fill)) if isinstance(fill, (tuple, list)) else ([float(fill)], 1) + fill_img = torch.tensor(fill_list, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img) + if mode == "nearest": + mask = mask < 0.5 + img[mask] = fill_img[mask] + else: # 'bilinear' + img = img * mask + (1.0 - mask) * fill_img + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def _gen_affine_grid( + theta: Tensor, + w: int, + h: int, + ow: int, + oh: int, +) -> Tensor: + # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/ + # AffineGridGenerator.cpp#L18 + # Difference with AffineGridGenerator is that: + # 1) we normalize grid values after applying theta + # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device) + x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device) + output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta) + return output_grid.view(1, oh, ow, 2) + + +def affine( + img: Tensor, + matrix: List[float], + interpolation: str = "nearest", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, 
device=img.device).reshape(1, 2, 3) + shape = img.shape + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2]) + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]: + + # Inspired of PIL implementation: + # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054 + + # pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points. + # Points are shifted due to affine matrix torch convention about + # the center point. Center is (0, 0) for image center pivot point (w * 0.5, h * 0.5) + pts = torch.tensor( + [ + [-0.5 * w, -0.5 * h, 1.0], + [-0.5 * w, 0.5 * h, 1.0], + [0.5 * w, 0.5 * h, 1.0], + [0.5 * w, -0.5 * h, 1.0], + ] + ) + theta = torch.tensor(matrix, dtype=torch.float).view(2, 3) + new_pts = torch.matmul(pts, theta.T) + min_vals, _ = new_pts.min(dim=0) + max_vals, _ = new_pts.max(dim=0) + + # shift points to [0, w] and [0, h] interval to match PIL results + min_vals += torch.tensor((w * 0.5, h * 0.5)) + max_vals += torch.tensor((w * 0.5, h * 0.5)) + + # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0 + tol = 1e-4 + cmax = torch.ceil((max_vals / tol).trunc_() * tol) + cmin = torch.floor((min_vals / tol).trunc_() * tol) + size = cmax - cmin + return int(size[0]), int(size[1]) # w, h + + +def rotate( + img: Tensor, + matrix: List[float], + interpolation: str = "nearest", + expand: bool = False, + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + w, h = img.shape[-1], img.shape[-2] + ow, oh = _compute_affine_output_size(matrix, w, h) if expand else (w, h) + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, 
device=img.device).reshape(1, 2, 3) + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh) + + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor: + # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/ + # src/libImaging/Geometry.c#L394 + + # + # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1) + # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1) + # + theta1 = torch.tensor( + [[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device + ) + theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device) + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device) + x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device) + output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1) + output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2)) + + output_grid = output_grid1 / output_grid2 - 1.0 + return output_grid.view(1, oh, ow, 2) + + +def perspective( + img: Tensor, + perspective_coeffs: List[float], + interpolation: str = "bilinear", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor.") + + _assert_image_tensor(img) + + _assert_grid_transform_inputs( + img, + matrix=None, + interpolation=interpolation, + fill=fill, 
+ supported_interpolation_modes=["nearest", "bilinear"], + coeffs=perspective_coeffs, + ) + + ow, oh = img.shape[-1], img.shape[-2] + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device) + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor: + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + kernel1d = pdf / pdf.sum() + + return kernel1d + + +def _get_gaussian_kernel2d( + kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device +) -> Tensor: + kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype) + kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype) + kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :]) + return kernel2d + + +def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError(f"img should be Tensor. 
Got {type(img)}") + + _assert_image_tensor(img) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device) + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype]) + + # padding = (left, right, top, bottom) + padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2] + img = torch_pad(img, padding, mode="reflect") + img = conv2d(img, kernel, groups=img.shape[-3]) + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def invert(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + return _max_value(img.dtype) - img + + +def posterize(img: Tensor, bits: int) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + if img.dtype != torch.uint8: + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") + + _assert_channels(img, [1, 3]) + mask = -int(2 ** (8 - bits)) # JIT-friendly for: ~(2 ** (8 - bits) - 1) + return img & mask + + +def solarize(img: Tensor, threshold: float) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + if threshold > _max_value(img.dtype): + raise TypeError("Threshold should be less than bound of img.") + + inverted_img = invert(img) + return torch.where(img >= threshold, inverted_img, img) + + +def _blurred_degenerate_image(img: Tensor) -> Tensor: + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + + kernel = torch.ones((3, 3), 
dtype=dtype, device=img.device) + kernel[1, 1] = 5.0 + kernel /= kernel.sum() + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype]) + result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3]) + result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype) + + result = img.clone() + result[..., 1:-1, 1:-1] = result_tmp + + return result + + +def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: + if sharpness_factor < 0: + raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.") + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + if img.size(-1) <= 2 or img.size(-2) <= 2: + return img + + return _blend(img, _blurred_degenerate_image(img), sharpness_factor) + + +def autocontrast(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}") + + _assert_channels(img, [1, 3]) + + bound = _max_value(img.dtype) + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + + minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype) + maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype) + scale = bound / (maximum - minimum) + eq_idxs = torch.isfinite(scale).logical_not() + minimum[eq_idxs] = 0 + scale[eq_idxs] = 1 + + return ((img - minimum) * scale).clamp(0, bound).to(img.dtype) + + +def _scale_channel(img_chan: Tensor) -> Tensor: + # TODO: we should expect bincount to always be faster than histc, but this + # isn't always the case. Once + # https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if + # block and only use bincount. 
+ if img_chan.is_cuda: + hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255) + else: + hist = torch.bincount(img_chan.reshape(-1), minlength=256) + + nonzero_hist = hist[hist != 0] + step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode="floor") + if step == 0: + return img_chan + + lut = torch.div(torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode="floor"), step, rounding_mode="floor") + lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255) + + return lut[img_chan.to(torch.int64)].to(torch.uint8) + + +def _equalize_single_image(img: Tensor) -> Tensor: + return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))]) + + +def equalize(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if not (3 <= img.ndim <= 4): + raise TypeError(f"Input image tensor should have 3 or 4 dimensions, but found {img.ndim}") + if img.dtype != torch.uint8: + raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}") + + _assert_channels(img, [1, 3]) + + if img.ndim == 3: + return _equalize_single_image(img) + + return torch.stack([_equalize_single_image(x) for x in img]) + + +def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor: + _assert_image_tensor(tensor) + + if not tensor.is_floating_point(): + raise TypeError(f"Input tensor should be a float tensor. Got {tensor.dtype}.") + + if tensor.ndim < 3: + raise ValueError( + f"Expected tensor to be a tensor image of size (..., C, H, W). 
Got tensor.size() = {tensor.size()}" + ) + + if not inplace: + tensor = tensor.clone() + + dtype = tensor.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device) + std = torch.as_tensor(std, dtype=dtype, device=tensor.device) + if (std == 0).any(): + raise ValueError(f"std evaluated to zero after conversion to {dtype}, leading to division by zero.") + if mean.ndim == 1: + mean = mean.view(-1, 1, 1) + if std.ndim == 1: + std = std.view(-1, 1, 1) + return tensor.sub_(mean).div_(std) + + +def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor: + _assert_image_tensor(img) + + if not inplace: + img = img.clone() + + img[..., i : i + h, j : j + w] = v + return img + + +def _create_identity_grid(size: List[int]) -> Tensor: + hw_space = [torch.linspace((-s + 1) / s, (s - 1) / s, s) for s in size] + grid_y, grid_x = torch.meshgrid(hw_space, indexing="ij") + return torch.stack([grid_x, grid_y], -1).unsqueeze(0) # 1 x H x W x 2 + + +def elastic_transform( + img: Tensor, + displacement: Tensor, + interpolation: str = "bilinear", + fill: Optional[Union[int, float, List[float]]] = None, +) -> Tensor: + + if not (isinstance(img, torch.Tensor)): + raise TypeError(f"img should be Tensor. 
Got {type(img)}") + + size = list(img.shape[-2:]) + displacement = displacement.to(img.device) + + identity_grid = _create_identity_grid(size) + grid = identity_grid.to(img.device) + displacement + return _apply_grid_transform(img, grid, interpolation, fill) diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py new file mode 100644 index 0000000000000000000000000000000000000000..91df7d42cd71fc554aba51fcf5e90db30e3c3851 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/_functional_video.py @@ -0,0 +1,114 @@ +import warnings + +import torch + + +warnings.warn( + "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the future. " + "Please use the 'torchvision.transforms.functional' module instead." +) + + +def _is_tensor_video_clip(clip): + if not torch.is_tensor(clip): + raise TypeError("clip should be Tensor. Got %s" % type(clip)) + + if not clip.ndimension() == 4: + raise ValueError("clip should be 4D. Got %dD" % clip.dim()) + + return True + + +def crop(clip, i, j, h, w): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + """ + if len(clip.size()) != 4: + raise ValueError("clip should be a 4D tensor") + return clip[..., i : i + h, j : j + w] + + +def resize(clip, target_size, interpolation_mode): + if len(target_size) != 2: + raise ValueError(f"target size should be tuple (height, width), instead got {target_size}") + return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False) + + +def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): + """ + Do spatial cropping and resizing to the video clip + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + i (int): i in (i,j) i.e coordinates of the upper left corner. 
+ j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the cropped region. + w (int): Width of the cropped region. + size (tuple(int, int)): height and width of resized clip + Returns: + clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + clip = crop(clip, i, j, h, w) + clip = resize(clip, size, interpolation_mode) + return clip + + +def center_crop(clip, crop_size): + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + h, w = clip.size(-2), clip.size(-1) + th, tw = crop_size + if h < th or w < tw: + raise ValueError("height and width must be no smaller than crop_size") + + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) + return crop(clip, i, j, th, tw) + + +def to_tensor(clip): + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) + Return: + clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) + """ + _is_tensor_video_clip(clip) + if not clip.dtype == torch.uint8: + raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) + return clip.float().permute(3, 0, 1, 2) / 255.0 + + +def normalize(clip, mean, std, inplace=False): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + mean (tuple): pixel RGB mean. Size is (3) + std (tuple): pixel standard deviation. 
Size is (3) + Returns: + normalized clip (torch.tensor): Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + if not inplace: + clip = clip.clone() + mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) + std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + return clip + + +def hflip(clip): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + Returns: + flipped clip (torch.tensor): Size is (C, T, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + return clip.flip(-1) diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py new file mode 100644 index 0000000000000000000000000000000000000000..a04da4f74849805641e4c470f6b6b8d5f7000e3a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/_transforms_video.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 + +import numbers +import random +import warnings + +from torchvision.transforms import RandomCrop, RandomResizedCrop + +from . import _functional_video as F + + +__all__ = [ + "RandomCropVideo", + "RandomResizedCropVideo", + "CenterCropVideo", + "NormalizeVideo", + "ToTensorVideo", + "RandomHorizontalFlipVideo", +] + + +warnings.warn( + "The 'torchvision.transforms._transforms_video' module is deprecated since 0.12 and will be removed in the future. " + "Please use the 'torchvision.transforms' module instead." +) + + +class RandomCropVideo(RandomCrop): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. 
class RandomCropVideo(RandomCrop):
    """Crop a (C, T, H, W) video clip at a random location.

    Args:
        size (int or tuple(int, int)): desired (th, tw) output size; an int
            produces a square crop.
    """

    def __init__(self, size):
        # NOTE: RandomCrop.__init__ is intentionally not called; only
        # ``self.size`` is needed because RandomCrop.get_params is static.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped video clip.
                size is (C, T, OH, OW)
        """
        i, j, h, w = self.get_params(clip, self.size)
        return F.crop(clip, i, j, h, w)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size})"


class RandomResizedCropVideo(RandomResizedCrop):
    """Crop a random region of a (C, T, H, W) video clip and resize it.

    Args:
        size (int or sequence(int, int)): output (height, width); an int
            produces a square output.
        scale (tuple): area bounds of the crop relative to the input.
        ratio (tuple): aspect-ratio bounds of the crop.
        interpolation_mode (str): resize interpolation mode.
    """

    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation_mode="bilinear",
    ):
        # Fix: accept any 2-element sequence for ``size``. Previously a list
        # skipped the tuple branch and was silently turned into the invalid
        # pair (list, list); tuples behave exactly as before.
        if isinstance(size, (tuple, list)):
            if len(size) != 2:
                raise ValueError(f"size should be tuple (height, width), instead got {size}")
            self.size = tuple(size)
        else:
            self.size = (size, size)

        self.interpolation_mode = interpolation_mode
        self.scale = scale
        self.ratio = ratio

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped/resized video clip.
                size is (C, T, H, W)
        """
        i, j, h, w = self.get_params(clip, self.scale, self.ratio)
        return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
    """Crop a (C, T, H, W) video clip at its center.

    Args:
        crop_size (int or tuple(int, int)): size of the central crop; an int
            yields a square crop.
    """

    def __init__(self, crop_size):
        self.crop_size = (int(crop_size), int(crop_size)) if isinstance(crop_size, numbers.Number) else crop_size

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: central cropping of video clip. Size is
                (C, T, crop_size, crop_size)
        """
        return F.center_crop(clip, self.crop_size)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(crop_size={self.crop_size})"


class NormalizeVideo:
    """Channel-wise normalization of a video clip: subtract ``mean`` then
    divide by ``std``.

    Args:
        mean (3-tuple): pixel RGB mean
        std (3-tuple): pixel RGB standard deviation
        inplace (boolean): whether do in-place normalization
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
        """
        return F.normalize(clip, self.mean, self.std, self.inplace)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"


class ToTensorVideo:
    """Convert a uint8 (T, H, W, C) clip into a float (C, T, H, W) clip
    scaled by 1/255.
    """

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
        Return:
            clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
        """
        return F.to_tensor(clip)

    def __repr__(self) -> str:
        return self.__class__.__name__
class RandomHorizontalFlipVideo:
    """Horizontally flip a (C, T, H, W) video clip with probability ``p``.

    Args:
        p (float): probability of the clip being flipped. Default value is 0.5
    """

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Size is (C, T, H, W)
        Return:
            clip (torch.tensor): Size is (C, T, H, W)
        """
        # One uniform draw per call decides whether this clip is flipped.
        flip_now = random.random() < self.p
        return F.hflip(clip) if flip_now else clip

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"
def _apply_op(
    img: Tensor, op_name: str, magnitude: float, interpolation: InterpolationMode, fill: Optional[List[float]]
):
    """Apply one named augmentation primitive to ``img`` and return the result.

    ``magnitude`` is interpreted per op: a shear level for ShearX/Y, whole
    pixels for TranslateX/Y, degrees for Rotate, a factor offset for the
    photometric ops, a bit count for Posterize and a threshold for Solarize.
    Raises ValueError for an unrecognized ``op_name``.
    """
    if op_name == "ShearX":
        # The official AutoAugment implementation places the raw level directly
        # in the affine matrix, while F.affine expects an angle in degrees —
        # hence the atan/degrees conversion here.
        return F.affine(
            img,
            angle=0.0,
            translate=[0, 0],
            scale=1.0,
            shear=[math.degrees(math.atan(magnitude)), 0.0],
            interpolation=interpolation,
            fill=fill,
            center=[0, 0],
        )
    if op_name == "ShearY":
        # Same arctan conversion as ShearX, applied on the vertical axis.
        return F.affine(
            img,
            angle=0.0,
            translate=[0, 0],
            scale=1.0,
            shear=[0.0, math.degrees(math.atan(magnitude))],
            interpolation=interpolation,
            fill=fill,
            center=[0, 0],
        )
    if op_name == "TranslateX":
        return F.affine(
            img,
            angle=0.0,
            translate=[int(magnitude), 0],
            scale=1.0,
            interpolation=interpolation,
            shear=[0.0, 0.0],
            fill=fill,
        )
    if op_name == "TranslateY":
        return F.affine(
            img,
            angle=0.0,
            translate=[0, int(magnitude)],
            scale=1.0,
            interpolation=interpolation,
            shear=[0.0, 0.0],
            fill=fill,
        )
    if op_name == "Rotate":
        return F.rotate(img, magnitude, interpolation=interpolation, fill=fill)
    if op_name == "Brightness":
        return F.adjust_brightness(img, 1.0 + magnitude)
    if op_name == "Color":
        return F.adjust_saturation(img, 1.0 + magnitude)
    if op_name == "Contrast":
        return F.adjust_contrast(img, 1.0 + magnitude)
    if op_name == "Sharpness":
        return F.adjust_sharpness(img, 1.0 + magnitude)
    if op_name == "Posterize":
        return F.posterize(img, int(magnitude))
    if op_name == "Solarize":
        return F.solarize(img, magnitude)
    if op_name == "AutoContrast":
        return F.autocontrast(img)
    if op_name == "Equalize":
        return F.equalize(img)
    if op_name == "Invert":
        return F.invert(img)
    if op_name == "Identity":
        return img
    raise ValueError(f"The provided operator {op_name} is not recognized.")


class AutoAugmentPolicy(Enum):
    """AutoAugment policies learned on different datasets.

    Available policies are IMAGENET, CIFAR10 and SVHN.
    """

    IMAGENET = "imagenet"
    CIFAR10 = "cifar10"
    SVHN = "svhn"
class AutoAugment(torch.nn.Module):
    r"""AutoAugment data augmentation method based on
    "AutoAugment: Learning Augmentation Strategies from Data".
    If the image is torch Tensor, it should be of type torch.uint8, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        policy (AutoAugmentPolicy): Desired policy enum defined by
            :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
    """

    def __init__(
        self,
        policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: Optional[List[float]] = None,
    ) -> None:
        super().__init__()
        self.policy = policy
        self.interpolation = interpolation
        self.fill = fill
        # Resolve the enum into its concrete list of sub-policies once, up front.
        self.policies = self._get_policies(policy)

    def _get_policies(
        self, policy: AutoAugmentPolicy
    ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
        # Each sub-policy is a pair of (op_name, probability, magnitude_idx) steps.
        # magnitude_idx indexes the 10-bin tables from _augmentation_space; it is
        # None for parameterless ops (Equalize, Invert, AutoContrast).
        if policy == AutoAugmentPolicy.IMAGENET:
            return [
                (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
                (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
                (("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
                (("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
                (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
                (("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
                (("Rotate", 0.8, 8), ("Color", 0.4, 0)),
                (("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
                (("Equalize", 0.0, None), ("Equalize", 0.8, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Rotate", 0.8, 8), ("Color", 1.0, 2)),
                (("Color", 0.8, 8), ("Solarize", 0.8, 7)),
                (("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
                (("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
                (("Color", 0.4, 0), ("Equalize", 0.6, None)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
            ]
        elif policy == AutoAugmentPolicy.CIFAR10:
            return [
                (("Invert", 0.1, None), ("Contrast", 0.2, 6)),
                (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
                (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
                (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
                (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
                (("Color", 0.4, 3), ("Brightness", 0.6, 7)),
                (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
                (("Equalize", 0.6, None), ("Equalize", 0.5, None)),
                (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
                (("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
                (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
                (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
                (("Brightness", 0.9, 6), ("Color", 0.2, 8)),
                (("Solarize", 0.5, 2), ("Invert", 0.0, None)),
                (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.2, None), ("Equalize", 0.6, None)),
                (("Color", 0.9, 9), ("Equalize", 0.6, None)),
                (("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
                (("Brightness", 0.1, 3), ("Color", 0.7, 0)),
                (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
                (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
                (("Equalize", 0.8, None), ("Invert", 0.1, None)),
                (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
            ]
        elif policy == AutoAugmentPolicy.SVHN:
            return [
                (("ShearX", 0.9, 4), ("Invert", 0.2, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.4, None)),
                (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
                (("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
                (("ShearY", 0.8, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
                (("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
                (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
                (("Invert", 0.6, None), ("Rotate", 0.8, 4)),
                (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
                (("ShearX", 0.1, 6), ("Invert", 0.6, None)),
                (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
                (("ShearY", 0.8, 4), ("Invert", 0.8, None)),
                (("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
                (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
                (("ShearX", 0.7, 2), ("Invert", 0.1, None)),
            ]
        else:
            raise ValueError(f"The provided policy {policy} is not recognized.")

    def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]:
        # Maps each op to its magnitude bins and whether the magnitude may be
        # negated (sign-symmetric ops). 0-d tensors mark parameterless ops.
        return {
            # op_name: (magnitudes, signed)
            "ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
            "ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
            "TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True),
            "TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True),
            "Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
            "Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Color": (torch.linspace(0.0, 0.9, num_bins), True),
            "Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
            "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
            "Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
            "AutoContrast": (torch.tensor(0.0), False),
            "Equalize": (torch.tensor(0.0), False),
            "Invert": (torch.tensor(0.0), False),
        }

    @staticmethod
    def get_params(transform_num: int) -> Tuple[int, Tensor, Tensor]:
        """Get parameters for autoaugment transformation

        Returns:
            params required by the autoaugment transformation:
            a sub-policy index, two per-step application probabilities and
            two per-step sign draws.
        """
        policy_id = int(torch.randint(transform_num, (1,)).item())
        probs = torch.rand((2,))
        signs = torch.randint(2, (2,))

        return policy_id, probs, signs

    def forward(self, img: Tensor) -> Tensor:
        """
        img (PIL Image or Tensor): Image to be transformed.

        Returns:
            PIL Image or Tensor: AutoAugmented image.
        """
        fill = self.fill
        channels, height, width = F.get_dimensions(img)
        if isinstance(img, Tensor):
            # For tensor inputs, normalize fill into a per-channel float list.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            elif fill is not None:
                fill = [float(f) for f in fill]

        transform_id, probs, signs = self.get_params(len(self.policies))

        op_meta = self._augmentation_space(10, (height, width))
        for i, (op_name, p, magnitude_id) in enumerate(self.policies[transform_id]):
            # Each of the two steps fires independently with its own probability.
            if probs[i] <= p:
                magnitudes, signed = op_meta[op_name]
                magnitude = float(magnitudes[magnitude_id].item()) if magnitude_id is not None else 0.0
                if signed and signs[i] == 0:
                    magnitude *= -1.0
                img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)

        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(policy={self.policy}, fill={self.fill})"
class RandAugment(torch.nn.Module):
    r"""RandAugment data augmentation method based on
    "RandAugment: Practical automated data augmentation with a reduced search space".

    A fixed number of randomly chosen ops is applied, all at the same magnitude.
    If the image is torch Tensor, it should be of type torch.uint8, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        num_ops (int): Number of augmentation transformations to apply sequentially.
        magnitude (int): Magnitude (bin index) shared by all the transformations.
        num_magnitude_bins (int): The number of different magnitude values.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
    """

    def __init__(
        self,
        num_ops: int = 2,
        magnitude: int = 9,
        num_magnitude_bins: int = 31,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: Optional[List[float]] = None,
    ) -> None:
        super().__init__()
        self.num_ops = num_ops
        self.magnitude = magnitude
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill

    def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]:
        # op name -> (per-bin magnitudes, whether the magnitude may be negated)
        return {
            "Identity": (torch.tensor(0.0), False),
            "ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
            "ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
            "TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True),
            "TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True),
            "Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
            "Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Color": (torch.linspace(0.0, 0.9, num_bins), True),
            "Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
            "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
            "Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
            "AutoContrast": (torch.tensor(0.0), False),
            "Equalize": (torch.tensor(0.0), False),
        }

    def forward(self, img: Tensor) -> Tensor:
        """
        img (PIL Image or Tensor): Image to be transformed.

        Returns:
            PIL Image or Tensor: Transformed image.
        """
        fill = self.fill
        channels, height, width = F.get_dimensions(img)
        if isinstance(img, Tensor):
            # Normalize fill into a per-channel float list for tensor inputs.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            elif fill is not None:
                fill = [float(f) for f in fill]

        space = self._augmentation_space(self.num_magnitude_bins, (height, width))
        names = list(space.keys())
        for _ in range(self.num_ops):
            # Pick an op uniformly at random; all ops share self.magnitude.
            chosen = names[int(torch.randint(len(space), (1,)).item())]
            magnitudes, signed = space[chosen]
            level = float(magnitudes[self.magnitude].item()) if magnitudes.ndim > 0 else 0.0
            if signed and torch.randint(2, (1,)):
                level *= -1.0
            img = _apply_op(img, chosen, level, interpolation=self.interpolation, fill=fill)

        return img

    def __repr__(self) -> str:
        parts = (
            f"num_ops={self.num_ops}",
            f"magnitude={self.magnitude}",
            f"num_magnitude_bins={self.num_magnitude_bins}",
            f"interpolation={self.interpolation}",
            f"fill={self.fill}",
        )
        return f"{self.__class__.__name__}({', '.join(parts)})"
class TrivialAugmentWide(torch.nn.Module):
    r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in
    "TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation".

    One op and one magnitude are drawn uniformly at random per call.
    If the image is torch Tensor, it should be of type torch.uint8, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        num_magnitude_bins (int): The number of different magnitude values.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
    """

    def __init__(
        self,
        num_magnitude_bins: int = 31,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: Optional[List[float]] = None,
    ) -> None:
        super().__init__()
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill

    def _augmentation_space(self, num_bins: int) -> Dict[str, Tuple[Tensor, bool]]:
        # op name -> (per-bin magnitudes, whether the magnitude may be negated).
        # "Wide" ranges compared to RandAugment (e.g. shear up to 0.99, rotation
        # up to 135 degrees).
        return {
            "Identity": (torch.tensor(0.0), False),
            "ShearX": (torch.linspace(0.0, 0.99, num_bins), True),
            "ShearY": (torch.linspace(0.0, 0.99, num_bins), True),
            "TranslateX": (torch.linspace(0.0, 32.0, num_bins), True),
            "TranslateY": (torch.linspace(0.0, 32.0, num_bins), True),
            "Rotate": (torch.linspace(0.0, 135.0, num_bins), True),
            "Brightness": (torch.linspace(0.0, 0.99, num_bins), True),
            "Color": (torch.linspace(0.0, 0.99, num_bins), True),
            "Contrast": (torch.linspace(0.0, 0.99, num_bins), True),
            "Sharpness": (torch.linspace(0.0, 0.99, num_bins), True),
            "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False),
            "Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
            "AutoContrast": (torch.tensor(0.0), False),
            "Equalize": (torch.tensor(0.0), False),
        }

    def forward(self, img: Tensor) -> Tensor:
        """
        img (PIL Image or Tensor): Image to be transformed.

        Returns:
            PIL Image or Tensor: Transformed image.
        """
        fill = self.fill
        channels, height, width = F.get_dimensions(img)
        if isinstance(img, Tensor):
            # Normalize fill into a per-channel float list for tensor inputs.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            elif fill is not None:
                fill = [float(f) for f in fill]

        space = self._augmentation_space(self.num_magnitude_bins)
        chosen = list(space.keys())[int(torch.randint(len(space), (1,)).item())]
        magnitudes, signed = space[chosen]
        if magnitudes.ndim > 0:
            # Uniformly pick one of the magnitude bins for the chosen op.
            bin_idx = torch.randint(len(magnitudes), (1,), dtype=torch.long)
            level = float(magnitudes[bin_idx].item())
        else:
            level = 0.0
        if signed and torch.randint(2, (1,)):
            level *= -1.0

        return _apply_op(img, chosen, level, interpolation=self.interpolation, fill=fill)

    def __repr__(self) -> str:
        parts = (
            f"num_magnitude_bins={self.num_magnitude_bins}",
            f"interpolation={self.interpolation}",
            f"fill={self.fill}",
        )
        return f"{self.__class__.__name__}({', '.join(parts)})"
+ """ + fill = self.fill + channels, height, width = F.get_dimensions(img) + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + + op_meta = self._augmentation_space(self.num_magnitude_bins) + op_index = int(torch.randint(len(op_meta), (1,)).item()) + op_name = list(op_meta.keys())[op_index] + magnitudes, signed = op_meta[op_name] + magnitude = ( + float(magnitudes[torch.randint(len(magnitudes), (1,), dtype=torch.long)].item()) + if magnitudes.ndim > 0 + else 0.0 + ) + if signed and torch.randint(2, (1,)): + magnitude *= -1.0 + + return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill) + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"num_magnitude_bins={self.num_magnitude_bins}" + f", interpolation={self.interpolation}" + f", fill={self.fill}" + f")" + ) + return s + + +class AugMix(torch.nn.Module): + r"""AugMix data augmentation method based on + `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" `_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + severity (int): The severity of base augmentation operators. Default is ``3``. + mixture_width (int): The number of augmentation chains. Default is ``3``. + chain_depth (int): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3]. + Default is ``-1``. + alpha (float): The hyperparameter for the probability distributions. Default is ``1.0``. + all_ops (bool): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``. 
+ interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + """ + + def __init__( + self, + severity: int = 3, + mixture_width: int = 3, + chain_depth: int = -1, + alpha: float = 1.0, + all_ops: bool = True, + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + fill: Optional[List[float]] = None, + ) -> None: + super().__init__() + self._PARAMETER_MAX = 10 + if not (1 <= severity <= self._PARAMETER_MAX): + raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.") + self.severity = severity + self.mixture_width = mixture_width + self.chain_depth = chain_depth + self.alpha = alpha + self.all_ops = all_ops + self.interpolation = interpolation + self.fill = fill + + def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]: + s = { + # op_name: (magnitudes, signed) + "ShearX": (torch.linspace(0.0, 0.3, num_bins), True), + "ShearY": (torch.linspace(0.0, 0.3, num_bins), True), + "TranslateX": (torch.linspace(0.0, image_size[1] / 3.0, num_bins), True), + "TranslateY": (torch.linspace(0.0, image_size[0] / 3.0, num_bins), True), + "Rotate": (torch.linspace(0.0, 30.0, num_bins), True), + "Posterize": (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False), + "Solarize": (torch.linspace(255.0, 0.0, num_bins), False), + "AutoContrast": (torch.tensor(0.0), False), + "Equalize": (torch.tensor(0.0), False), + } + if self.all_ops: + s.update( + { + "Brightness": (torch.linspace(0.0, 0.9, num_bins), True), + "Color": (torch.linspace(0.0, 0.9, num_bins), True), + "Contrast": 
(torch.linspace(0.0, 0.9, num_bins), True), + "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True), + } + ) + return s + + @torch.jit.unused + def _pil_to_tensor(self, img) -> Tensor: + return F.pil_to_tensor(img) + + @torch.jit.unused + def _tensor_to_pil(self, img: Tensor): + return F.to_pil_image(img) + + def _sample_dirichlet(self, params: Tensor) -> Tensor: + # Must be on a separate method so that we can overwrite it in tests. + return torch._sample_dirichlet(params) + + def forward(self, orig_img: Tensor) -> Tensor: + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Transformed image. + """ + fill = self.fill + channels, height, width = F.get_dimensions(orig_img) + if isinstance(orig_img, Tensor): + img = orig_img + if isinstance(fill, (int, float)): + fill = [float(fill)] * channels + elif fill is not None: + fill = [float(f) for f in fill] + else: + img = self._pil_to_tensor(orig_img) + + op_meta = self._augmentation_space(self._PARAMETER_MAX, (height, width)) + + orig_dims = list(img.shape) + batch = img.view([1] * max(4 - img.ndim, 0) + orig_dims) + batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1) + + # Sample the beta weights for combining the original and augmented image. To get Beta, we use a Dirichlet + # with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of augmented image. + m = self._sample_dirichlet( + torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1) + ) + + # Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images. 
+ combined_weights = self._sample_dirichlet( + torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1) + ) * m[:, 1].view([batch_dims[0], -1]) + + mix = m[:, 0].view(batch_dims) * batch + for i in range(self.mixture_width): + aug = batch + depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item()) + for _ in range(depth): + op_index = int(torch.randint(len(op_meta), (1,)).item()) + op_name = list(op_meta.keys())[op_index] + magnitudes, signed = op_meta[op_name] + magnitude = ( + float(magnitudes[torch.randint(self.severity, (1,), dtype=torch.long)].item()) + if magnitudes.ndim > 0 + else 0.0 + ) + if signed and torch.randint(2, (1,)): + magnitude *= -1.0 + aug = _apply_op(aug, op_name, magnitude, interpolation=self.interpolation, fill=fill) + mix.add_(combined_weights[:, i].view(batch_dims) * aug) + mix = mix.view(orig_dims).to(dtype=img.dtype) + + if not isinstance(orig_img, Tensor): + return self._tensor_to_pil(mix) + return mix + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"severity={self.severity}" + f", mixture_width={self.mixture_width}" + f", chain_depth={self.chain_depth}" + f", alpha={self.alpha}" + f", all_ops={self.all_ops}" + f", interpolation={self.interpolation}" + f", fill={self.fill}" + f")" + ) + return s diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/functional_pil.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/functional_pil.py new file mode 100644 index 0000000000000000000000000000000000000000..bfcbf1a54424dad2b053febcc825e93aba9ce812 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/functional_pil.py @@ -0,0 +1,11 @@ +import warnings + +from torchvision.transforms._functional_pil import * # noqa + +warnings.warn( + "The torchvision.transforms.functional_pil module is deprecated " + "in 0.15 and will be **removed in 0.17**. Please don't rely on it. 
" + "You probably just need to use APIs in " + "torchvision.transforms.functional or in " + "torchvision.transforms.v2.functional." +) diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_geometry.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_geometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab14f7a0481038b527eed122f133bf71c8bc6e8 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_geometry.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_meta.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_meta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..970be96295453f0a60adb7bd8e0f09f6d56f3cd9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/__pycache__/_meta.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/_geometry.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/_geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..59791c30b9da4c2fdae78d13fe0b560d82ed11c7 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/_geometry.py @@ -0,0 +1,1433 @@ +import math +import numbers +import warnings +from typing import Any, cast, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union + +import PIL.Image +import torch + +from torchvision import datapoints, transforms as _transforms +from torchvision.ops.boxes import box_iou +from torchvision.transforms.functional import _get_perspective_coeffs +from torchvision.transforms.v2 import functional as F, InterpolationMode, Transform +from torchvision.transforms.v2.functional._geometry import _check_interpolation + +from ._transform import _RandomApplyTransform +from ._utils 
import ( + _check_padding_arg, + _check_padding_mode_arg, + _check_sequence_input, + _setup_angle, + _setup_fill_arg, + _setup_float_or_seq, + _setup_size, +) +from .utils import has_all, has_any, is_simple_tensor, query_bounding_box, query_spatial_size + + +class RandomHorizontalFlip(_RandomApplyTransform): + """[BETA] Horizontally flip the input with a given probability. + + .. v2betastatus:: RandomHorizontalFlip transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + p (float, optional): probability of the input being flipped. Default value is 0.5 + """ + + _v1_transform_cls = _transforms.RandomHorizontalFlip + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + return F.horizontal_flip(inpt) + + +class RandomVerticalFlip(_RandomApplyTransform): + """[BETA] Vertically flip the input with a given probability. + + .. v2betastatus:: RandomVerticalFlip transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + p (float, optional): probability of the input being flipped. Default value is 0.5 + """ + + _v1_transform_cls = _transforms.RandomVerticalFlip + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + return F.vertical_flip(inpt) + + +class Resize(Transform): + """[BETA] Resize the input to the given size. + + .. 
v2betastatus:: Resize transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + .. warning:: + The output image might be different depending on its type: when downsampling, the interpolation of PIL images + and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences + in the performance of a network. Therefore, it is preferable to train and serve a model with the same input + types. See also below the ``antialias`` parameter, which can help making the output of PIL images and tensors + closer. + + Args: + size (sequence or int): Desired output size. If size is a sequence like + (h, w), output size will be matched to this. If size is an int, + smaller edge of the image will be matched to this number. + i.e, if height > width, then image will be rescaled to + (size * height / width, size). + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``, + ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. 
+ max_size (int, optional): The maximum allowed for the longer edge of + the resized image: if the longer edge of the image is greater + than ``max_size`` after being resized according to ``size``, then + the image is resized again so that the longer edge is equal to + ``max_size``. As a result, ``size`` might be overruled, i.e. the + smaller edge may be shorter than ``size``. This is only supported + if ``size`` is an int (or a sequence of length 1 in torchscript + mode). + antialias (bool, optional): Whether to apply antialiasing. + It only affects **tensors** with bilinear or bicubic modes and it is + ignored otherwise: on PIL images, antialiasing is always applied on + bilinear or bicubic modes; on other modes (for PIL images and + tensors), antialiasing makes no sense and this parameter is ignored. + Possible values are: + + - ``True``: will apply antialiasing for bilinear or bicubic modes. + Other modes aren't affected. This is probably what you want to use. + - ``False``: will not apply antialiasing for tensors on any mode. PIL + images are still antialiased on bilinear or bicubic modes, because + PIL doesn't support no antialias. + - ``None``: equivalent to ``False`` for tensors and ``True`` for + PIL images. This value exists for legacy reasons and you probably + don't want to use it unless you really know what you are doing. + + The current default is ``None`` **but will change to** ``True`` **in + v0.17** for the PIL and Tensor backends to be consistent. 
+ """ + + _v1_transform_cls = _transforms.Resize + + def __init__( + self, + size: Union[int, Sequence[int]], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[Union[str, bool]] = "warn", + ) -> None: + super().__init__() + + if isinstance(size, int): + size = [size] + elif isinstance(size, (list, tuple)) and len(size) in {1, 2}: + size = list(size) + else: + raise ValueError( + f"size can either be an integer or a list or tuple of one or two integers, " f"but got {size} instead." + ) + self.size = size + + self.interpolation = _check_interpolation(interpolation) + self.max_size = max_size + self.antialias = antialias + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + return F.resize( + inpt, + self.size, + interpolation=self.interpolation, + max_size=self.max_size, + antialias=self.antialias, + ) + + +class CenterCrop(Transform): + """[BETA] Crop the input at the center. + + .. v2betastatus:: CenterCrop transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). 
+ """ + + _v1_transform_cls = _transforms.CenterCrop + + def __init__(self, size: Union[int, Sequence[int]]): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + return F.center_crop(inpt, output_size=self.size) + + +class RandomResizedCrop(Transform): + """[BETA] Crop a random portion of the input and resize it to a given size. + + .. v2betastatus:: RandomResizedCrop transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + A crop of the original input is made: the crop has a random area (H * W) + and a random aspect ratio. This crop is finally resized to the given + size. This is popularly used to train the Inception networks. + + Args: + size (int or sequence): expected output size of the crop, for each edge. If size is an + int instead of sequence like (h, w), a square output size ``(size, size)`` is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + scale (tuple of float, optional): Specifies the lower and upper bounds for the random area of the crop, + before resizing. The scale is defined with respect to the area of the original image. + ratio (tuple of float, optional): lower and upper bounds for the random aspect ratio of the crop, before + resizing. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. 
+ If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``, + ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + antialias (bool, optional): Whether to apply antialiasing. + It only affects **tensors** with bilinear or bicubic modes and it is + ignored otherwise: on PIL images, antialiasing is always applied on + bilinear or bicubic modes; on other modes (for PIL images and + tensors), antialiasing makes no sense and this parameter is ignored. + Possible values are: + + - ``True``: will apply antialiasing for bilinear or bicubic modes. + Other modes aren't affected. This is probably what you want to use. + - ``False``: will not apply antialiasing for tensors on any mode. PIL + images are still antialiased on bilinear or bicubic modes, because + PIL doesn't support no antialias. + - ``None``: equivalent to ``False`` for tensors and ``True`` for + PIL images. This value exists for legacy reasons and you probably + don't want to use it unless you really know what you are doing. + + The current default is ``None`` **but will change to** ``True`` **in + v0.17** for the PIL and Tensor backends to be consistent. 
+ """ + + _v1_transform_cls = _transforms.RandomResizedCrop + + def __init__( + self, + size: Union[int, Sequence[int]], + scale: Tuple[float, float] = (0.08, 1.0), + ratio: Tuple[float, float] = (3.0 / 4.0, 4.0 / 3.0), + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + antialias: Optional[Union[str, bool]] = "warn", + ) -> None: + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + if not isinstance(scale, Sequence): + raise TypeError("Scale should be a sequence") + scale = cast(Tuple[float, float], scale) + if not isinstance(ratio, Sequence): + raise TypeError("Ratio should be a sequence") + ratio = cast(Tuple[float, float], ratio) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("Scale and ratio should be of kind (min, max)") + + self.scale = scale + self.ratio = ratio + self.interpolation = _check_interpolation(interpolation) + self.antialias = antialias + + self._log_ratio = torch.log(torch.tensor(self.ratio)) + + def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]: + height, width = query_spatial_size(flat_inputs) + area = height * width + + log_ratio = self._log_ratio + for _ in range(10): + target_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item() + aspect_ratio = torch.exp( + torch.empty(1).uniform_( + log_ratio[0], # type: ignore[arg-type] + log_ratio[1], # type: ignore[arg-type] + ) + ).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < w <= width and 0 < h <= height: + i = torch.randint(0, height - h + 1, size=(1,)).item() + j = torch.randint(0, width - w + 1, size=(1,)).item() + break + else: + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(self.ratio): + w = width + h = int(round(w / min(self.ratio))) + elif in_ratio > max(self.ratio): + h = height + w = int(round(h * 
max(self.ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + + return dict(top=i, left=j, height=h, width=w) + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + return F.resized_crop( + inpt, **params, size=self.size, interpolation=self.interpolation, antialias=self.antialias + ) + + +ImageOrVideoTypeJIT = Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT] + + +class FiveCrop(Transform): + """[BETA] Crop the image or video into four corners and the central crop. + + .. v2betastatus:: FiveCrop transform + + If the input is a :class:`torch.Tensor` or a :class:`~torchvision.datapoints.Image` or a + :class:`~torchvision.datapoints.Video` it can have arbitrary number of leading batch dimensions. + For example, the image can have ``[..., C, H, W]`` shape. + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an ``int`` + instead of sequence like (h, w), a square crop of size (size, size) is made. + If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + + Example: + >>> class BatchMultiCrop(transforms.Transform): + ... def forward(self, sample: Tuple[Tuple[Union[datapoints.Image, datapoints.Video], ...], int]): + ... images_or_videos, labels = sample + ... batch_size = len(images_or_videos) + ... image_or_video = images_or_videos[0] + ... images_or_videos = image_or_video.wrap_like(image_or_video, torch.stack(images_or_videos)) + ... labels = torch.full((batch_size,), label, device=images_or_videos.device) + ... return images_or_videos, labels + ... 
+ >>> image = datapoints.Image(torch.rand(3, 256, 256)) + >>> label = 3 + >>> transform = transforms.Compose([transforms.FiveCrop(224), BatchMultiCrop()]) + >>> images, labels = transform(image, label) + >>> images.shape + torch.Size([5, 3, 224, 224]) + >>> labels + tensor([3, 3, 3, 3, 3]) + """ + + _v1_transform_cls = _transforms.FiveCrop + + _transformed_types = ( + datapoints.Image, + PIL.Image.Image, + is_simple_tensor, + datapoints.Video, + ) + + def __init__(self, size: Union[int, Sequence[int]]) -> None: + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + def _transform( + self, inpt: ImageOrVideoTypeJIT, params: Dict[str, Any] + ) -> Tuple[ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT]: + return F.five_crop(inpt, self.size) + + def _check_inputs(self, flat_inputs: List[Any]) -> None: + if has_any(flat_inputs, datapoints.BoundingBox, datapoints.Mask): + raise TypeError(f"BoundingBox'es and Mask's are not supported by {type(self).__name__}()") + + +class TenCrop(Transform): + """[BETA] Crop the image or video into four corners and the central crop plus the flipped version of + these (horizontal flipping is used by default). + + .. v2betastatus:: TenCrop transform + + If the input is a :class:`torch.Tensor` or a :class:`~torchvision.datapoints.Image` or a + :class:`~torchvision.datapoints.Video` it can have arbitrary number of leading batch dimensions. + For example, the image can have ``[..., C, H, W]`` shape. + + See :class:`~torchvision.transforms.v2.FiveCrop` for an example. + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool, optional): Use vertical flipping instead of horizontal + """ + + _v1_transform_cls = _transforms.TenCrop + + _transformed_types = ( + datapoints.Image, + PIL.Image.Image, + is_simple_tensor, + datapoints.Video, + ) + + def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) -> None: + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + self.vertical_flip = vertical_flip + + def _check_inputs(self, flat_inputs: List[Any]) -> None: + if has_any(flat_inputs, datapoints.BoundingBox, datapoints.Mask): + raise TypeError(f"BoundingBox'es and Mask's are not supported by {type(self).__name__}()") + + def _transform( + self, inpt: Union[datapoints._ImageType, datapoints._VideoType], params: Dict[str, Any] + ) -> Tuple[ + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ImageOrVideoTypeJIT, + ]: + return F.ten_crop(inpt, self.size, vertical_flip=self.vertical_flip) + + +class Pad(Transform): + """[BETA] Pad the input on all sides with the given "pad" value. + + .. v2betastatus:: Pad transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + padding (int or sequence): Padding on each border. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. 
If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. note:: + In torchscript mode padding as single int is not supported, use a sequence of + length 1: ``[padding, ]``. + fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant. + Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. + Fill value can be also a dictionary mapping data type to the fill value, e.g. + ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and + ``Mask`` will be filled with 0. + padding_mode (str, optional): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is "constant". + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image. + + - reflect: pads with reflection of image without repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge. 
+ For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + """ + + _v1_transform_cls = _transforms.Pad + + def _extract_params_for_v1_transform(self) -> Dict[str, Any]: + params = super()._extract_params_for_v1_transform() + + if not (params["fill"] is None or isinstance(params["fill"], (int, float))): + raise ValueError(f"{type(self).__name__}() can only be scripted for a scalar `fill`, but got {self.fill}.") + + return params + + def __init__( + self, + padding: Union[int, Sequence[int]], + fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0, + padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant", + ) -> None: + super().__init__() + + _check_padding_arg(padding) + _check_padding_mode_arg(padding_mode) + + # This cast does Sequence[int] -> List[int] and is required to make mypy happy + if not isinstance(padding, int): + padding = list(padding) + self.padding = padding + self.fill = fill + self._fill = _setup_fill_arg(fill) + self.padding_mode = padding_mode + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + fill = self._fill[type(inpt)] + return F.pad(inpt, padding=self.padding, fill=fill, padding_mode=self.padding_mode) # type: ignore[arg-type] + + +class RandomZoomOut(_RandomApplyTransform): + """[BETA] "Zoom out" transformation from + `"SSD: Single Shot MultiBox Detector" `_. + + .. v2betastatus:: RandomZoomOut transform + + This transformation randomly pads images, videos, bounding boxes and masks creating a zoom out effect. + Output spatial size is randomly sampled from original size up to a maximum size configured + with ``side_range`` parameter: + + .. code-block:: python + + r = uniform_sample(side_range[0], side_range[1]) + output_width = input_width * r + output_height = input_height * r + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. 
:class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant. + Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. + Fill value can be also a dictionary mapping data type to the fill value, e.g. + ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and + ``Mask`` will be filled with 0. + side_range (sequence of floats, optional): tuple of two floats defines minimum and maximum factors to + scale the input size. + p (float, optional): probability of the input being flipped. Default value is 0.5 + """ + + def __init__( + self, + fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0, + side_range: Sequence[float] = (1.0, 4.0), + p: float = 0.5, + ) -> None: + super().__init__(p=p) + + self.fill = fill + self._fill = _setup_fill_arg(fill) + + _check_sequence_input(side_range, "side_range", req_sizes=(2,)) + + self.side_range = side_range + if side_range[0] < 1.0 or side_range[0] > side_range[1]: + raise ValueError(f"Invalid canvas side range provided {side_range}.") + + def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]: + orig_h, orig_w = query_spatial_size(flat_inputs) + + r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0]) + canvas_width = int(orig_w * r) + canvas_height = int(orig_h * r) + + r = torch.rand(2) + left = int((canvas_width - orig_w) * r[0]) + top = int((canvas_height - orig_h) * r[1]) + right = canvas_width - (left + orig_w) + bottom = canvas_height - (top + orig_h) + padding = [left, top, right, bottom] + + return dict(padding=padding) + + def _transform(self, inpt: Any, 
params: Dict[str, Any]) -> Any: + fill = self._fill[type(inpt)] + return F.pad(inpt, **params, fill=fill) + + +class RandomRotation(Transform): + """[BETA] Rotate the input by angle. + + .. v2betastatus:: RandomRotation transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + degrees (sequence or number): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + expand (bool, optional): Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant. + Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. + Fill value can be also a dictionary mapping data type to the fill value, e.g. 
+ ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and + ``Mask`` will be filled with 0. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + _v1_transform_cls = _transforms.RandomRotation + + def __init__( + self, + degrees: Union[numbers.Number, Sequence], + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[float]] = None, + fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0, + ) -> None: + super().__init__() + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,)) + self.interpolation = _check_interpolation(interpolation) + self.expand = expand + + self.fill = fill + self._fill = _setup_fill_arg(fill) + + if center is not None: + _check_sequence_input(center, "center", req_sizes=(2,)) + + self.center = center + + def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]: + angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item() + return dict(angle=angle) + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + fill = self._fill[type(inpt)] + return F.rotate( + inpt, + **params, + interpolation=self.interpolation, + expand=self.expand, + center=self.center, + fill=fill, + ) + + +class RandomAffine(Transform): + """[BETA] Random affine transformation the input keeping center invariant. + + .. v2betastatus:: RandomAffine transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + degrees (sequence or number): Range of degrees to select from. 
+ If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). Set to 0 to deactivate rotations. + translate (tuple, optional): tuple of maximum absolute fraction for horizontal + and vertical translations. For example translate=(a, b), then horizontal shift + is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is + randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default. + scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is + randomly sampled from the range a <= scale <= b. Will keep original scale by default. + shear (sequence or number, optional): Range of degrees to select from. + If shear is a number, a shear parallel to the x-axis in the range (-shear, +shear) + will be applied. Else if shear is a sequence of 2 values a shear parallel to the x-axis in the + range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values, + an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + Will not apply shear by default. + interpolation (InterpolationMode, optional): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well. + fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant. + Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. + Fill value can be also a dictionary mapping data type to the fill value, e.g. + ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and + ``Mask`` will be filled with 0. 
+ center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + _v1_transform_cls = _transforms.RandomAffine + + def __init__( + self, + degrees: Union[numbers.Number, Sequence], + translate: Optional[Sequence[float]] = None, + scale: Optional[Sequence[float]] = None, + shear: Optional[Union[int, float, Sequence[float]]] = None, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0, + center: Optional[List[float]] = None, + ) -> None: + super().__init__() + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,)) + if translate is not None: + _check_sequence_input(translate, "translate", req_sizes=(2,)) + for t in translate: + if not (0.0 <= t <= 1.0): + raise ValueError("translation values should be between 0 and 1") + self.translate = translate + if scale is not None: + _check_sequence_input(scale, "scale", req_sizes=(2,)) + for s in scale: + if s <= 0: + raise ValueError("scale values should be positive") + self.scale = scale + + if shear is not None: + self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4)) + else: + self.shear = shear + + self.interpolation = _check_interpolation(interpolation) + self.fill = fill + self._fill = _setup_fill_arg(fill) + + if center is not None: + _check_sequence_input(center, "center", req_sizes=(2,)) + + self.center = center + + def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]: + height, width = query_spatial_size(flat_inputs) + + angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item() + if self.translate is not None: + max_dx = float(self.translate[0] * width) + max_dy = float(self.translate[1] * height) + tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item())) + ty = 
int(round(torch.empty(1).uniform_(-max_dy, max_dy).item())) + translate = (tx, ty) + else: + translate = (0, 0) + + if self.scale is not None: + scale = torch.empty(1).uniform_(self.scale[0], self.scale[1]).item() + else: + scale = 1.0 + + shear_x = shear_y = 0.0 + if self.shear is not None: + shear_x = torch.empty(1).uniform_(self.shear[0], self.shear[1]).item() + if len(self.shear) == 4: + shear_y = torch.empty(1).uniform_(self.shear[2], self.shear[3]).item() + + shear = (shear_x, shear_y) + return dict(angle=angle, translate=translate, scale=scale, shear=shear) + + def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + fill = self._fill[type(inpt)] + return F.affine( + inpt, + **params, + interpolation=self.interpolation, + fill=fill, + center=self.center, + ) + + +class RandomCrop(Transform): + """[BETA] Crop the input at a random location. + + .. v2betastatus:: RandomCrop transform + + If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, + :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) + it can have arbitrary number of leading batch dimensions. For example, + the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + padding (int or sequence, optional): Optional padding on each border + of the image. Default is None. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. 
+            .. note::
+                In torchscript mode padding as single int is not supported, use a sequence of
+                length 1: ``[padding, ]``.
+        pad_if_needed (boolean, optional): It will pad the image if smaller than the
+            desired size to avoid raising an exception. Since cropping is done
+            after padding, the padding seems to be done at a random offset.
+        fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
+            Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
+            Fill value can be also a dictionary mapping data type to the fill value, e.g.
+            ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and
+            ``Mask`` will be filled with 0.
+        padding_mode (str, optional): Type of padding. Should be: constant, edge, reflect or symmetric.
+            Default is constant.
+
+            - constant: pads with a constant value, this value is specified with fill
+
+            - edge: pads with the last value at the edge of the image.
+
+            - reflect: pads with reflection of image without repeating the last value on the edge.
+              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
+              will result in [3, 2, 1, 2, 3, 4, 3, 2]
+
+            - symmetric: pads with reflection of image repeating the last value on the edge.
+              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
+              will result in [2, 1, 1, 2, 3, 4, 4, 3]
+    """
+
+    _v1_transform_cls = _transforms.RandomCrop
+
+    def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
+        params = super()._extract_params_for_v1_transform()
+
+        if not (params["fill"] is None or isinstance(params["fill"], (int, float))):
+            raise ValueError(f"{type(self).__name__}() can only be scripted for a scalar `fill`, but got {self.fill}.")
+
+        padding = self.padding
+        if padding is not None:
+            pad_left, pad_right, pad_top, pad_bottom = padding
+            padding = [pad_left, pad_top, pad_right, pad_bottom]  # v1 transform expects (left, top, right, bottom) order
+        params["padding"] = padding
+
+        return params
+
+    def __init__(
+        self,
+        size: Union[int, Sequence[int]],
+        padding: Optional[Union[int, Sequence[int]]] = None,
+        pad_if_needed: bool = False,
+        fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
+        padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
+    ) -> None:
+        super().__init__()
+
+        self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
+
+        if pad_if_needed or padding is not None:
+            if padding is not None:
+                _check_padding_arg(padding)
+            _check_padding_mode_arg(padding_mode)
+
+        self.padding = F._geometry._parse_pad_padding(padding) if padding else None  # type: ignore[arg-type]  # NOTE(review): falsy padding (0, [0, 0]) becomes None — behaviorally identical to no padding
+        self.pad_if_needed = pad_if_needed
+        self.fill = fill
+        self._fill = _setup_fill_arg(fill)
+        self.padding_mode = padding_mode
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        padded_height, padded_width = query_spatial_size(flat_inputs)
+
+        if self.padding is not None:
+            pad_left, pad_right, pad_top, pad_bottom = self.padding
+            padded_height += pad_top + pad_bottom
+            padded_width += pad_left + pad_right
+        else:
+            pad_left = pad_right = pad_top = pad_bottom = 0
+
+        cropped_height, cropped_width = self.size
+
+        if self.pad_if_needed:
+            if padded_height < cropped_height:
+                diff = cropped_height - padded_height
+
+                pad_top += diff
+                pad_bottom += diff
+                padded_height += 2 * diff  # padded symmetrically on both sides
+
+            if padded_width < cropped_width:
+                diff = cropped_width - padded_width
+
+                pad_left += diff
+                pad_right += diff
+                padded_width += 2 * diff
+
+        if padded_height < cropped_height or padded_width < cropped_width:
+            raise ValueError(
+                f"Required crop size {(cropped_height, cropped_width)} is larger than "
+                f"{'padded ' if self.padding is not None else ''}input image size {(padded_height, padded_width)}."
+            )
+
+        # We need a different order here than we have in self.padding since this padding will be parsed again in `F.pad`
+        padding = [pad_left, pad_top, pad_right, pad_bottom]
+        needs_pad = any(padding)
+
+        needs_vert_crop, top = (
+            (True, int(torch.randint(0, padded_height - cropped_height + 1, size=())))
+            if padded_height > cropped_height
+            else (False, 0)
+        )
+        needs_horz_crop, left = (
+            (True, int(torch.randint(0, padded_width - cropped_width + 1, size=())))
+            if padded_width > cropped_width
+            else (False, 0)
+        )
+
+        return dict(
+            needs_crop=needs_vert_crop or needs_horz_crop,
+            top=top,
+            left=left,
+            height=cropped_height,
+            width=cropped_width,
+            needs_pad=needs_pad,
+            padding=padding,
+        )
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        if params["needs_pad"]:
+            fill = self._fill[type(inpt)]
+            inpt = F.pad(inpt, padding=params["padding"], fill=fill, padding_mode=self.padding_mode)
+
+        if params["needs_crop"]:
+            inpt = F.crop(inpt, top=params["top"], left=params["left"], height=params["height"], width=params["width"])
+
+        return inpt
+
+
+class RandomPerspective(_RandomApplyTransform):
+    """[BETA] Perform a random perspective transformation of the input with a given probability.
+
+    .. v2betastatus:: RandomPerspective transform
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Args:
+        distortion_scale (float, optional): argument to control the degree of distortion and ranges from 0 to 1.
+            Default is 0.5.
+        p (float, optional): probability of the input being transformed. Default is 0.5.
+        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
+            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
+        fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
+            Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
+            Fill value can be also a dictionary mapping data type to the fill value, e.g.
+            ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and
+            ``Mask`` will be filled with 0.
+    """
+
+    _v1_transform_cls = _transforms.RandomPerspective
+
+    def __init__(
+        self,
+        distortion_scale: float = 0.5,
+        p: float = 0.5,
+        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
+        fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
+    ) -> None:
+        super().__init__(p=p)
+
+        if not (0 <= distortion_scale <= 1):
+            raise ValueError("Argument distortion_scale value should be between 0 and 1")
+
+        self.distortion_scale = distortion_scale
+        self.interpolation = _check_interpolation(interpolation)
+        self.fill = fill
+        self._fill = _setup_fill_arg(fill)
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        # Jitter the four image corners inward by up to distortion_scale * half the image size.
+        height, width = query_spatial_size(flat_inputs)
+
+        distortion_scale = self.distortion_scale
+
+        half_height = height // 2
+        half_width = width // 2
+        bound_height = int(distortion_scale * half_height) + 1  # +1 keeps randint's exclusive upper bound valid at scale 0
+        bound_width = int(distortion_scale * half_width) + 1
+        topleft = [
+            int(torch.randint(0, bound_width, size=(1,))),
+            int(torch.randint(0, bound_height, size=(1,))),
+        ]
+        topright = [
+            int(torch.randint(width - bound_width, width, size=(1,))),
+            int(torch.randint(0, bound_height, size=(1,))),
+        ]
+        botright = [
+            int(torch.randint(width - bound_width, width, size=(1,))),
+            int(torch.randint(height - bound_height, height, size=(1,))),
+        ]
+        botleft = [
+            int(torch.randint(0, bound_width, size=(1,))),
+            int(torch.randint(height - bound_height, height, size=(1,))),
+        ]
+        startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
+        endpoints = [topleft, topright, botright, botleft]
+        perspective_coeffs = _get_perspective_coeffs(startpoints, endpoints)
+        return dict(coefficients=perspective_coeffs)
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        fill = self._fill[type(inpt)]
+        return F.perspective(
+            inpt,
+            None,
+            None,
+            fill=fill,
+            interpolation=self.interpolation,
+            **params,
+        )
+
+
+class ElasticTransform(Transform):
+    """[BETA] Transform the input with elastic transformations.
+
+    .. v2betastatus:: ElasticTransform transform
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Given alpha and sigma, it will generate displacement
+    vectors for all pixels based on random offsets. Alpha controls the strength
+    and sigma controls the smoothness of the displacements.
+    The displacements are added to an identity grid and the resulting grid is
+    used to transform the input.
+
+    .. note::
+        Implementation to transform bounding boxes is approximative (not exact).
+        We construct an approximation of the inverse grid as ``inverse_grid = identity - displacement``.
+        This is not an exact inverse of the grid used to transform images, i.e. ``grid = identity + displacement``.
+        Our assumption is that ``displacement * displacement`` is small and can be ignored.
+        Large displacements would lead to large errors in the approximation.
+
+    Applications:
+        Randomly transforms the morphology of objects in images and produces a
+        see-through-water-like effect.
+
+    Args:
+        alpha (float or sequence of floats, optional): Magnitude of displacements. Default is 50.0.
+        sigma (float or sequence of floats, optional): Smoothness of displacements. Default is 5.0.
+        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
+            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
+        fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
+            Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
+            Fill value can be also a dictionary mapping data type to the fill value, e.g.
+            ``fill={datapoints.Image: 127, datapoints.Mask: 0}`` where ``Image`` will be filled with 127 and
+            ``Mask`` will be filled with 0.
+    """
+
+    _v1_transform_cls = _transforms.ElasticTransform
+
+    def __init__(
+        self,
+        alpha: Union[float, Sequence[float]] = 50.0,
+        sigma: Union[float, Sequence[float]] = 5.0,
+        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
+        fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
+    ) -> None:
+        super().__init__()
+        self.alpha = _setup_float_or_seq(alpha, "alpha", 2)  # normalized to a 2-element sequence (x, y)
+        self.sigma = _setup_float_or_seq(sigma, "sigma", 2)
+
+        self.interpolation = _check_interpolation(interpolation)
+        self.fill = fill
+        self._fill = _setup_fill_arg(fill)
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        size = list(query_spatial_size(flat_inputs))
+
+        dx = torch.rand([1, 1] + size) * 2 - 1  # uniform noise in [-1, 1)
+        if self.sigma[0] > 0.0:
+            kx = int(8 * self.sigma[0] + 1)
+            # if kernel size is even we have to make it odd
+            if kx % 2 == 0:
+                kx += 1
+            dx = F.gaussian_blur(dx, [kx, kx], list(self.sigma))
+        dx = dx * self.alpha[0] / size[0]
+
+        dy = torch.rand([1, 1] + size) * 2 - 1
+        if self.sigma[1] > 0.0:
+            ky = int(8 * self.sigma[1] + 1)
+            # if kernel size is even we have to make it odd
+            if ky % 2 == 0:
+                ky += 1
+            dy = F.gaussian_blur(dy, [ky, ky], list(self.sigma))
+        dy = dy * self.alpha[1] / size[1]
+        displacement = torch.concat([dx, dy], 1).permute([0, 2, 3, 1])  # 1 x H x W x 2
+        return dict(displacement=displacement)
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        fill = self._fill[type(inpt)]
+        return F.elastic(
+            inpt,
+            **params,
+            fill=fill,
+            interpolation=self.interpolation,
+        )
+
+
+class RandomIoUCrop(Transform):
+    """[BETA] Random IoU crop transformation from
+    `"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_.
+
+    .. v2betastatus:: RandomIoUCrop transform
+
+    This transformation requires an image or video data and ``datapoints.BoundingBox`` in the input.
+
+    .. warning::
+        In order to properly remove the bounding boxes below the IoU threshold, `RandomIoUCrop`
+        must be followed by :class:`~torchvision.transforms.v2.SanitizeBoundingBox`, either immediately
+        after or later in the transforms pipeline.
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Args:
+        min_scale (float, optional): Minimum factors to scale the input size.
+        max_scale (float, optional): Maximum factors to scale the input size.
+        min_aspect_ratio (float, optional): Minimum aspect ratio for the cropped image or video.
+        max_aspect_ratio (float, optional): Maximum aspect ratio for the cropped image or video.
+        sampler_options (list of float, optional): List of minimal IoU (Jaccard) overlap between all the boxes and
+            a cropped image or video. Default, ``None`` which corresponds to ``[0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]``
+        trials (int, optional): Number of trials to find a crop for a given value of minimal IoU (Jaccard) overlap.
+            Default, 40.
+    """
+
+    def __init__(
+        self,
+        min_scale: float = 0.3,
+        max_scale: float = 1.0,
+        min_aspect_ratio: float = 0.5,
+        max_aspect_ratio: float = 2.0,
+        sampler_options: Optional[List[float]] = None,
+        trials: int = 40,
+    ):
+        super().__init__()
+        # Configuration similar to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py#L89-L174
+        self.min_scale = min_scale
+        self.max_scale = max_scale
+        self.min_aspect_ratio = min_aspect_ratio
+        self.max_aspect_ratio = max_aspect_ratio
+        if sampler_options is None:
+            sampler_options = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]
+        self.options = sampler_options
+        self.trials = trials
+
+    def _check_inputs(self, flat_inputs: List[Any]) -> None:
+        if not (
+            has_all(flat_inputs, datapoints.BoundingBox)
+            and has_any(flat_inputs, PIL.Image.Image, datapoints.Image, is_simple_tensor)
+        ):
+            raise TypeError(
+                f"{type(self).__name__}() requires input sample to contain tensor or PIL images "
+                "and bounding boxes. Sample can also contain masks."
+            )
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        orig_h, orig_w = query_spatial_size(flat_inputs)
+        bboxes = query_bounding_box(flat_inputs)
+
+        while True:
+            # sample an option
+            idx = int(torch.randint(low=0, high=len(self.options), size=(1,)))
+            min_jaccard_overlap = self.options[idx]
+            if min_jaccard_overlap >= 1.0:  # a value larger than 1 encodes the leave as-is option
+                return dict()
+
+            for _ in range(self.trials):
+                # check the aspect ratio limitations
+                r = self.min_scale + (self.max_scale - self.min_scale) * torch.rand(2)
+                new_w = int(orig_w * r[0])
+                new_h = int(orig_h * r[1])
+                aspect_ratio = new_w / new_h
+                if not (self.min_aspect_ratio <= aspect_ratio <= self.max_aspect_ratio):
+                    continue
+
+                # check for 0 area crops
+                r = torch.rand(2)
+                left = int((orig_w - new_w) * r[0])
+                top = int((orig_h - new_h) * r[1])
+                right = left + new_w
+                bottom = top + new_h
+                if left == right or top == bottom:
+                    continue
+
+                # check for any valid boxes with centers within the crop area
+                xyxy_bboxes = F.convert_format_bounding_box(
+                    bboxes.as_subclass(torch.Tensor), bboxes.format, datapoints.BoundingBoxFormat.XYXY
+                )
+                cx = 0.5 * (xyxy_bboxes[..., 0] + xyxy_bboxes[..., 2])
+                cy = 0.5 * (xyxy_bboxes[..., 1] + xyxy_bboxes[..., 3])
+                is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom)
+                if not is_within_crop_area.any():
+                    continue
+
+                # check at least 1 box with jaccard limitations
+                xyxy_bboxes = xyxy_bboxes[is_within_crop_area]
+                ious = box_iou(
+                    xyxy_bboxes,
+                    torch.tensor([[left, top, right, bottom]], dtype=xyxy_bboxes.dtype, device=xyxy_bboxes.device),
+                )
+                if ious.max() < min_jaccard_overlap:
+                    continue
+
+                return dict(top=top, left=left, height=new_h, width=new_w, is_within_crop_area=is_within_crop_area)
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+
+        if len(params) < 1:  # empty params encode the leave-as-is option sampled above
+            return inpt
+
+        output = F.crop(inpt, top=params["top"], left=params["left"], height=params["height"], width=params["width"])
+
+        if isinstance(output, datapoints.BoundingBox):
+            # We "mark" the invalid boxes as degenerate, and they can be
+            # removed by a later call to SanitizeBoundingBox()
+            output[~params["is_within_crop_area"]] = 0
+
+        return output
+
+
+class ScaleJitter(Transform):
+    """[BETA] Perform Large Scale Jitter on the input according to
+    `"Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" <https://arxiv.org/abs/2012.07177>`_.
+
+    .. v2betastatus:: ScaleJitter transform
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Args:
+        target_size (tuple of int): Target size. This parameter defines base scale for jittering,
+            e.g. ``min(target_size[0] / width, target_size[1] / height)``.
+        scale_range (tuple of float, optional): Minimum and maximum of the scale range. Default, ``(0.1, 2.0)``.
+        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
+            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
+            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
+        antialias (bool, optional): Whether to apply antialiasing.
+            It only affects **tensors** with bilinear or bicubic modes and it is
+            ignored otherwise: on PIL images, antialiasing is always applied on
+            bilinear or bicubic modes; on other modes (for PIL images and
+            tensors), antialiasing makes no sense and this parameter is ignored.
+            Possible values are:
+
+            - ``True``: will apply antialiasing for bilinear or bicubic modes.
+              Other mode aren't affected. This is probably what you want to use.
+            - ``False``: will not apply antialiasing for tensors on any mode. PIL
+              images are still antialiased on bilinear or bicubic modes, because
+              PIL doesn't support no antialias.
+            - ``None``: equivalent to ``False`` for tensors and ``True`` for
+              PIL images. This value exists for legacy reasons and you probably
+              don't want to use it unless you really know what you are doing.
+
+            The current default is ``None`` **but will change to** ``True`` **in
+            v0.17** for the PIL and Tensor backends to be consistent.
+    """
+
+    def __init__(
+        self,
+        target_size: Tuple[int, int],
+        scale_range: Tuple[float, float] = (0.1, 2.0),
+        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
+        antialias: Optional[Union[str, bool]] = "warn",
+    ):
+        super().__init__()
+        self.target_size = target_size
+        self.scale_range = scale_range
+        self.interpolation = _check_interpolation(interpolation)
+        self.antialias = antialias
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        orig_height, orig_width = query_spatial_size(flat_inputs)
+
+        scale = self.scale_range[0] + torch.rand(1) * (self.scale_range[1] - self.scale_range[0])  # uniform in [min, max)
+        r = min(self.target_size[1] / orig_height, self.target_size[0] / orig_width) * scale
+        new_width = int(orig_width * r)
+        new_height = int(orig_height * r)
+
+        return dict(size=(new_height, new_width))
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        return F.resize(inpt, size=params["size"], interpolation=self.interpolation, antialias=self.antialias)
+
+
+class RandomShortestSize(Transform):
+    """[BETA] Randomly resize the input.
+
+    .. v2betastatus:: RandomShortestSize transform
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Args:
+        min_size (int or sequence of int): Minimum spatial size. Single integer value or a sequence of integer values.
+        max_size (int, optional): Maximum spatial size. Default, None.
+        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
+            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
+            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
+        antialias (bool, optional): Whether to apply antialiasing.
+            It only affects **tensors** with bilinear or bicubic modes and it is
+            ignored otherwise: on PIL images, antialiasing is always applied on
+            bilinear or bicubic modes; on other modes (for PIL images and
+            tensors), antialiasing makes no sense and this parameter is ignored.
+            Possible values are:
+
+            - ``True``: will apply antialiasing for bilinear or bicubic modes.
+              Other mode aren't affected. This is probably what you want to use.
+            - ``False``: will not apply antialiasing for tensors on any mode. PIL
+              images are still antialiased on bilinear or bicubic modes, because
+              PIL doesn't support no antialias.
+            - ``None``: equivalent to ``False`` for tensors and ``True`` for
+              PIL images. This value exists for legacy reasons and you probably
+              don't want to use it unless you really know what you are doing.
+
+            The current default is ``None`` **but will change to** ``True`` **in
+            v0.17** for the PIL and Tensor backends to be consistent.
+    """
+
+    def __init__(
+        self,
+        min_size: Union[List[int], Tuple[int], int],
+        max_size: Optional[int] = None,
+        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
+        antialias: Optional[Union[str, bool]] = "warn",
+    ):
+        super().__init__()
+        self.min_size = [min_size] if isinstance(min_size, int) else list(min_size)
+        self.max_size = max_size
+        self.interpolation = _check_interpolation(interpolation)
+        self.antialias = antialias
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        orig_height, orig_width = query_spatial_size(flat_inputs)
+
+        min_size = self.min_size[int(torch.randint(len(self.min_size), ()))]
+        r = min_size / min(orig_height, orig_width)  # scale so the shorter side becomes min_size
+        if self.max_size is not None:
+            r = min(r, self.max_size / max(orig_height, orig_width))  # never let the longer side exceed max_size
+
+        new_width = int(orig_width * r)
+        new_height = int(orig_height * r)
+
+        return dict(size=(new_height, new_width))
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        return F.resize(inpt, size=params["size"], interpolation=self.interpolation, antialias=self.antialias)
+
+
+class RandomResize(Transform):
+    """[BETA] Randomly resize the input.
+
+    .. v2betastatus:: RandomResize transform
+
+    This transformation can be used together with ``RandomCrop`` as data augmentations to train
+    models on image segmentation task.
+
+    Output spatial size is randomly sampled from the interval ``[min_size, max_size]``:
+
+    .. code-block:: python
+
+        size = uniform_sample(min_size, max_size)
+        output_width = size
+        output_height = size
+
+    If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`,
+    :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.)
+    it can have arbitrary number of leading batch dimensions. For example,
+    the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
+
+    Args:
+        min_size (int): Minimum output size for random sampling
+        max_size (int): Maximum output size for random sampling
+        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
+            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
+            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
+        antialias (bool, optional): Whether to apply antialiasing.
+            It only affects **tensors** with bilinear or bicubic modes and it is
+            ignored otherwise: on PIL images, antialiasing is always applied on
+            bilinear or bicubic modes; on other modes (for PIL images and
+            tensors), antialiasing makes no sense and this parameter is ignored.
+            Possible values are:
+
+            - ``True``: will apply antialiasing for bilinear or bicubic modes.
+              Other mode aren't affected. This is probably what you want to use.
+            - ``False``: will not apply antialiasing for tensors on any mode. PIL
+              images are still antialiased on bilinear or bicubic modes, because
+              PIL doesn't support no antialias.
+            - ``None``: equivalent to ``False`` for tensors and ``True`` for
+              PIL images. This value exists for legacy reasons and you probably
+              don't want to use it unless you really know what you are doing.
+
+            The current default is ``None`` **but will change to** ``True`` **in
+            v0.17** for the PIL and Tensor backends to be consistent.
+    """
+
+    def __init__(
+        self,
+        min_size: int,
+        max_size: int,
+        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
+        antialias: Optional[Union[str, bool]] = "warn",
+    ) -> None:
+        super().__init__()
+        self.min_size = min_size
+        self.max_size = max_size
+        self.interpolation = _check_interpolation(interpolation)
+        self.antialias = antialias
+
+    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
+        size = int(torch.randint(self.min_size, self.max_size, ()))  # NOTE(review): randint's upper bound is exclusive, so max_size itself is never sampled although the docstring describes the interval as inclusive — confirm intended
+        return dict(size=[size])
+
+    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+        return F.resize(inpt, params["size"], interpolation=self.interpolation, antialias=self.antialias)
diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffb34c877485e7c9bb6f438a18e9b538ed631b40
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__init__.py
@@ -0,0 +1,171 @@
+from torchvision.transforms import InterpolationMode  # usort: skip
+
+from ._utils import is_simple_tensor  # usort: skip
+
+from ._meta import (
+    clamp_bounding_box,
+    convert_format_bounding_box,
+    convert_dtype_image_tensor,
+    convert_dtype,
+    convert_dtype_video,
+    convert_image_dtype,
+    get_dimensions_image_tensor,
+    get_dimensions_image_pil,
+    get_dimensions,
+    get_num_frames_video,
+    get_num_frames,
+    get_image_num_channels,
+    get_num_channels_image_tensor,
+    get_num_channels_image_pil,
+    get_num_channels_video,
+    get_num_channels,
+    get_spatial_size_bounding_box,
+    get_spatial_size_image_tensor,
+    get_spatial_size_image_pil,
+    get_spatial_size_mask,
+    get_spatial_size_video,
+    get_spatial_size,
+)  # usort: skip
+
+from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
+from ._color import (
+    adjust_brightness,
+    adjust_brightness_image_pil,
adjust_brightness_image_tensor, + adjust_brightness_video, + adjust_contrast, + adjust_contrast_image_pil, + adjust_contrast_image_tensor, + adjust_contrast_video, + adjust_gamma, + adjust_gamma_image_pil, + adjust_gamma_image_tensor, + adjust_gamma_video, + adjust_hue, + adjust_hue_image_pil, + adjust_hue_image_tensor, + adjust_hue_video, + adjust_saturation, + adjust_saturation_image_pil, + adjust_saturation_image_tensor, + adjust_saturation_video, + adjust_sharpness, + adjust_sharpness_image_pil, + adjust_sharpness_image_tensor, + adjust_sharpness_video, + autocontrast, + autocontrast_image_pil, + autocontrast_image_tensor, + autocontrast_video, + equalize, + equalize_image_pil, + equalize_image_tensor, + equalize_video, + invert, + invert_image_pil, + invert_image_tensor, + invert_video, + posterize, + posterize_image_pil, + posterize_image_tensor, + posterize_video, + rgb_to_grayscale, + rgb_to_grayscale_image_pil, + rgb_to_grayscale_image_tensor, + solarize, + solarize_image_pil, + solarize_image_tensor, + solarize_video, +) +from ._geometry import ( + affine, + affine_bounding_box, + affine_image_pil, + affine_image_tensor, + affine_mask, + affine_video, + center_crop, + center_crop_bounding_box, + center_crop_image_pil, + center_crop_image_tensor, + center_crop_mask, + center_crop_video, + crop, + crop_bounding_box, + crop_image_pil, + crop_image_tensor, + crop_mask, + crop_video, + elastic, + elastic_bounding_box, + elastic_image_pil, + elastic_image_tensor, + elastic_mask, + elastic_transform, + elastic_video, + five_crop, + five_crop_image_pil, + five_crop_image_tensor, + five_crop_video, + hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file + horizontal_flip, + horizontal_flip_bounding_box, + horizontal_flip_image_pil, + horizontal_flip_image_tensor, + horizontal_flip_mask, + horizontal_flip_video, + pad, + pad_bounding_box, + pad_image_pil, + pad_image_tensor, + pad_mask, + pad_video, + perspective, + 
perspective_bounding_box, + perspective_image_pil, + perspective_image_tensor, + perspective_mask, + perspective_video, + resize, + resize_bounding_box, + resize_image_pil, + resize_image_tensor, + resize_mask, + resize_video, + resized_crop, + resized_crop_bounding_box, + resized_crop_image_pil, + resized_crop_image_tensor, + resized_crop_mask, + resized_crop_video, + rotate, + rotate_bounding_box, + rotate_image_pil, + rotate_image_tensor, + rotate_mask, + rotate_video, + ten_crop, + ten_crop_image_pil, + ten_crop_image_tensor, + ten_crop_video, + vertical_flip, + vertical_flip_bounding_box, + vertical_flip_image_pil, + vertical_flip_image_tensor, + vertical_flip_mask, + vertical_flip_video, + vflip, +) +from ._misc import ( + gaussian_blur, + gaussian_blur_image_pil, + gaussian_blur_image_tensor, + gaussian_blur_video, + normalize, + normalize_image_tensor, + normalize_video, +) +from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video +from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image + +from ._deprecated import get_image_size, to_grayscale, to_tensor # usort: skip diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4e8623bec6be7487892f1aef9886daf0b39406a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ccfb37d03179424d6b9714a88821d59f499a2e5 Binary files /dev/null and 
import torch


def erase_image_tensor(
    image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
    """Fill the ``h x w`` region of ``image`` whose top-left corner is ``(i, j)`` with ``v``.

    Works on a copy unless ``inplace`` is True, in which case ``image`` itself is
    modified and returned.
    """
    target = image if inplace else image.clone()
    target[..., i : i + h, j : j + w] = v
    return target


def erase_video(
    video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
    """Video kernel for ``erase``: identical to the image kernel, broadcast over leading dims."""
    return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
from typing import Union

import PIL.Image
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once

from ._utils import is_simple_tensor


@torch.jit.unused
def erase_image_pil(
    image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
    """Erase kernel for PIL images: round-trips through a tensor because the math is tensor-only."""
    as_tensor = pil_to_tensor(image)
    erased = erase_image_tensor(as_tensor, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
    return to_pil_image(erased, mode=image.mode)


def erase(
    inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT],
    i: int,
    j: int,
    h: int,
    w: int,
    v: torch.Tensor,
    inplace: bool = False,
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
    """Dispatch ``erase`` to the kernel matching the type of ``inpt``.

    ``(i, j)`` is the top-left corner of the erased region, ``(h, w)`` its size
    and ``v`` the replacement value(s).
    """
    if not torch.jit.is_scripting():
        _log_api_usage_once(erase)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
    elif isinstance(inpt, datapoints.Image):
        result = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
        return datapoints.Image.wrap_like(inpt, result)
    elif isinstance(inpt, datapoints.Video):
        result = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
        return datapoints.Video.wrap_like(inpt, result)
    elif isinstance(inpt, PIL.Image.Image):
        return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
import torch


def _rgb_to_grayscale_image_tensor(
    image: torch.Tensor, num_output_channels: int = 1, preserve_dtype: bool = True
) -> torch.Tensor:
    """Compute the ITU-R 601 luma ``0.2989 R + 0.587 G + 0.114 B`` over the channel dim.

    ``preserve_dtype=False`` leaves the result in the promoted (float) dtype, which
    callers use to postpone rounding.
    """
    if image.shape[-3] == 1:
        # Already single-channel; mirror the copy semantics of the conversion path.
        return image.clone()

    red, green, blue = image.unbind(dim=-3)
    # `mul` with a float scalar promotes integer inputs to float before accumulating.
    luma = red.mul(0.2989).add_(green, alpha=0.587).add_(blue, alpha=0.114)
    luma = luma.unsqueeze(dim=-3)
    if preserve_dtype:
        luma = luma.to(image.dtype)
    if num_output_channels == 3:
        luma = luma.expand(image.shape)
    return luma


def rgb_to_grayscale_image_tensor(image: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor:
    """Public tensor kernel: grayscale conversion that keeps the input dtype."""
    return _rgb_to_grayscale_image_tensor(image, num_output_channels=num_output_channels, preserve_dtype=True)
import PIL.Image
from torch.nn.functional import conv2d
from torchvision import datapoints
from torchvision.transforms import _functional_pil as _FP
from torchvision.transforms._functional_tensor import _max_value
from torchvision.utils import _log_api_usage_once

from ._meta import _num_value_bits, convert_dtype_image_tensor
from ._utils import is_simple_tensor
from typing import Union


# PIL kernel with the same contract as the tensor kernel above.
rgb_to_grayscale_image_pil = _FP.to_grayscale


def rgb_to_grayscale(
    inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT], num_output_channels: int = 1
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
    """Dispatch grayscale conversion to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(rgb_to_grayscale)
    if num_output_channels not in (1, 3):
        raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.")
    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return rgb_to_grayscale_image_tensor(inpt, num_output_channels=num_output_channels)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.rgb_to_grayscale(num_output_channels=num_output_channels)
    elif isinstance(inpt, PIL.Image.Image):
        return rgb_to_grayscale_image_pil(inpt, num_output_channels=num_output_channels)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )


def _blend(image1: torch.Tensor, image2: torch.Tensor, ratio: float) -> torch.Tensor:
    """Return ``ratio * image1 + (1 - ratio) * image2``, clamped to the dtype's value range."""
    ratio = float(ratio)
    is_float = image1.is_floating_point()
    bound = _max_value(image1.dtype)
    mixed = image1.mul(ratio).add_(image2, alpha=(1.0 - ratio)).clamp_(0, bound)
    # Integer inputs were promoted to float by `mul`; cast the result back.
    return mixed if is_float else mixed.to(image1.dtype)


def adjust_brightness_image_tensor(image: torch.Tensor, brightness_factor: float) -> torch.Tensor:
    """Scale pixel values by ``brightness_factor`` (0 gives black, 1 the original image)."""
    if brightness_factor < 0:
        raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.")

    c = image.shape[-3]
    if c not in [1, 3]:
        raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")

    is_float = image.is_floating_point()
    bound = _max_value(image.dtype)
    scaled = image.mul(brightness_factor).clamp_(0, bound)
    return scaled if is_float else scaled.to(image.dtype)


adjust_brightness_image_pil = _FP.adjust_brightness


def adjust_brightness_video(video: torch.Tensor, brightness_factor: float) -> torch.Tensor:
    # Videos reuse the image kernel; the op broadcasts over the leading dims.
    return adjust_brightness_image_tensor(video, brightness_factor=brightness_factor)


def adjust_brightness(inpt: datapoints._InputTypeJIT, brightness_factor: float) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_brightness`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_brightness)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return adjust_brightness_image_tensor(inpt, brightness_factor=brightness_factor)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_brightness(brightness_factor=brightness_factor)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_brightness_image_pil(inpt, brightness_factor=brightness_factor)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def adjust_saturation_image_tensor(image: torch.Tensor, saturation_factor: float) -> torch.Tensor:
    """Blend the image with its grayscale version; 0 is fully desaturated, 1 the original."""
    if saturation_factor < 0:
        raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.")

    c = image.shape[-3]
    if c not in [1, 3]:
        raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")

    if c == 1:  # Match PIL behaviour
        return image

    grayscale_image = _rgb_to_grayscale_image_tensor(image, num_output_channels=1, preserve_dtype=False)
    if not image.is_floating_point():
        # Truncate the float luma the same way a cast to the integer dtype would.
        grayscale_image = grayscale_image.floor_()

    return _blend(image, grayscale_image, saturation_factor)


adjust_saturation_image_pil = _FP.adjust_saturation


def adjust_saturation_video(video: torch.Tensor, saturation_factor: float) -> torch.Tensor:
    return adjust_saturation_image_tensor(video, saturation_factor=saturation_factor)


def adjust_saturation(inpt: datapoints._InputTypeJIT, saturation_factor: float) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_saturation`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_saturation)

    # NOTE(review): this spelled-out check looks equivalent to `is_simple_tensor`; presumably kept
    # inline for TorchScript compatibility — confirm before unifying with the other dispatchers.
    if isinstance(inpt, torch.Tensor) and (
        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
    ):
        return adjust_saturation_image_tensor(inpt, saturation_factor=saturation_factor)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_saturation(saturation_factor=saturation_factor)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_saturation_image_pil(inpt, saturation_factor=saturation_factor)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def adjust_contrast_image_tensor(image: torch.Tensor, contrast_factor: float) -> torch.Tensor:
    """Blend the image with its mean gray value; 0 gives solid gray, 1 the original."""
    if contrast_factor < 0:
        raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.")

    c = image.shape[-3]
    if c not in [1, 3]:
        raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
    is_float = image.is_floating_point()
    if c == 3:
        grayscale_image = _rgb_to_grayscale_image_tensor(image, num_output_channels=1, preserve_dtype=False)
        if not is_float:
            grayscale_image = grayscale_image.floor_()
    else:
        grayscale_image = image if is_float else image.to(torch.float32)
    # The blend target is the per-image mean over channel and spatial dims.
    mean = torch.mean(grayscale_image, dim=(-3, -2, -1), keepdim=True)
    return _blend(image, mean, contrast_factor)


adjust_contrast_image_pil = _FP.adjust_contrast


def adjust_contrast_video(video: torch.Tensor, contrast_factor: float) -> torch.Tensor:
    return adjust_contrast_image_tensor(video, contrast_factor=contrast_factor)


def adjust_contrast(inpt: datapoints._InputTypeJIT, contrast_factor: float) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_contrast`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_contrast)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return adjust_contrast_image_tensor(inpt, contrast_factor=contrast_factor)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_contrast(contrast_factor=contrast_factor)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_contrast_image_pil(inpt, contrast_factor=contrast_factor)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def adjust_sharpness_image_tensor(image: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
    """Blend the image with a blurred version of its interior; 1 returns the original.

    Images that are empty or have no interior (height or width <= 2) are returned as is.
    """
    num_channels, height, width = image.shape[-3:]
    if num_channels not in (1, 3):
        raise TypeError(f"Input image tensor can have 1 or 3 channels, but found {num_channels}")

    if sharpness_factor < 0:
        raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.")

    if image.numel() == 0 or height <= 2 or width <= 2:
        return image

    bound = _max_value(image.dtype)
    is_float = image.is_floating_point()
    orig_shape = image.shape

    # conv2d wants a 4D batch; fold any extra leading dims and remember to unfold.
    if image.ndim > 4:
        image = image.reshape(-1, num_channels, height, width)
        restore_shape = True
    else:
        restore_shape = False

    # Normalized 3x3 smoothing kernel: 1s on the edges, 5 in the middle (sums to 13).
    kernel_dtype = image.dtype if is_float else torch.float32
    edge, center = 1.0 / 13.0, 5.0 / 13.0
    kernel = torch.tensor(
        [[edge, edge, edge], [edge, center, edge], [edge, edge, edge]], dtype=kernel_dtype, device=image.device
    )
    kernel = kernel.expand(num_channels, 1, 3, 3)

    # Copy and cast in one step so the caller's data is never touched.
    output = image.to(dtype=kernel_dtype, copy=True)
    blurred_degenerate = conv2d(output, kernel, groups=num_channels)
    if not is_float:
        # Round before the final integer cast for PIL parity.
        blurred_degenerate = blurred_degenerate.round_()

    # A view on the interior of `output`; writing through it updates `output` in place.
    interior = output[..., 1:-1, 1:-1]

    # In-place blend: x + (1 - r) * (y - x) == r * x + (1 - r) * y, with fewer flops.
    interior.add_(blurred_degenerate.sub_(interior), alpha=(1.0 - sharpness_factor))

    # `output` already holds the blended data; only clamp and cast remain.
    output = output.clamp_(0, bound)
    if not is_float:
        output = output.to(image.dtype)

    if restore_shape:
        output = output.reshape(orig_shape)

    return output


adjust_sharpness_image_pil = _FP.adjust_sharpness


def adjust_sharpness_video(video: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
    return adjust_sharpness_image_tensor(video, sharpness_factor=sharpness_factor)


def adjust_sharpness(inpt: datapoints._InputTypeJIT, sharpness_factor: float) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_sharpness`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_sharpness)

    # NOTE(review): spelled-out form of `is_simple_tensor`; presumably kept inline for
    # TorchScript compatibility — confirm before unifying with the other dispatchers.
    if isinstance(inpt, torch.Tensor) and (
        torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
    ):
        return adjust_sharpness_image_tensor(inpt, sharpness_factor=sharpness_factor)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_sharpness(sharpness_factor=sharpness_factor)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_sharpness_image_pil(inpt, sharpness_factor=sharpness_factor)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
+ eqc = maxc == minc + + channels_range = maxc - minc + # Since `eqc => channels_range = 0`, replacing denominator with 1 when `eqc` is fine. + ones = torch.ones_like(maxc) + s = channels_range / torch.where(eqc, ones, maxc) + # Note that `eqc => maxc = minc = r = g = b`. So the following calculation + # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it + # would not matter what values `rc`, `gc`, and `bc` have here, and thus + # replacing denominator with 1 when `eqc` is fine. + channels_range_divisor = torch.where(eqc, ones, channels_range).unsqueeze_(dim=-3) + rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image) / channels_range_divisor).unbind(dim=-3) + + mask_maxc_neq_r = maxc != r + mask_maxc_eq_g = maxc == g + + hg = rc.add(2.0).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r) + hr = bc.sub_(gc).mul_(~mask_maxc_neq_r) + hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_r.logical_and_(mask_maxc_eq_g.logical_not_())) + + h = hr.add_(hg).add_(hb) + h = h.mul_(1.0 / 6.0).add_(1.0).fmod_(1.0) + return torch.stack((h, s, maxc), dim=-3) + + +def _hsv_to_rgb(img: torch.Tensor) -> torch.Tensor: + h, s, v = img.unbind(dim=-3) + h6 = h.mul(6) + i = torch.floor(h6) + f = h6.sub_(i) + i = i.to(dtype=torch.int32) + + sxf = s * f + one_minus_s = 1.0 - s + q = (1.0 - sxf).mul_(v).clamp_(0.0, 1.0) + t = sxf.add_(one_minus_s).mul_(v).clamp_(0.0, 1.0) + p = one_minus_s.mul_(v).clamp_(0.0, 1.0) + i.remainder_(6) + + mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1) + + a1 = torch.stack((v, q, p, p, t, v), dim=-3) + a2 = torch.stack((t, v, v, q, p, p), dim=-3) + a3 = torch.stack((p, p, t, v, v, q), dim=-3) + a4 = torch.stack((a1, a2, a3), dim=-4) + + return (a4.mul_(mask.unsqueeze(dim=-4))).sum(dim=-3) + + +def adjust_hue_image_tensor(image: torch.Tensor, hue_factor: float) -> torch.Tensor: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].") + + c = image.shape[-3] + if c not in [1, 
adjust_hue_image_pil = _FP.adjust_hue


def adjust_hue_video(video: torch.Tensor, hue_factor: float) -> torch.Tensor:
    return adjust_hue_image_tensor(video, hue_factor=hue_factor)


def adjust_hue(inpt: datapoints._InputTypeJIT, hue_factor: float) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_hue`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_hue)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return adjust_hue_image_tensor(inpt, hue_factor=hue_factor)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_hue(hue_factor=hue_factor)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_hue_image_pil(inpt, hue_factor=hue_factor)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )


def adjust_gamma_image_tensor(image: torch.Tensor, gamma: float, gain: float = 1.0) -> torch.Tensor:
    """Apply gamma correction ``gain * img ** gamma`` on a [0, 1] scale."""
    if gamma < 0:
        raise ValueError("Gamma should be a non-negative real number")

    # Integer inputs are mapped to [0, 1] floats first; a non-negative gamma keeps the
    # result inside [0, 1], so no clamp is needed on this path.
    if not torch.is_floating_point(image):
        corrected = convert_dtype_image_tensor(image, torch.float32).pow_(gamma)
    else:
        corrected = image.pow(gamma)

    if gain != 1.0:
        # Only a gain != 1 can push values outside [0, 1], so clamp only then.
        corrected = corrected.mul_(gain).clamp_(0.0, 1.0)

    return convert_dtype_image_tensor(corrected, image.dtype)
adjust_gamma_image_pil = _FP.adjust_gamma


def adjust_gamma_video(video: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor:
    return adjust_gamma_image_tensor(video, gamma=gamma, gain=gain)


def adjust_gamma(inpt: datapoints._InputTypeJIT, gamma: float, gain: float = 1) -> datapoints._InputTypeJIT:
    """Dispatch ``adjust_gamma`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(adjust_gamma)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return adjust_gamma_image_tensor(inpt, gamma=gamma, gain=gain)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.adjust_gamma(gamma=gamma, gain=gain)
    elif isinstance(inpt, PIL.Image.Image):
        return adjust_gamma_image_pil(inpt, gamma=gamma, gain=gain)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def posterize_image_tensor(image: torch.Tensor, bits: int) -> torch.Tensor:
    """Reduce each channel to ``bits`` bits of resolution."""
    if image.is_floating_point():
        # Float path: snap [0, 1] values onto a grid of 2**bits levels.
        levels = 1 << bits
        return image.mul(levels).floor_().clamp_(0, levels - 1).mul_(1.0 / levels)
    else:
        # Integer path: zero the low-order bits with a mask; a no-op if the dtype
        # already has no more value bits than requested.
        num_value_bits = _num_value_bits(image.dtype)
        if bits >= num_value_bits:
            return image

        mask = ((1 << bits) - 1) << (num_value_bits - bits)
        return image & mask


posterize_image_pil = _FP.posterize


def posterize_video(video: torch.Tensor, bits: int) -> torch.Tensor:
    return posterize_image_tensor(video, bits=bits)


def posterize(inpt: datapoints._InputTypeJIT, bits: int) -> datapoints._InputTypeJIT:
    """Dispatch ``posterize`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(posterize)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return posterize_image_tensor(inpt, bits=bits)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.posterize(bits=bits)
    elif isinstance(inpt, PIL.Image.Image):
        return posterize_image_pil(inpt, bits=bits)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def solarize_image_tensor(image: torch.Tensor, threshold: float) -> torch.Tensor:
    """Invert every pixel whose value is at or above ``threshold``."""
    if threshold > _max_value(image.dtype):
        raise TypeError(f"Threshold should be less or equal the maximum value of the dtype, but got {threshold}")

    # `invert_image_tensor` is defined further down in this module.
    return torch.where(image >= threshold, invert_image_tensor(image), image)


solarize_image_pil = _FP.solarize


def solarize_video(video: torch.Tensor, threshold: float) -> torch.Tensor:
    return solarize_image_tensor(video, threshold=threshold)


def solarize(inpt: datapoints._InputTypeJIT, threshold: float) -> datapoints._InputTypeJIT:
    """Dispatch ``solarize`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(solarize)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return solarize_image_tensor(inpt, threshold=threshold)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.solarize(threshold=threshold)
    elif isinstance(inpt, PIL.Image.Image):
        return solarize_image_pil(inpt, threshold=threshold)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def autocontrast_image_tensor(image: torch.Tensor) -> torch.Tensor:
    """Stretch each channel linearly so its min/max span the dtype's full value range."""
    c = image.shape[-3]
    if c not in [1, 3]:
        raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")

    if image.numel() == 0:
        # exit earlier on empty images
        return image

    bound = _max_value(image.dtype)
    is_float = image.is_floating_point()
    float_image = image if is_float else image.to(torch.float32)

    minimum = float_image.amin(dim=(-2, -1), keepdim=True)
    maximum = float_image.amax(dim=(-2, -1), keepdim=True)

    # Channels holding a single value cannot be stretched; substitute an identity map.
    eq_idxs = maximum == minimum
    inv_scale = maximum.sub_(minimum).mul_(1.0 / bound)
    minimum[eq_idxs] = 0.0
    inv_scale[eq_idxs] = 1.0

    if is_float:
        diff = float_image.sub(minimum)
    else:
        # `float_image` is a private copy on this path, so in-place subtraction is safe.
        diff = float_image.sub_(minimum)

    return diff.div_(inv_scale).clamp_(0, bound).to(image.dtype)


autocontrast_image_pil = _FP.autocontrast


def autocontrast_video(video: torch.Tensor) -> torch.Tensor:
    return autocontrast_image_tensor(video)


def autocontrast(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
    """Dispatch ``autocontrast`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(autocontrast)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return autocontrast_image_tensor(inpt)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.autocontrast()
    elif isinstance(inpt, PIL.Image.Image):
        return autocontrast_image_pil(inpt)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )


def equalize_image_tensor(image: torch.Tensor) -> torch.Tensor:
    """Histogram-equalize an image, mirroring PIL's lookup-table scheme.

    All inputs are binned into uint8 first: that keeps the histogram at 256 bins
    (larger integer dtypes would need impractically big histograms, and floats
    would need binning anyway), and uint8 is by far the most common image dtype.
    The result is converted back to the input dtype.
    """
    if image.numel() == 0:
        return image

    output_dtype = image.dtype
    image = convert_dtype_image_tensor(image, torch.uint8)

    # Build the per-image histogram by scatter-adding 1 at each pixel's value,
    # using the flattened image itself as the index.
    batch_shape = image.shape[:-2]
    flat_image = image.flatten(start_dim=-2).to(torch.long)
    hist = flat_image.new_zeros(batch_shape + (256,), dtype=torch.int32)
    hist.scatter_add_(dim=-1, index=flat_image, src=hist.new_ones(1).expand_as(flat_image))
    cum_hist = hist.cumsum(dim=-1)

    # The naive LUT would be `cum_hist / flat_image.shape[-1] * 255`; PIL instead uses
    # `lut = ((cum_hist + num_non_max_pixels // (2 * 255)) // num_non_max_pixels) * 255`
    # where "max" is the maximum value present in the image, not necessarily 255. The
    # last non-zero histogram entry is the first cumulative entry at the maximum.
    index = cum_hist.argmax(dim=-1)
    num_non_max_pixels = flat_image.shape[-1] - hist.gather(dim=-1, index=index.unsqueeze_(-1))

    # Folding the division by 255 into `step` lets the LUT become
    # `(cum_hist + step // 2) // step`, saving the trailing multiplication by 255.
    step = num_non_max_pixels.div_(255, rounding_mode="floor")

    # PIL returns early on `step == 0`, but with batches we can only do that when it
    # holds for every image; instead, such images are passed through unchanged below.
    valid_equalization = step.ne(0).unsqueeze_(-1)

    # `lut[0]` is always `(step // 2) // step == 0`, so compute `lut[1:]` from
    # `cum_hist[:-1]` and prepend the zero afterwards.
    cum_hist = cum_hist[..., :-1]
    (
        cum_hist.add_(step // 2)
        # clamp_(min=1) avoids integer zero division; harmless because images with
        # `step == 0` are masked out via `valid_equalization` anyway.
        .div_(step.clamp_(min=1), rounding_mode="floor")
        # PIL's scheme can overshoot the valid uint8 range, hence the clamp.
        .clamp_(0, 255)
    )
    lut = cum_hist.to(torch.uint8)
    lut = torch.cat([lut.new_zeros(1).expand(batch_shape + (1,)), lut], dim=-1)
    equalized_image = lut.gather(dim=-1, index=flat_image).view_as(image)

    output = torch.where(valid_equalization, equalized_image, image)
    return convert_dtype_image_tensor(output, output_dtype)


equalize_image_pil = _FP.equalize


def equalize_video(video: torch.Tensor) -> torch.Tensor:
    return equalize_image_tensor(video)


def equalize(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
    """Dispatch ``equalize`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(equalize)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return equalize_image_tensor(inpt)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.equalize()
    elif isinstance(inpt, PIL.Image.Image):
        return equalize_image_pil(inpt)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
def invert_image_tensor(image: torch.Tensor) -> torch.Tensor:
    """Produce the photographic negative of the image."""
    if image.is_floating_point():
        return 1.0 - image
    elif image.dtype == torch.uint8:
        return image.bitwise_not()
    else:  # signed integer dtypes
        # XOR with the value-bit mask flips only the value bits, preserving the
        # leading sign bit that `bitwise_not` would clobber.
        return image.bitwise_xor((1 << _num_value_bits(image.dtype)) - 1)


invert_image_pil = _FP.invert


def invert_video(video: torch.Tensor) -> torch.Tensor:
    return invert_image_tensor(video)


def invert(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
    """Dispatch ``invert`` to the kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(invert)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return invert_image_tensor(inpt)
    elif isinstance(inpt, datapoints._datapoint.Datapoint):
        return inpt.invert()
    elif isinstance(inpt, PIL.Image.Image):
        return invert_image_pil(inpt)
    else:
        raise TypeError(
            f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
            f"but got {type(inpt)} instead."
        )
import collections
import math
import pathlib
import warnings
from itertools import repeat
from types import FunctionType
from typing import Any, BinaryIO, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageColor, ImageDraw, ImageFont

__all__ = [
    "make_grid",
    "save_image",
    "draw_bounding_boxes",
    "draw_segmentation_masks",
    "draw_keypoints",
    "flow_to_image",
]


@torch.no_grad()
def make_grid(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    value_range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: float = 0.0,
    **kwargs,
) -> torch.Tensor:
    """
    Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by ``value_range``. Default: ``False``.
        value_range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Returns:
        grid (Tensor): the tensor containing grid of images.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(make_grid)
    if not torch.is_tensor(tensor):
        if isinstance(tensor, list):
            for t in tensor:
                if not torch.is_tensor(t):
                    raise TypeError(f"tensor or list of tensors expected, got a list containing {type(t)}")
        else:
            raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}")

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.unsqueeze(0)

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if value_range is not None and not isinstance(value_range, tuple):
            raise TypeError("value_range has to be a tuple (min, max) if specified. min and max are numbers")

        def norm_ip(img, low, high):
            # Clamp into [low, high], then rescale to [0, 1]; the 1e-5 floor on the
            # divisor guards against a zero range.
            img.clamp_(min=low, max=high)
            img.sub_(low).div_(max(high - low, 1e-5))

        def norm_range(t, value_range):
            if value_range is not None:
                norm_ip(t, value_range[0], value_range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, value_range)
        else:
            norm_range(tensor, value_range)

    # NOTE: at this point `tensor` is guaranteed to be a 4D Tensor (non-tensor,
    # non-list inputs raised above and lists were stacked), so the former
    # `isinstance(tensor, torch.Tensor)` re-check was unreachable and was removed.
    if tensor.size(0) == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            # Tensor.copy_() is a valid method but seems to be missing from the stubs
            # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_
            grid.narrow(1, y * height + padding, height - padding).narrow(  # type: ignore[attr-defined]
                2, x * width + padding, width - padding
            ).copy_(tensor[k])
            k = k + 1
    return grid


@torch.no_grad()
def save_image(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    fp: Union[str, pathlib.Path, BinaryIO],
    format: Optional[str] = None,
    **kwargs,
) -> None:
    """
    Save a given Tensor into an image file.

    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        fp (string or file object): A filename or a file object
        format(Optional): If omitted, the format to use is determined from the filename extension.
            If a file object was used instead of a filename, this parameter should always be used.
        **kwargs: Other arguments are documented in ``make_grid``.
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(save_image)
    grid = make_grid(tensor, **kwargs)
    # Add 0.5 after unnormalizing to [0, 255] to round to the nearest integer
    ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(fp, format=format)
+            If a file object was used instead of a filename, this parameter should always be used.
+        **kwargs: Other arguments are documented in ``make_grid``.
+    """
+
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(save_image)
+    grid = make_grid(tensor, **kwargs)
+    # Add 0.5 after unnormalizing to [0, 255] to round to the nearest integer
+    ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
+    im = Image.fromarray(ndarr)
+    im.save(fp, format=format)
+
+
+@torch.no_grad()
+def draw_bounding_boxes(
+    image: torch.Tensor,
+    boxes: torch.Tensor,
+    labels: Optional[List[str]] = None,
+    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
+    fill: Optional[bool] = False,
+    width: int = 1,
+    font: Optional[str] = None,
+    font_size: Optional[int] = None,
+) -> torch.Tensor:
+
+    """
+    Draws bounding boxes on given image.
+    The values of the input image should be uint8 between 0 and 255.
+    If fill is True, Resulting Tensor should be saved as PNG image.
+
+    Args:
+        image (Tensor): Tensor of shape (C x H x W) and dtype uint8.
+        boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
+            the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
+            `0 <= ymin < ymax < H`.
+        labels (List[str]): List containing the labels of bounding boxes.
+        colors (color or list of colors, optional): List containing the colors
+            of the boxes or single color for all boxes. The color can be represented as
+            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
+            By default, random colors are generated for boxes.
+        fill (bool): If `True` fills the bounding box with specified color.
+        width (int): Width of bounding box.
+        font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
+            also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
+            `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
+        font_size (int): The requested font size in points.
+
+    Returns:
+        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted.
+    """
+
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(draw_bounding_boxes)
+    # Input validation: only (1|3, H, W) uint8 images and well-ordered boxes are accepted.
+    if not isinstance(image, torch.Tensor):
+        raise TypeError(f"Tensor expected, got {type(image)}")
+    elif image.dtype != torch.uint8:
+        raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
+    elif image.dim() != 3:
+        raise ValueError("Pass individual images, not batches")
+    elif image.size(0) not in {1, 3}:
+        raise ValueError("Only grayscale and RGB images are supported")
+    elif (boxes[:, 0] > boxes[:, 2]).any() or (boxes[:, 1] > boxes[:, 3]).any():
+        raise ValueError(
+            "Boxes need to be in (xmin, ymin, xmax, ymax) format. Use torchvision.ops.box_convert to convert them"
+        )
+
+    num_boxes = boxes.shape[0]
+
+    if num_boxes == 0:
+        # Best-effort: an empty box set is not an error, just a no-op with a warning.
+        warnings.warn("boxes doesn't contain any box. No box was drawn")
+        return image
+
+    if labels is None:
+        labels: Union[List[str], List[None]] = [None] * num_boxes  # type: ignore[no-redef]
+    elif len(labels) != num_boxes:
+        raise ValueError(
+            f"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box."
+        )
+
+    if colors is None:
+        colors = _generate_color_palette(num_boxes)
+    elif isinstance(colors, list):
+        # NOTE(review): more colors than boxes is silently allowed here; only fewer raises.
+        if len(colors) < num_boxes:
+            raise ValueError(f"Number of colors ({len(colors)}) is less than number of boxes ({num_boxes}). ")
+    else:  # colors specifies a single color for all boxes
+        colors = [colors] * num_boxes
+
+    # Resolve PIL color names ("red", "#FF00FF", ...) to RGB tuples; tuples pass through.
+    colors = [(ImageColor.getrgb(color) if isinstance(color, str) else color) for color in colors]
+
+    if font is None:
+        if font_size is not None:
+            warnings.warn("Argument 'font_size' will be ignored since 'font' is not set.")
+        txt_font = ImageFont.load_default()
+    else:
+        txt_font = ImageFont.truetype(font=font, size=font_size or 10)
+
+    # Handle Grayscale images
+    if image.size(0) == 1:
+        image = torch.tile(image, (3, 1, 1))
+
+    ndarr = image.permute(1, 2, 0).cpu().numpy()
+    img_to_draw = Image.fromarray(ndarr)
+    img_boxes = boxes.to(torch.int64).tolist()
+
+    if fill:
+        # "RGBA" mode is required so the translucent fill below composites correctly.
+        draw = ImageDraw.Draw(img_to_draw, "RGBA")
+    else:
+        draw = ImageDraw.Draw(img_to_draw)
+
+    for bbox, color, label in zip(img_boxes, colors, labels):  # type: ignore[arg-type]
+        if fill:
+            # Append a fixed alpha of 100/255 to the RGB color for the interior fill.
+            fill_color = color + (100,)
+            draw.rectangle(bbox, width=width, outline=color, fill=fill_color)
+        else:
+            draw.rectangle(bbox, width=width, outline=color)
+
+        if label is not None:
+            # Offset the label inside the box so it clears the outline stroke.
+            margin = width + 1
+            draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font)
+
+    return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)
+
+
+@torch.no_grad()
+def draw_segmentation_masks(
+    image: torch.Tensor,
+    masks: torch.Tensor,
+    alpha: float = 0.8,
+    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
+) -> torch.Tensor:
+
+    """
+    Draws segmentation masks on given RGB image.
+    The values of the input image should be uint8 between 0 and 255.
+
+    Args:
+        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
+        masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool.
+        alpha (float): Float number between 0 and 1 denoting the transparency of the masks.
+            0 means full transparency, 1 means no transparency.
+        colors (color or list of colors, optional): List containing the colors
+            of the masks or single color for all masks. The color can be represented as
+            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
+            By default, random colors are generated for each mask.
+
+    Returns:
+        img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top.
+    """
+
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(draw_segmentation_masks)
+    # Input validation: exactly one RGB uint8 image, boolean masks matching its H x W.
+    if not isinstance(image, torch.Tensor):
+        raise TypeError(f"The image must be a tensor, got {type(image)}")
+    elif image.dtype != torch.uint8:
+        raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
+    elif image.dim() != 3:
+        raise ValueError("Pass individual images, not batches")
+    elif image.size()[0] != 3:
+        raise ValueError("Pass an RGB image. Other Image formats are not supported")
+    if masks.ndim == 2:
+        # Promote a single (H, W) mask to a batch of one.
+        masks = masks[None, :, :]
+    if masks.ndim != 3:
+        raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)")
+    if masks.dtype != torch.bool:
+        raise ValueError(f"The masks must be of dtype bool. Got {masks.dtype}")
+    if masks.shape[-2:] != image.shape[-2:]:
+        raise ValueError("The image and the masks must have the same height and width")
+
+    num_masks = masks.size()[0]
+    if colors is not None and num_masks > len(colors):
+        raise ValueError(f"There are more masks ({num_masks}) than colors ({len(colors)})")
+
+    if num_masks == 0:
+        # Best-effort: an empty mask set is not an error, just a no-op with a warning.
+        warnings.warn("masks doesn't contain any mask. No mask was drawn")
+        return image
+
+    if colors is None:
+        colors = _generate_color_palette(num_masks)
+
+    if not isinstance(colors, list):
+        colors = [colors]
+    if not isinstance(colors[0], (tuple, str)):
+        raise ValueError("colors must be a tuple or a string, or a list thereof")
+    if isinstance(colors[0], tuple) and len(colors[0]) != 3:
+        raise ValueError("It seems that you passed a tuple of colors instead of a list of colors")
+
+    out_dtype = torch.uint8
+
+    # Resolve any PIL color names to RGB and materialize each color as a uint8 tensor.
+    colors_ = []
+    for color in colors:
+        if isinstance(color, str):
+            color = ImageColor.getrgb(color)
+        colors_.append(torch.tensor(color, dtype=out_dtype))
+
+    img_to_draw = image.detach().clone()
+    # TODO: There might be a way to vectorize this
+    for mask, color in zip(masks, colors_):
+        img_to_draw[:, mask] = color[:, None]
+
+    # Blend over the whole image: where no mask was painted, img_to_draw equals image,
+    # so image * (1 - alpha) + image * alpha reduces to the original pixel values.
+    out = image * (1 - alpha) + img_to_draw * alpha
+    return out.to(out_dtype)
+
+
+@torch.no_grad()
+def draw_keypoints(
+    image: torch.Tensor,
+    keypoints: torch.Tensor,
+    connectivity: Optional[List[Tuple[int, int]]] = None,
+    colors: Optional[Union[str, Tuple[int, int, int]]] = None,
+    radius: int = 2,
+    width: int = 3,
+) -> torch.Tensor:
+
+    """
+    Draws Keypoints on given RGB image.
+    The values of the input image should be uint8 between 0 and 255.
+
+    Args:
+        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
+        keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoints location for each of the N instances,
+            in the format [x, y].
+        connectivity (List[Tuple[int, int]]]): A List of tuple where,
+            each tuple contains pair of keypoints to be connected.
+        colors (str, Tuple): The color can be represented as
+            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
+        radius (int): Integer denoting radius of keypoint.
+        width (int): Integer denoting width of line connecting keypoints.
+
+    Returns:
+        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn.
+ """ + + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(draw_keypoints) + if not isinstance(image, torch.Tensor): + raise TypeError(f"The image must be a tensor, got {type(image)}") + elif image.dtype != torch.uint8: + raise ValueError(f"The image dtype must be uint8, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + elif image.size()[0] != 3: + raise ValueError("Pass an RGB image. Other Image formats are not supported") + + if keypoints.ndim != 3: + raise ValueError("keypoints must be of shape (num_instances, K, 2)") + + ndarr = image.permute(1, 2, 0).cpu().numpy() + img_to_draw = Image.fromarray(ndarr) + draw = ImageDraw.Draw(img_to_draw) + img_kpts = keypoints.to(torch.int64).tolist() + + for kpt_id, kpt_inst in enumerate(img_kpts): + for inst_id, kpt in enumerate(kpt_inst): + x1 = kpt[0] - radius + x2 = kpt[0] + radius + y1 = kpt[1] - radius + y2 = kpt[1] + radius + draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0) + + if connectivity: + for connection in connectivity: + start_pt_x = kpt_inst[connection[0]][0] + start_pt_y = kpt_inst[connection[0]][1] + + end_pt_x = kpt_inst[connection[1]][0] + end_pt_y = kpt_inst[connection[1]][1] + + draw.line( + ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)), + width=width, + ) + + return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8) + + +# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization +@torch.no_grad() +def flow_to_image(flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a flow to an RGB image. + + Args: + flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float. + + Returns: + img (Tensor): Image Tensor of dtype uint8 where each color corresponds + to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input. 
+ """ + + if flow.dtype != torch.float: + raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.") + + orig_shape = flow.shape + if flow.ndim == 3: + flow = flow[None] # Add batch dim + + if flow.ndim != 4 or flow.shape[1] != 2: + raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.") + + max_norm = torch.sum(flow**2, dim=1).sqrt().max() + epsilon = torch.finfo((flow).dtype).eps + normalized_flow = flow / (max_norm + epsilon) + img = _normalized_flow_to_image(normalized_flow) + + if len(orig_shape) == 3: + img = img[0] # Remove batch dim + return img + + +@torch.no_grad() +def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a batch of normalized flow to an RGB image. + + Args: + normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W) + Returns: + img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8. + """ + + N, _, H, W = normalized_flow.shape + device = normalized_flow.device + flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device) + colorwheel = _make_colorwheel().to(device) # shape [55x3] + num_cols = colorwheel.shape[0] + norm = torch.sum(normalized_flow**2, dim=1).sqrt() + a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi + fk = (a + 1) / 2 * (num_cols - 1) + k0 = torch.floor(fk).to(torch.long) + k1 = k0 + 1 + k1[k1 == num_cols] = 0 + f = fk - k0 + + for c in range(colorwheel.shape[1]): + tmp = colorwheel[:, c] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1 - f) * col0 + f * col1 + col = 1 - norm * (1 - col) + flow_image[:, c, :, :] = torch.floor(255 * col) + return flow_image + + +def _make_colorwheel() -> torch.Tensor: + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf. 
+
+    Returns:
+        colorwheel (Tensor[55, 3]): Colorwheel Tensor.
+    """
+
+    # Number of wheel entries per color-pair transition (red-yellow, yellow-green, ...).
+    RY = 15
+    YG = 6
+    GC = 4
+    CB = 11
+    BM = 13
+    MR = 6
+
+    ncols = RY + YG + GC + CB + BM + MR  # 55 entries in total
+    colorwheel = torch.zeros((ncols, 3))
+    col = 0
+
+    # Each segment below ramps one RGB channel linearly while holding another at 255.
+    # RY
+    colorwheel[0:RY, 0] = 255
+    colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY)
+    col = col + RY
+    # YG
+    colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG)
+    colorwheel[col : col + YG, 1] = 255
+    col = col + YG
+    # GC
+    colorwheel[col : col + GC, 1] = 255
+    colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC)
+    col = col + GC
+    # CB
+    colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB)
+    colorwheel[col : col + CB, 2] = 255
+    col = col + CB
+    # BM
+    colorwheel[col : col + BM, 2] = 255
+    colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM)
+    col = col + BM
+    # MR
+    colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR)
+    colorwheel[col : col + MR, 0] = 255
+    return colorwheel
+
+
+def _generate_color_palette(num_objects: int):
+    # Deterministic pseudo-random RGB palette: multiplying the index by three large
+    # primes-like constants and reducing mod 255 spreads colors across the range.
+    # NOTE(review): tuple() over the 1-D result yields 0-dim tensor elements rather
+    # than plain ints — confirm downstream consumers (PIL color args) accept them.
+    palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
+    return [tuple((i * palette) % 255) for i in range(num_objects)]
+
+
+def _log_api_usage_once(obj: Any) -> None:
+
+    """
+    Logs API usage(module and name) within an organization.
+    In a large ecosystem, it's often useful to track the PyTorch and
+    TorchVision APIs usage. This API provides the similar functionality to the
+    logging module in the Python stdlib. It can be used for debugging purpose
+    to log which methods are used and by default it is inactive, unless the user
+    manually subscribes a logger via the `SetAPIUsageLogger method `_.
+    Please note it is triggered only once for the same API call within a process.
+    It does not collect any data from open-source users since it is no-op by default.
+    For more information, please refer to
+    * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging;
+    * Logging policy: https://github.com/pytorch/vision/issues/5052;
+
+    Args:
+        obj (class instance or method): an object to extract info from.
+    """
+    module = obj.__module__
+    if not module.startswith("torchvision"):
+        module = f"torchvision.internal.{module}"
+    # For class instances, __class__.__name__ is the class name; for plain
+    # functions it is overridden below with the function's own __name__.
+    name = obj.__class__.__name__
+    if isinstance(obj, FunctionType):
+        name = obj.__name__
+    torch._C._log_api_usage_once(f"{module}.{name}")
+
+
+def _make_ntuple(x: Any, n: int) -> Tuple[Any, ...]:
+    """
+    Make n-tuple from input x. If x is an iterable, then we just convert it to tuple.
+    Otherwise, we will make a tuple of length n, all with value of x.
+    reference: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/utils.py#L8
+
+    Args:
+        x (Any): input value
+        n (int): length of the resulting tuple
+    """
+    if isinstance(x, collections.abc.Iterable):
+        return tuple(x)
+    return tuple(repeat(x, n))
diff --git a/wemm/lib/python3.10/site-packages/torchvision/version.py b/wemm/lib/python3.10/site-packages/torchvision/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e82f34125554a916713eef3ccb88b484aea8395
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchvision/version.py
@@ -0,0 +1,5 @@
+__version__ = '0.15.1+cu118'
+git_version = '42759b1cc82bed60481c2802811595833e2ddd9b'
+from torchvision.extension import _check_cuda_version
+if _check_cuda_version() > 0:
+    cuda = _check_cuda_version()