diff --git a/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz b/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..815953a27a5fb1b600e2ee9f71f18c4e5fa6594e --- /dev/null +++ b/wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e91ddb21316f7400642cbf0078ae107bdae9b6daf96f89c9e74ca89c2c63dedd +size 1839 diff --git a/wemm/lib/python3.10/site-packages/botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz b/wemm/lib/python3.10/site-packages/botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..6fe9aa636425b3cc4b0e442bae5e524bae828de5 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab077c76d6b89219c42c47f5f8085f8fdd7bc8ad3de06eb9275fcd8f9730c7f7 +size 1287 diff --git a/wemm/lib/python3.10/site-packages/botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz b/wemm/lib/python3.10/site-packages/botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..5e7c3cdbc3eed34dfc2fb701baea5de43b04521b --- /dev/null +++ b/wemm/lib/python3.10/site-packages/botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9f335150b42d830a5467ceeecd7d2e1fb89a6435c22006420913b80ff01f621 +size 1289 diff --git a/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc b/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c272d636e7dbaf8e6be33700f03f60e548dff08 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc b/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad11d1e44402234dfa812f551e99f6ed15c8e95f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06cb3c65e00d8c6048e29371782d6d5c468a7666 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/idna/py.typed b/wemm/lib/python3.10/site-packages/idna/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/wemm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8490fb6b2f9075327e7f621d4e6bb1752e3b11e Binary files 
/dev/null and b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9783ebff550193d1b876caaad9be39964468bb56 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c26c137185e56050b77922f43b173f4588df6e3 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d3357e3dc23c17ac09b941efec9a13c4f862d3c --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/__init__.py @@ -0,0 +1,145 @@ +from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel +from ._stereo_matching import ( + CarlaStereo, + CREStereo, + ETH3DStereo, + FallingThingsStereo, + InStereo2k, + Kitti2012Stereo, + Kitti2015Stereo, + Middlebury2014Stereo, + SceneFlowStereo, + SintelStereo, +) +from .caltech import Caltech101, Caltech256 +from .celeba import CelebA +from .cifar import CIFAR10, CIFAR100 +from .cityscapes import Cityscapes +from .clevr import CLEVRClassification +from .coco import CocoCaptions, CocoDetection +from .country211 import Country211 +from .dtd import DTD +from .eurosat import EuroSAT +from .fakedata import FakeData +from .fer2013 import FER2013 +from .fgvc_aircraft import FGVCAircraft +from .flickr import Flickr30k, Flickr8k +from .flowers102 import Flowers102 +from .folder import DatasetFolder, ImageFolder +from .food101 import Food101 +from .gtsrb import GTSRB +from .hmdb51 import HMDB51 +from .imagenet import ImageNet +from .inaturalist import INaturalist +from .kinetics import Kinetics +from .kitti import Kitti +from .lfw import LFWPairs, LFWPeople +from .lsun import LSUN, LSUNClass +from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST +from .moving_mnist import MovingMNIST +from .omniglot import Omniglot +from .oxford_iiit_pet import OxfordIIITPet +from .pcam import PCAM +from .phototour import PhotoTour +from .places365 import Places365 +from .rendered_sst2 import RenderedSST2 +from .sbd import SBDataset +from .sbu import SBU +from .semeion import SEMEION +from .stanford_cars import StanfordCars +from .stl10 import STL10 +from .sun397 import SUN397 +from .svhn import SVHN +from .ucf101 import UCF101 +from .usps import USPS +from .vision import VisionDataset +from .voc import VOCDetection, VOCSegmentation +from .widerface import WIDERFace + +__all__ = ( + "LSUN", + "LSUNClass", + "ImageFolder", + "DatasetFolder", + "FakeData", + "CocoCaptions", + "CocoDetection", + "CIFAR10", + "CIFAR100", + "EMNIST", + "FashionMNIST", + "QMNIST", + "MNIST", + "KMNIST", + "StanfordCars", + "STL10", + "SUN397", + "SVHN", + "PhotoTour", + "SEMEION", + "Omniglot", + "SBU", + "Flickr8k", + "Flickr30k", + "Flowers102", + "VOCSegmentation", + "VOCDetection", + "Cityscapes", + "ImageNet", + "Caltech101", + "Caltech256", + 
"CelebA", + "WIDERFace", + "SBDataset", + "VisionDataset", + "USPS", + "Kinetics", + "HMDB51", + "UCF101", + "Places365", + "Kitti", + "INaturalist", + "LFWPeople", + "LFWPairs", + "KittiFlow", + "Sintel", + "FlyingChairs", + "FlyingThings3D", + "HD1K", + "Food101", + "DTD", + "FER2013", + "GTSRB", + "CLEVRClassification", + "OxfordIIITPet", + "PCAM", + "Country211", + "FGVCAircraft", + "EuroSAT", + "RenderedSST2", + "Kitti2012Stereo", + "Kitti2015Stereo", + "CarlaStereo", + "Middlebury2014Stereo", + "CREStereo", + "FallingThingsStereo", + "SceneFlowStereo", + "SintelStereo", + "InStereo2k", + "ETH3DStereo", +) + + +# We override current module's attributes to handle the import: +# from torchvision.datasets import wrap_dataset_for_transforms_v2 +# with beta state v2 warning from torchvision.datapoints +# We also want to avoid raising the warning when importing other attributes +# from torchvision.datasets +# Ref: https://peps.python.org/pep-0562/ +def __getattr__(name): + if name in ("wrap_dataset_for_transforms_v2",): + from torchvision.datapoints._dataset_wrapper import wrap_dataset_for_transforms_v2 + + return wrap_dataset_for_transforms_v2 + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f1720b161b80f972e32ede4bc248e43b04147e1 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/celeba.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/celeba.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7487cade839bc9b9c71390e0a48d79933c16ff55 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/celeba.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flowers102.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flowers102.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9568dede26ec730931b95ac7c8bd7e41fe60f350 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flowers102.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67e8816e3e5350fbfe76a6fd69d0f4eb873d351a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/pcam.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/pcam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3286552fee708f62e514181ad32fb4fc35a6cc3 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/pcam.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbd.cpython-310.pyc 
b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..854ee9a43035c9af879bcba8e880cd43c5c62102 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbd.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sun397.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sun397.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b521620c8af41fbba57c0c1fdac24fce61b59a1a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sun397.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/widerface.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/widerface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42e01f1d33cb9ea3396d6b481cb0f396ee9ea4ab Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/widerface.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/celeba.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/celeba.py new file mode 100644 index 0000000000000000000000000000000000000000..d055f92f1944a4d106ce9c40c3074b6af308166f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/celeba.py @@ -0,0 +1,189 @@ +import csv +import os +from collections import namedtuple +from typing import Any, Callable, List, Optional, Tuple, Union + +import PIL +import torch + +from .utils import check_integrity, download_file_from_google_drive, extract_archive, verify_str_arg +from .vision import VisionDataset + +CSV = namedtuple("CSV", ["header", "index", "data"]) + + +class CelebA(VisionDataset): + """`Large-scale CelebFaces Attributes (CelebA) Dataset `_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + split (string): One of {'train', 'valid', 'test', 'all'}. + Accordingly dataset is selected. + target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``, + or ``landmarks``. Can also be a list to output a tuple with all specified target types. + The targets represent: + + - ``attr`` (Tensor shape=(40,) dtype=int): binary (0, 1) labels for attributes + - ``identity`` (int): label for each person (data points with the same identity are the same person) + - ``bbox`` (Tensor shape=(4,) dtype=int): bounding box (x, y, width, height) + - ``landmarks`` (Tensor shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x, + righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y) + + Defaults to ``attr``. If empty, ``None`` will be returned as target. + + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.PILToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + base_folder = "celeba" + # There currently does not appear to be an easy way to extract 7z in python (without introducing additional + # dependencies). 
The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available + # right now. + file_list = [ + # File ID MD5 Hash Filename + ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"), + # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc","b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"), + # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"), + ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"), + ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"), + ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"), + ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"), + # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"), + ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"), + ] + + def __init__( + self, + root: str, + split: str = "train", + target_type: Union[List[str], str] = "attr", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + self.split = split + if isinstance(target_type, list): + self.target_type = target_type + else: + self.target_type = [target_type] + + if not self.target_type and self.target_transform is not None: + raise RuntimeError("target_transform is specified but target_type is empty") + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") + + split_map = { + "train": 0, + "valid": 1, + "test": 2, + "all": None, + } + split_ = split_map[verify_str_arg(split.lower(), "split", ("train", "valid", "test", "all"))] + splits = self._load_csv("list_eval_partition.txt") + identity = self._load_csv("identity_CelebA.txt") + bbox = self._load_csv("list_bbox_celeba.txt", header=1) + landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1) + attr = self._load_csv("list_attr_celeba.txt", header=1) + + mask = slice(None) if split_ is None else (splits.data == split_).squeeze() + + if mask == slice(None): # if split == "all" + self.filename = splits.index + else: + self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))] + self.identity = identity.data[mask] + self.bbox = bbox.data[mask] + self.landmarks_align = landmarks_align.data[mask] + self.attr = attr.data[mask] + # map from {-1, 1} to {0, 1} + self.attr = torch.div(self.attr + 1, 2, rounding_mode="floor") + self.attr_names = attr.header + + def _load_csv( + self, + filename: str, + header: Optional[int] = None, + ) -> CSV: + with open(os.path.join(self.root, self.base_folder, filename)) as csv_file: + data = list(csv.reader(csv_file, delimiter=" ", skipinitialspace=True)) + + if header is not None: + headers = data[header] + data = data[header + 1 :] + else: + headers = [] + + indices = [row[0] for row in data] + data = [row[1:] for row in data] + data_int = [list(map(int, i)) for i in data] + + return CSV(headers, indices, torch.tensor(data_int)) + + def _check_integrity(self) -> bool: + for (_, md5, filename) in self.file_list: + fpath = os.path.join(self.root, self.base_folder, filename) + _, ext = os.path.splitext(filename) + # Allow original archive to be deleted 
(zip and 7z) + # Only need the extracted images + if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5): + return False + + # Should check a hash of the images + return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba")) + + def download(self) -> None: + if self._check_integrity(): + print("Files already downloaded and verified") + return + + for (file_id, md5, filename) in self.file_list: + download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5) + + extract_archive(os.path.join(self.root, self.base_folder, "img_align_celeba.zip")) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index])) + + target: Any = [] + for t in self.target_type: + if t == "attr": + target.append(self.attr[index, :]) + elif t == "identity": + target.append(self.identity[index, 0]) + elif t == "bbox": + target.append(self.bbox[index, :]) + elif t == "landmarks": + target.append(self.landmarks_align[index, :]) + else: + # TODO: refactor with utils.verify_str_arg + raise ValueError(f'Target type "{t}" is not recognized.') + + if self.transform is not None: + X = self.transform(X) + + if target: + target = tuple(target) if len(target) > 1 else target[0] + + if self.target_transform is not None: + target = self.target_transform(target) + else: + target = None + + return X, target + + def __len__(self) -> int: + return len(self.attr) + + def extra_repr(self) -> str: + lines = ["Target type: {target_type}", "Split: {split}"] + return "\n".join(lines).format(**self.__dict__) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/country211.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/country211.py new file mode 100644 index 0000000000000000000000000000000000000000..59598fd44e26098d476103c09a703d2d37cd9857 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/country211.py @@ -0,0 +1,58 @@ +from pathlib import Path +from typing import Callable, Optional + +from .folder import ImageFolder +from .utils import download_and_extract_archive, verify_str_arg + + +class Country211(ImageFolder): + """`The Country211 Data Set `_ from OpenAI. + + This dataset was built by filtering the images from the YFCC100m dataset + that have GPS coordinate corresponding to a ISO-3166 country code. The + dataset is balanced by sampling 150 train images, 50 validation images, and + 100 test images for each country. + + Args: + root (string): Root directory of the dataset. + split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``. + transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop``. + target_transform (callable, optional): A function/transform that takes in the target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and puts it into + ``root/country211/``. If dataset is already downloaded, it is not downloaded again. 
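
For reference, a minimal usage sketch for the Country211 wrapper documented above — the root path and transform are placeholders, and download=True is assumed to be acceptable in the target environment:

from torchvision import transforms
from torchvision.datasets import Country211

# "data" is a hypothetical root; the archive is extracted into <root>/country211/
ds = Country211(root="data", split="valid", download=True,
                transform=transforms.PILToTensor())
image, label = ds[0]   # image tensor and integer class index
print(len(ds), ds.classes[label])
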
+ """ + + _URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz" + _MD5 = "84988d7644798601126c29e9877aab6a" + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + self._split = verify_str_arg(split, "split", ("train", "valid", "test")) + + root = Path(root).expanduser() + self.root = str(root) + self._base_folder = root / "country211" + + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. You can use download=True to download it") + + super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform) + self.root = str(root) + + def _check_exists(self) -> bool: + return self._base_folder.exists() and self._base_folder.is_dir() + + def _download(self) -> None: + if self._check_exists(): + return + download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py new file mode 100644 index 0000000000000000000000000000000000000000..bec6df5312d3e8d8958b4aa3b6b2a9c828baa657 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py @@ -0,0 +1,58 @@ +import os +from typing import Callable, Optional + +from .folder import ImageFolder +from .utils import download_and_extract_archive + + +class EuroSAT(ImageFolder): + """RGB version of the `EuroSAT `_ Dataset. + + Args: + root (string): Root directory of dataset where ``root/eurosat`` exists. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. Default is False. + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + self.root = os.path.expanduser(root) + self._base_folder = os.path.join(self.root, "eurosat") + self._data_folder = os.path.join(self._base_folder, "2750") + + if download: + self.download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. 
You can use download=True to download it") + + super().__init__(self._data_folder, transform=transform, target_transform=target_transform) + self.root = os.path.expanduser(root) + + def __len__(self) -> int: + return len(self.samples) + + def _check_exists(self) -> bool: + return os.path.exists(self._data_folder) + + def download(self) -> None: + + if self._check_exists(): + return + + os.makedirs(self._base_folder, exist_ok=True) + download_and_extract_archive( + "https://madm.dfki.de/files/sentinel/EuroSAT.zip", + download_root=self._base_folder, + md5="c8fa014336c82ac7804f0398fcb19387", + ) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py new file mode 100644 index 0000000000000000000000000000000000000000..bcd20c1e4a2dbe27b8b592cf7336c8e3e18154ee --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py @@ -0,0 +1,75 @@ +import csv +import pathlib +from typing import Any, Callable, Optional, Tuple + +import torch +from PIL import Image + +from .utils import check_integrity, verify_str_arg +from .vision import VisionDataset + + +class FER2013(VisionDataset): + """`FER2013 + `_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``root/fer2013`` exists. + split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``. + transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the target and transforms it. + """ + + _RESOURCES = { + "train": ("train.csv", "3f0dfb3d3fd99c811a1299cb947e3131"), + "test": ("test.csv", "b02c2298636a634e8c2faabbf3ea9a23"), + } + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + self._split = verify_str_arg(split, "split", self._RESOURCES.keys()) + super().__init__(root, transform=transform, target_transform=target_transform) + + base_folder = pathlib.Path(self.root) / "fer2013" + file_name, md5 = self._RESOURCES[self._split] + data_file = base_folder / file_name + if not check_integrity(str(data_file), md5=md5): + raise RuntimeError( + f"{file_name} not found in {base_folder} or corrupted. 
" + f"You can download it from " + f"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge" + ) + + with open(data_file, "r", newline="") as file: + self._samples = [ + ( + torch.tensor([int(idx) for idx in row["pixels"].split()], dtype=torch.uint8).reshape(48, 48), + int(row["emotion"]) if "emotion" in row else None, + ) + for row in csv.DictReader(file) + ] + + def __len__(self) -> int: + return len(self._samples) + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + image_tensor, target = self._samples[idx] + image = Image.fromarray(image_tensor.numpy()) + + if self.transform is not None: + image = self.transform(image) + + if self.target_transform is not None: + target = self.target_transform(target) + + return image, target + + def extra_repr(self) -> str: + return f"split={self._split}" diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py new file mode 100644 index 0000000000000000000000000000000000000000..aa705b305d80b37be71b8577575a038dc6c82e7f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +import os +from typing import Any, Callable, Optional, Tuple + +import PIL.Image + +from .utils import download_and_extract_archive, verify_str_arg +from .vision import VisionDataset + + +class FGVCAircraft(VisionDataset): + """`FGVC Aircraft `_ Dataset. + + The dataset contains 10,000 images of aircraft, with 100 images for each of 100 + different aircraft model variants, most of which are airplanes. + Aircraft models are organized in a three-levels hierarchy. The three levels, from + finer to coarser, are: + + - ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually + indistinguishable into one class. The dataset comprises 100 different variants. + - ``family``, e.g. Boeing 737. The dataset comprises 70 different families. + - ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers. + + Args: + root (string): Root directory of the FGVC Aircraft dataset. + split (string, optional): The dataset split, supports ``train``, ``val``, + ``trainval`` and ``test``. + annotation_level (str, optional): The annotation level, supports ``variant``, + ``family`` and ``manufacturer``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
+ """ + + _URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz" + + def __init__( + self, + root: str, + split: str = "trainval", + annotation_level: str = "variant", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test")) + self._annotation_level = verify_str_arg( + annotation_level, "annotation_level", ("variant", "family", "manufacturer") + ) + + self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b") + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. You can use download=True to download it") + + annotation_file = os.path.join( + self._data_path, + "data", + { + "variant": "variants.txt", + "family": "families.txt", + "manufacturer": "manufacturers.txt", + }[self._annotation_level], + ) + with open(annotation_file, "r") as f: + self.classes = [line.strip() for line in f] + + self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) + + image_data_folder = os.path.join(self._data_path, "data", "images") + labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt") + + self._image_files = [] + self._labels = [] + + with open(labels_file, "r") as f: + for line in f: + image_name, label_name = line.strip().split(" ", 1) + self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg")) + self._labels.append(self.class_to_idx[label_name]) + + def __len__(self) -> int: + return len(self._image_files) + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + image_file, label = self._image_files[idx], self._labels[idx] + image = PIL.Image.open(image_file).convert("RGB") + + if self.transform: + image = self.transform(image) + + if self.target_transform: + label = self.target_transform(label) + + return image, label + + def _download(self) -> None: + """ + Download the FGVC Aircraft dataset archive and extract it under root. + """ + if self._check_exists(): + return + download_and_extract_archive(self._URL, self.root) + + def _check_exists(self) -> bool: + return os.path.exists(self._data_path) and os.path.isdir(self._data_path) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/folder.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/folder.py new file mode 100644 index 0000000000000000000000000000000000000000..40d5e26d2427e34cee1e96c3b3d85bddf77d5c16 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/folder.py @@ -0,0 +1,317 @@ +import os +import os.path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union + +from PIL import Image + +from .vision import VisionDataset + + +def has_file_allowed_extension(filename: str, extensions: Union[str, Tuple[str, ...]]) -> bool: + """Checks if a file is an allowed extension. + + Args: + filename (string): path to a file + extensions (tuple of strings): extensions to consider (lowercase) + + Returns: + bool: True if the filename ends with one of given extensions + """ + return filename.lower().endswith(extensions if isinstance(extensions, str) else tuple(extensions)) + + +def is_image_file(filename: str) -> bool: + """Checks if a file is an allowed image extension. 
+ + Args: + filename (string): path to a file + + Returns: + bool: True if the filename ends with a known image extension + """ + return has_file_allowed_extension(filename, IMG_EXTENSIONS) + + +def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]: + """Finds the class folders in a dataset. + + See :class:`DatasetFolder` for details. + """ + classes = sorted(entry.name for entry in os.scandir(directory) if entry.is_dir()) + if not classes: + raise FileNotFoundError(f"Couldn't find any class folder in {directory}.") + + class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} + return classes, class_to_idx + + +def make_dataset( + directory: str, + class_to_idx: Optional[Dict[str, int]] = None, + extensions: Optional[Union[str, Tuple[str, ...]]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, +) -> List[Tuple[str, int]]: + """Generates a list of samples of a form (path_to_sample, class). + + See :class:`DatasetFolder` for details. + + Note: The class_to_idx parameter is here optional and will use the logic of the ``find_classes`` function + by default. + """ + directory = os.path.expanduser(directory) + + if class_to_idx is None: + _, class_to_idx = find_classes(directory) + elif not class_to_idx: + raise ValueError("'class_to_index' must have at least one entry to collect any samples.") + + both_none = extensions is None and is_valid_file is None + both_something = extensions is not None and is_valid_file is not None + if both_none or both_something: + raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") + + if extensions is not None: + + def is_valid_file(x: str) -> bool: + return has_file_allowed_extension(x, extensions) # type: ignore[arg-type] + + is_valid_file = cast(Callable[[str], bool], is_valid_file) + + instances = [] + available_classes = set() + for target_class in sorted(class_to_idx.keys()): + class_index = class_to_idx[target_class] + target_dir = os.path.join(directory, target_class) + if not os.path.isdir(target_dir): + continue + for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)): + for fname in sorted(fnames): + path = os.path.join(root, fname) + if is_valid_file(path): + item = path, class_index + instances.append(item) + + if target_class not in available_classes: + available_classes.add(target_class) + + empty_classes = set(class_to_idx.keys()) - available_classes + if empty_classes: + msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. " + if extensions is not None: + msg += f"Supported extensions are: {extensions if isinstance(extensions, str) else ', '.join(extensions)}" + raise FileNotFoundError(msg) + + return instances + + +class DatasetFolder(VisionDataset): + """A generic data loader. + + This default directory structure can be customized by overriding the + :meth:`find_classes` method. + + Args: + root (string): Root directory path. + loader (callable): A function to load a sample given its path. + extensions (tuple[string]): A list of allowed extensions. + both extensions and is_valid_file should not be passed. + transform (callable, optional): A function/transform that takes in + a sample and returns a transformed version. + E.g, ``transforms.RandomCrop`` for images. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. 
+ is_valid_file (callable, optional): A function that takes path of a file + and check if the file is a valid file (used to check of corrupt files) + both extensions and is_valid_file should not be passed. + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + samples (list): List of (sample path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__( + self, + root: str, + loader: Callable[[str], Any], + extensions: Optional[Tuple[str, ...]] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + classes, class_to_idx = self.find_classes(self.root) + samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file) + + self.loader = loader + self.extensions = extensions + + self.classes = classes + self.class_to_idx = class_to_idx + self.samples = samples + self.targets = [s[1] for s in samples] + + @staticmethod + def make_dataset( + directory: str, + class_to_idx: Dict[str, int], + extensions: Optional[Tuple[str, ...]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> List[Tuple[str, int]]: + """Generates a list of samples of a form (path_to_sample, class). + + This can be overridden to e.g. read files from a compressed zip file instead of from the disk. + + Args: + directory (str): root dataset directory, corresponding to ``self.root``. + class_to_idx (Dict[str, int]): Dictionary mapping class name to class index. + extensions (optional): A list of allowed extensions. + Either extensions or is_valid_file should be passed. Defaults to None. + is_valid_file (optional): A function that takes path of a file + and checks if the file is a valid file + (used to check of corrupt files) both extensions and + is_valid_file should not be passed. Defaults to None. + + Raises: + ValueError: In case ``class_to_idx`` is empty. + ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None. + FileNotFoundError: In case no valid file was found for any class. + + Returns: + List[Tuple[str, int]]: samples of a form (path_to_sample, class) + """ + if class_to_idx is None: + # prevent potential bug since make_dataset() would use the class_to_idx logic of the + # find_classes() function, instead of using that of the find_classes() method, which + # is potentially overridden and thus could have a different logic. + raise ValueError("The class_to_idx parameter cannot be None.") + return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file) + + def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]: + """Find the class folders in a dataset structured as follows:: + + directory/ + ├── class_x + │ ├── xxx.ext + │ ├── xxy.ext + │ └── ... + │ └── xxz.ext + └── class_y + ├── 123.ext + ├── nsdf3.ext + └── ... + └── asd932_.ext + + This method can be overridden to only consider + a subset of classes, or to adapt to a different dataset directory structure. + + Args: + directory(str): Root directory path, corresponding to ``self.root`` + + Raises: + FileNotFoundError: If ``dir`` has no class folders. + + Returns: + (Tuple[List[str], Dict[str, int]]): List of all classes and dictionary mapping each class to an index. 
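
Since find_classes is the documented extension point, here is a hedged sketch of overriding it to restrict a folder dataset to a subset of class directories (the "dog" prefix filter is purely illustrative):

from typing import Dict, List, Tuple
from torchvision.datasets import ImageFolder
from torchvision.datasets.folder import find_classes

class DogBreedsOnly(ImageFolder):
    def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
        # Keep only class folders whose names start with "dog"
        classes, _ = find_classes(directory)
        classes = [c for c in classes if c.startswith("dog")]
        return classes, {cls_name: i for i, cls_name in enumerate(classes)}
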
+ """ + return find_classes(directory) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (sample, target) where target is class_index of the target class. + """ + path, target = self.samples[index] + sample = self.loader(path) + if self.transform is not None: + sample = self.transform(sample) + if self.target_transform is not None: + target = self.target_transform(target) + + return sample, target + + def __len__(self) -> int: + return len(self.samples) + + +IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp") + + +def pil_loader(path: str) -> Image.Image: + # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) + with open(path, "rb") as f: + img = Image.open(f) + return img.convert("RGB") + + +# TODO: specify the return type +def accimage_loader(path: str) -> Any: + import accimage + + try: + return accimage.Image(path) + except OSError: + # Potentially a decoding problem, fall back to PIL.Image + return pil_loader(path) + + +def default_loader(path: str) -> Any: + from torchvision import get_image_backend + + if get_image_backend() == "accimage": + return accimage_loader(path) + else: + return pil_loader(path) + + +class ImageFolder(DatasetFolder): + """A generic data loader where the images are arranged in this way by default: :: + + root/dog/xxx.png + root/dog/xxy.png + root/dog/[...]/xxz.png + + root/cat/123.png + root/cat/nsdf3.png + root/cat/[...]/asd932_.png + + This class inherits from :class:`~torchvision.datasets.DatasetFolder` so + the same methods can be overridden to customize the dataset. + + Args: + root (string): Root directory path. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + is_valid_file (callable, optional): A function that takes path of an Image file + and check if the file is a valid file (used to check of corrupt files) + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + imgs (list): List of (image path, class_index) tuples + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + loader: Callable[[str], Any] = default_loader, + is_valid_file: Optional[Callable[[str], bool]] = None, + ): + super().__init__( + root, + loader, + IMG_EXTENSIONS if is_valid_file is None else None, + transform=transform, + target_transform=target_transform, + is_valid_file=is_valid_file, + ) + self.imgs = self.samples diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/food101.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/food101.py new file mode 100644 index 0000000000000000000000000000000000000000..d2557a82736dc80713b6cb807924d8723f1b0723 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/food101.py @@ -0,0 +1,93 @@ +import json +from pathlib import Path +from typing import Any, Callable, Optional, Tuple + +import PIL.Image + +from .utils import download_and_extract_archive, verify_str_arg +from .vision import VisionDataset + + +class Food101(VisionDataset): + """`The Food-101 Data Set `_. 
+ + The Food-101 is a challenging data set of 101 food categories with 101,000 images. + For each class, 250 manually reviewed test images are provided as well as 750 training images. + On purpose, the training images were not cleaned, and thus still contain some amount of noise. + This comes mostly in the form of intense colors and sometimes wrong labels. All images were + rescaled to have a maximum side length of 512 pixels. + + + Args: + root (string): Root directory of the dataset. + split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``. + transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop``. + target_transform (callable, optional): A function/transform that takes in the target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. Default is False. + """ + + _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz" + _MD5 = "85eeb15f3717b99a5da872d97d918f87" + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + self._split = verify_str_arg(split, "split", ("train", "test")) + self._base_folder = Path(self.root) / "food-101" + self._meta_folder = self._base_folder / "meta" + self._images_folder = self._base_folder / "images" + + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. You can use download=True to download it") + + self._labels = [] + self._image_files = [] + with open(self._meta_folder / f"{split}.json") as f: + metadata = json.loads(f.read()) + + self.classes = sorted(metadata.keys()) + self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) + + for class_label, im_rel_paths in metadata.items(): + self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths) + self._image_files += [ + self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths + ] + + def __len__(self) -> int: + return len(self._image_files) + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + image_file, label = self._image_files[idx], self._labels[idx] + image = PIL.Image.open(image_file).convert("RGB") + + if self.transform: + image = self.transform(image) + + if self.target_transform: + label = self.target_transform(label) + + return image, label + + def extra_repr(self) -> str: + return f"split={self._split}" + + def _check_exists(self) -> bool: + return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder)) + + def _download(self) -> None: + if self._check_exists(): + return + download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py new file mode 100644 index 0000000000000000000000000000000000000000..4b86bf2f2b9589afc120c3f8d8c07e74efee42f1 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py @@ -0,0 +1,212 @@ +import os +import shutil +import tempfile +from contextlib import contextmanager +from typing import Any, Dict, Iterator, List, Optional, Tuple + +import 
torch + +from .folder import ImageFolder +from .utils import check_integrity, extract_archive, verify_str_arg + +ARCHIVE_META = { + "train": ("ILSVRC2012_img_train.tar", "1d675b47d978889d74fa0da5fadfb00e"), + "val": ("ILSVRC2012_img_val.tar", "29b22e2961454d5413ddabcf34fc5622"), + "devkit": ("ILSVRC2012_devkit_t12.tar.gz", "fa75699e90414af021442c21a62c3abf"), +} + +META_FILE = "meta.bin" + + +class ImageNet(ImageFolder): + """`ImageNet `_ 2012 Classification Dataset. + + Args: + root (string): Root directory of the ImageNet Dataset. + split (string, optional): The dataset split, supports ``train``, or ``val``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + + Attributes: + classes (list): List of the class name tuples. + class_to_idx (dict): Dict with items (class_name, class_index). + wnids (list): List of the WordNet IDs. + wnid_to_idx (dict): Dict with items (wordnet_id, class_index). + imgs (list): List of (image path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__(self, root: str, split: str = "train", **kwargs: Any) -> None: + root = self.root = os.path.expanduser(root) + self.split = verify_str_arg(split, "split", ("train", "val")) + + self.parse_archives() + wnid_to_classes = load_meta_file(self.root)[0] + + super().__init__(self.split_folder, **kwargs) + self.root = root + + self.wnids = self.classes + self.wnid_to_idx = self.class_to_idx + self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] + self.class_to_idx = {cls: idx for idx, clss in enumerate(self.classes) for cls in clss} + + def parse_archives(self) -> None: + if not check_integrity(os.path.join(self.root, META_FILE)): + parse_devkit_archive(self.root) + + if not os.path.isdir(self.split_folder): + if self.split == "train": + parse_train_archive(self.root) + elif self.split == "val": + parse_val_archive(self.root) + + @property + def split_folder(self) -> str: + return os.path.join(self.root, self.split) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + +def load_meta_file(root: str, file: Optional[str] = None) -> Tuple[Dict[str, str], List[str]]: + if file is None: + file = META_FILE + file = os.path.join(root, file) + + if check_integrity(file): + return torch.load(file) + else: + msg = ( + "The meta file {} is not present in the root directory or is corrupted. " + "This file is automatically created by the ImageNet dataset." + ) + raise RuntimeError(msg.format(file, root)) + + +def _verify_archive(root: str, file: str, md5: str) -> None: + if not check_integrity(os.path.join(root, file), md5): + msg = ( + "The archive {} is not present in the root directory or is corrupted. " + "You need to download it externally and place it in {}." + ) + raise RuntimeError(msg.format(file, root)) + + +def parse_devkit_archive(root: str, file: Optional[str] = None) -> None: + """Parse the devkit archive of the ImageNet2012 classification dataset and save + the meta information in a binary file. + + Args: + root (str): Root directory containing the devkit archive + file (str, optional): Name of devkit archive. 
Defaults to + 'ILSVRC2012_devkit_t12.tar.gz' + """ + import scipy.io as sio + + def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, Tuple[str, ...]]]: + metafile = os.path.join(devkit_root, "data", "meta.mat") + meta = sio.loadmat(metafile, squeeze_me=True)["synsets"] + nums_children = list(zip(*meta))[4] + meta = [meta[idx] for idx, num_children in enumerate(nums_children) if num_children == 0] + idcs, wnids, classes = list(zip(*meta))[:3] + classes = [tuple(clss.split(", ")) for clss in classes] + idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)} + wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)} + return idx_to_wnid, wnid_to_classes + + def parse_val_groundtruth_txt(devkit_root: str) -> List[int]: + file = os.path.join(devkit_root, "data", "ILSVRC2012_validation_ground_truth.txt") + with open(file) as txtfh: + val_idcs = txtfh.readlines() + return [int(val_idx) for val_idx in val_idcs] + + @contextmanager + def get_tmp_dir() -> Iterator[str]: + tmp_dir = tempfile.mkdtemp() + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + + archive_meta = ARCHIVE_META["devkit"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + with get_tmp_dir() as tmp_dir: + extract_archive(os.path.join(root, file), tmp_dir) + + devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12") + idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root) + val_idcs = parse_val_groundtruth_txt(devkit_root) + val_wnids = [idx_to_wnid[idx] for idx in val_idcs] + + torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE)) + + +def parse_train_archive(root: str, file: Optional[str] = None, folder: str = "train") -> None: + """Parse the train images archive of the ImageNet2012 classification dataset and + prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the train images archive + file (str, optional): Name of train images archive. Defaults to + 'ILSVRC2012_img_train.tar' + folder (str, optional): Optional name for train images folder. Defaults to + 'train' + """ + archive_meta = ARCHIVE_META["train"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + train_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), train_root) + + archives = [os.path.join(train_root, archive) for archive in os.listdir(train_root)] + for archive in archives: + extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True) + + +def parse_val_archive( + root: str, file: Optional[str] = None, wnids: Optional[List[str]] = None, folder: str = "val" +) -> None: + """Parse the validation images archive of the ImageNet2012 classification dataset + and prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the validation images archive + file (str, optional): Name of validation images archive. Defaults to + 'ILSVRC2012_img_val.tar' + wnids (list, optional): List of WordNet IDs of the validation images. If None + is given, the IDs are loaded from the meta file in the root directory + folder (str, optional): Optional name for validation images folder. 
Defaults to + 'val' + """ + archive_meta = ARCHIVE_META["val"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + if wnids is None: + wnids = load_meta_file(root)[1] + + _verify_archive(root, file, md5) + + val_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), val_root) + + images = sorted(os.path.join(val_root, image) for image in os.listdir(val_root)) + + for wnid in set(wnids): + os.mkdir(os.path.join(val_root, wnid)) + + for wnid, img_file in zip(wnids, images): + shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file))) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py new file mode 100644 index 0000000000000000000000000000000000000000..50b32ef0f4affc9adaf9935144df84b6e7dc7ae6 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py @@ -0,0 +1,241 @@ +import os +import os.path +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from PIL import Image + +from .utils import download_and_extract_archive, verify_str_arg +from .vision import VisionDataset + +CATEGORIES_2021 = ["kingdom", "phylum", "class", "order", "family", "genus"] + +DATASET_URLS = { + "2017": "https://ml-inat-competition-datasets.s3.amazonaws.com/2017/train_val_images.tar.gz", + "2018": "https://ml-inat-competition-datasets.s3.amazonaws.com/2018/train_val2018.tar.gz", + "2019": "https://ml-inat-competition-datasets.s3.amazonaws.com/2019/train_val2019.tar.gz", + "2021_train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.tar.gz", + "2021_train_mini": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train_mini.tar.gz", + "2021_valid": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.tar.gz", +} + +DATASET_MD5 = { + "2017": "7c784ea5e424efaec655bd392f87301f", + "2018": "b1c6952ce38f31868cc50ea72d066cc3", + "2019": "c60a6e2962c9b8ccbd458d12c8582644", + "2021_train": "e0526d53c7f7b2e3167b2b43bb2690ed", + "2021_train_mini": "db6ed8330e634445efc8fec83ae81442", + "2021_valid": "f6f6e0e242e3d4c9569ba56400938afc", +} + + +class INaturalist(VisionDataset): + """`iNaturalist `_ Dataset. + + Args: + root (string): Root directory of dataset where the image files are stored. + This class does not require/use annotation files. + version (string, optional): Which version of the dataset to download/use. One of + '2017', '2018', '2019', '2021_train', '2021_train_mini', '2021_valid'. + Default: `2021_train`. + target_type (string or list, optional): Type of target to use, for 2021 versions, one of: + + - ``full``: the full category (species) + - ``kingdom``: e.g. "Animalia" + - ``phylum``: e.g. "Arthropoda" + - ``class``: e.g. "Insecta" + - ``order``: e.g. "Coleoptera" + - ``family``: e.g. "Cleridae" + - ``genus``: e.g. "Trichodes" + + for 2017-2019 versions, one of: + + - ``full``: the full (numeric) category + - ``super``: the super category, e.g. "Amphibians" + + Can also be a list to output a tuple with all specified target types. + Defaults to ``full``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. 
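
A brief sketch of how the target_type options listed above combine (paths hypothetical; download=True assumed):

from torchvision.datasets import INaturalist

ds = INaturalist(root="data", version="2021_valid",
                 target_type=["full", "family"], download=True)
img, (full_id, family_id) = ds[0]   # PIL image plus one target per requested type
print(ds.category_name("family", family_id))
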
If dataset is already downloaded, it is not + downloaded again. + """ + + def __init__( + self, + root: str, + version: str = "2021_train", + target_type: Union[List[str], str] = "full", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + self.version = verify_str_arg(version, "version", DATASET_URLS.keys()) + + super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform) + + os.makedirs(root, exist_ok=True) + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") + + self.all_categories: List[str] = [] + + # map: category type -> name of category -> index + self.categories_index: Dict[str, Dict[str, int]] = {} + + # list indexed by category id, containing mapping from category type -> index + self.categories_map: List[Dict[str, int]] = [] + + if not isinstance(target_type, list): + target_type = [target_type] + if self.version[:4] == "2021": + self.target_type = [verify_str_arg(t, "target_type", ("full", *CATEGORIES_2021)) for t in target_type] + self._init_2021() + else: + self.target_type = [verify_str_arg(t, "target_type", ("full", "super")) for t in target_type] + self._init_pre2021() + + # index of all files: (full category id, filename) + self.index: List[Tuple[int, str]] = [] + + for dir_index, dir_name in enumerate(self.all_categories): + files = os.listdir(os.path.join(self.root, dir_name)) + for fname in files: + self.index.append((dir_index, fname)) + + def _init_2021(self) -> None: + """Initialize based on 2021 layout""" + + self.all_categories = sorted(os.listdir(self.root)) + + # map: category type -> name of category -> index + self.categories_index = {k: {} for k in CATEGORIES_2021} + + for dir_index, dir_name in enumerate(self.all_categories): + pieces = dir_name.split("_") + if len(pieces) != 8: + raise RuntimeError(f"Unexpected category name {dir_name}, wrong number of pieces") + if pieces[0] != f"{dir_index:05d}": + raise RuntimeError(f"Unexpected category id {pieces[0]}, expecting {dir_index:05d}") + cat_map = {} + for cat, name in zip(CATEGORIES_2021, pieces[1:7]): + if name in self.categories_index[cat]: + cat_id = self.categories_index[cat][name] + else: + cat_id = len(self.categories_index[cat]) + self.categories_index[cat][name] = cat_id + cat_map[cat] = cat_id + self.categories_map.append(cat_map) + + def _init_pre2021(self) -> None: + """Initialize based on 2017-2019 layout""" + + # map: category type -> name of category -> index + self.categories_index = {"super": {}} + + cat_index = 0 + super_categories = sorted(os.listdir(self.root)) + for sindex, scat in enumerate(super_categories): + self.categories_index["super"][scat] = sindex + subcategories = sorted(os.listdir(os.path.join(self.root, scat))) + for subcat in subcategories: + if self.version == "2017": + # this version does not use ids as directory names + subcat_i = cat_index + cat_index += 1 + else: + try: + subcat_i = int(subcat) + except ValueError: + raise RuntimeError(f"Unexpected non-numeric dir name: {subcat}") + if subcat_i >= len(self.categories_map): + old_len = len(self.categories_map) + self.categories_map.extend([{}] * (subcat_i - old_len + 1)) + self.all_categories.extend([""] * (subcat_i - old_len + 1)) + if self.categories_map[subcat_i]: + raise RuntimeError(f"Duplicate category {subcat}") + self.categories_map[subcat_i] = {"super": sindex} + 
self.all_categories[subcat_i] = os.path.join(scat, subcat) + + # validate the dictionary + for cindex, c in enumerate(self.categories_map): + if not c: + raise RuntimeError(f"Missing category {cindex}") + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where the type of target specified by target_type. + """ + + cat_id, fname = self.index[index] + img = Image.open(os.path.join(self.root, self.all_categories[cat_id], fname)) + + target: Any = [] + for t in self.target_type: + if t == "full": + target.append(cat_id) + else: + target.append(self.categories_map[cat_id][t]) + target = tuple(target) if len(target) > 1 else target[0] + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.index) + + def category_name(self, category_type: str, category_id: int) -> str: + """ + Args: + category_type(str): one of "full", "kingdom", "phylum", "class", "order", "family", "genus" or "super" + category_id(int): an index (class id) from this category + + Returns: + the name of the category + """ + if category_type == "full": + return self.all_categories[category_id] + else: + if category_type not in self.categories_index: + raise ValueError(f"Invalid category type '{category_type}'") + else: + for name, id in self.categories_index[category_type].items(): + if id == category_id: + return name + raise ValueError(f"Invalid category id {category_id} for {category_type}") + + def _check_integrity(self) -> bool: + return os.path.exists(self.root) and len(os.listdir(self.root)) > 0 + + def download(self) -> None: + if self._check_integrity(): + raise RuntimeError( + f"The directory {self.root} already exists. " + f"If you want to re-download or re-extract the images, delete the directory." 
+ ) + + base_root = os.path.dirname(self.root) + + download_and_extract_archive( + DATASET_URLS[self.version], base_root, filename=f"{self.version}.tgz", md5=DATASET_MD5[self.version] + ) + + orig_dir_name = os.path.join(base_root, os.path.basename(DATASET_URLS[self.version]).rstrip(".tar.gz")) + if not os.path.exists(orig_dir_name): + raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}") + os.rename(orig_dir_name, self.root) + print(f"Dataset version '{self.version}' has been downloaded and prepared for use") diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/lfw.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/lfw.py new file mode 100644 index 0000000000000000000000000000000000000000..7a5aa45aa4db56d9f61fba663a45fcef47856d8d --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/lfw.py @@ -0,0 +1,255 @@ +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from PIL import Image + +from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg +from .vision import VisionDataset + + +class _LFW(VisionDataset): + + base_folder = "lfw-py" + download_url_prefix = "http://vis-www.cs.umass.edu/lfw/" + + file_dict = { + "original": ("lfw", "lfw.tgz", "a17d05bd522c52d84eca14327a23d494"), + "funneled": ("lfw_funneled", "lfw-funneled.tgz", "1b42dfed7d15c9b2dd63d5e5840c86ad"), + "deepfunneled": ("lfw-deepfunneled", "lfw-deepfunneled.tgz", "68331da3eb755a505a502b5aacb3c201"), + } + checksums = { + "pairs.txt": "9f1ba174e4e1c508ff7cdf10ac338a7d", + "pairsDevTest.txt": "5132f7440eb68cf58910c8a45a2ac10b", + "pairsDevTrain.txt": "4f27cbf15b2da4a85c1907eb4181ad21", + "people.txt": "450f0863dd89e85e73936a6d71a3474b", + "peopleDevTest.txt": "e4bf5be0a43b5dcd9dc5ccfcb8fb19c5", + "peopleDevTrain.txt": "54eaac34beb6d042ed3a7d883e247a21", + "lfw-names.txt": "a6d0a479bd074669f656265a6e693f6d", + } + annot_file = {"10fold": "", "train": "DevTrain", "test": "DevTest"} + names = "lfw-names.txt" + + def __init__( + self, + root: str, + split: str, + image_set: str, + view: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform) + + self.image_set = verify_str_arg(image_set.lower(), "image_set", self.file_dict.keys()) + images_dir, self.filename, self.md5 = self.file_dict[self.image_set] + + self.view = verify_str_arg(view.lower(), "view", ["people", "pairs"]) + self.split = verify_str_arg(split.lower(), "split", ["10fold", "train", "test"]) + self.labels_file = f"{self.view}{self.annot_file[self.split]}.txt" + self.data: List[Any] = [] + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. 
You can use download=True to download it") + + self.images_dir = os.path.join(self.root, images_dir) + + def _loader(self, path: str) -> Image.Image: + with open(path, "rb") as f: + img = Image.open(f) + return img.convert("RGB") + + def _check_integrity(self) -> bool: + st1 = check_integrity(os.path.join(self.root, self.filename), self.md5) + st2 = check_integrity(os.path.join(self.root, self.labels_file), self.checksums[self.labels_file]) + if not st1 or not st2: + return False + if self.view == "people": + return check_integrity(os.path.join(self.root, self.names), self.checksums[self.names]) + return True + + def download(self) -> None: + if self._check_integrity(): + print("Files already downloaded and verified") + return + url = f"{self.download_url_prefix}{self.filename}" + download_and_extract_archive(url, self.root, filename=self.filename, md5=self.md5) + download_url(f"{self.download_url_prefix}{self.labels_file}", self.root) + if self.view == "people": + download_url(f"{self.download_url_prefix}{self.names}", self.root) + + def _get_path(self, identity: str, no: Union[int, str]) -> str: + return os.path.join(self.images_dir, identity, f"{identity}_{int(no):04d}.jpg") + + def extra_repr(self) -> str: + return f"Alignment: {self.image_set}\nSplit: {self.split}" + + def __len__(self) -> int: + return len(self.data) + + +class LFWPeople(_LFW): + """`LFW `_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``lfw-py`` exists or will be saved to if download is set to True. + split (string, optional): The image split to use. Can be one of ``train``, ``test``, + ``10fold`` (default). + image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or + ``deepfunneled``. Defaults to ``funneled``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomRotation`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
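# Usage sketch for LFWPeople as documented above (the root path is an
# illustrative assumption; with download=True the archives are fetched into
# root/lfw-py on first use):
from torchvision import transforms
from torchvision.datasets import LFWPeople

lfw = LFWPeople(root="./data", split="train", image_set="funneled",
                transform=transforms.ToTensor(), download=True)
img, identity = lfw[0]   # image tensor and integer id of the person
print(len(lfw), "images of", len(lfw.class_to_idx), "identities")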
+ + """ + + def __init__( + self, + root: str, + split: str = "10fold", + image_set: str = "funneled", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, split, image_set, "people", transform, target_transform, download) + + self.class_to_idx = self._get_classes() + self.data, self.targets = self._get_people() + + def _get_people(self) -> Tuple[List[str], List[int]]: + data, targets = [], [] + with open(os.path.join(self.root, self.labels_file)) as f: + lines = f.readlines() + n_folds, s = (int(lines[0]), 1) if self.split == "10fold" else (1, 0) + + for fold in range(n_folds): + n_lines = int(lines[s]) + people = [line.strip().split("\t") for line in lines[s + 1 : s + n_lines + 1]] + s += n_lines + 1 + for i, (identity, num_imgs) in enumerate(people): + for num in range(1, int(num_imgs) + 1): + img = self._get_path(identity, num) + data.append(img) + targets.append(self.class_to_idx[identity]) + + return data, targets + + def _get_classes(self) -> Dict[str, int]: + with open(os.path.join(self.root, self.names)) as f: + lines = f.readlines() + names = [line.strip().split()[0] for line in lines] + class_to_idx = {name: i for i, name in enumerate(names)} + return class_to_idx + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target) where target is the identity of the person. + """ + img = self._loader(self.data[index]) + target = self.targets[index] + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def extra_repr(self) -> str: + return super().extra_repr() + f"\nClasses (identities): {len(self.class_to_idx)}" + + +class LFWPairs(_LFW): + """`LFW `_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``lfw-py`` exists or will be saved to if download is set to True. + split (string, optional): The image split to use. Can be one of ``train``, ``test``, + ``10fold``. Defaults to ``10fold``. + image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or + ``deepfunneled``. Defaults to ``funneled``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomRotation`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
+ + """ + + def __init__( + self, + root: str, + split: str = "10fold", + image_set: str = "funneled", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, split, image_set, "pairs", transform, target_transform, download) + + self.pair_names, self.data, self.targets = self._get_pairs(self.images_dir) + + def _get_pairs(self, images_dir: str) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[int]]: + pair_names, data, targets = [], [], [] + with open(os.path.join(self.root, self.labels_file)) as f: + lines = f.readlines() + if self.split == "10fold": + n_folds, n_pairs = lines[0].split("\t") + n_folds, n_pairs = int(n_folds), int(n_pairs) + else: + n_folds, n_pairs = 1, int(lines[0]) + s = 1 + + for fold in range(n_folds): + matched_pairs = [line.strip().split("\t") for line in lines[s : s + n_pairs]] + unmatched_pairs = [line.strip().split("\t") for line in lines[s + n_pairs : s + (2 * n_pairs)]] + s += 2 * n_pairs + for pair in matched_pairs: + img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[0], pair[2]), 1 + pair_names.append((pair[0], pair[0])) + data.append((img1, img2)) + targets.append(same) + for pair in unmatched_pairs: + img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[2], pair[3]), 0 + pair_names.append((pair[0], pair[2])) + data.append((img1, img2)) + targets.append(same) + + return pair_names, data, targets + + def __getitem__(self, index: int) -> Tuple[Any, Any, int]: + """ + Args: + index (int): Index + + Returns: + tuple: (image1, image2, target) where target is `0` for different indentities and `1` for same identities. + """ + img1, img2 = self.data[index] + img1, img2 = self._loader(img1), self._loader(img2) + target = self.targets[index] + + if self.transform is not None: + img1, img2 = self.transform(img1), self.transform(img2) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img1, img2, target diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py new file mode 100644 index 0000000000000000000000000000000000000000..667ee13717d9d2d9810750a0e1463f8d27a053dc --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py @@ -0,0 +1,125 @@ +import os +import os.path +import pathlib +from typing import Any, Callable, Optional, Sequence, Tuple, Union + +from PIL import Image + +from .utils import download_and_extract_archive, verify_str_arg +from .vision import VisionDataset + + +class OxfordIIITPet(VisionDataset): + """`Oxford-IIIT Pet Dataset `_. + + Args: + root (string): Root directory of the dataset. + split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``. + target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or + ``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent: + + - ``category`` (int): Label for one of the 37 pet categories. + - ``segmentation`` (PIL image): Segmentation trimap of the image. + + If empty, ``None`` will be returned as target. + + transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop``. 
+ target_transform (callable, optional): A function/transform that takes in the target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and puts it into + ``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again. + """ + + _RESOURCES = ( + ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"), + ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"), + ) + _VALID_TARGET_TYPES = ("category", "segmentation") + + def __init__( + self, + root: str, + split: str = "trainval", + target_types: Union[Sequence[str], str] = "category", + transforms: Optional[Callable] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ): + self._split = verify_str_arg(split, "split", ("trainval", "test")) + if isinstance(target_types, str): + target_types = [target_types] + self._target_types = [ + verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types + ] + + super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform) + self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet" + self._images_folder = self._base_folder / "images" + self._anns_folder = self._base_folder / "annotations" + self._segs_folder = self._anns_folder / "trimaps" + + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. You can use download=True to download it") + + image_ids = [] + self._labels = [] + with open(self._anns_folder / f"{self._split}.txt") as file: + for line in file: + image_id, label, *_ = line.strip().split() + image_ids.append(image_id) + self._labels.append(int(label) - 1) + + self.classes = [ + " ".join(part.title() for part in raw_cls.split("_")) + for raw_cls, _ in sorted( + {(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)}, + key=lambda image_id_and_label: image_id_and_label[1], + ) + ] + self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) + + self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids] + self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids] + + def __len__(self) -> int: + return len(self._images) + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + image = Image.open(self._images[idx]).convert("RGB") + + target: Any = [] + for target_type in self._target_types: + if target_type == "category": + target.append(self._labels[idx]) + else: # target_type == "segmentation" + target.append(Image.open(self._segs[idx])) + + if not target: + target = None + elif len(target) == 1: + target = target[0] + else: + target = tuple(target) + + if self.transforms: + image, target = self.transforms(image, target) + + return image, target + + def _check_exists(self) -> bool: + for folder in (self._images_folder, self._anns_folder): + if not (os.path.exists(folder) and os.path.isdir(folder)): + return False + else: + return True + + def _download(self) -> None: + if self._check_exists(): + return + + for url, md5 in self._RESOURCES: + download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/phototour.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/phototour.py new file mode 100644 index 
0000000000000000000000000000000000000000..edf1d2ee256ebf43dd637d0399e79cc8e4bd0cef --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/phototour.py @@ -0,0 +1,228 @@ +import os +from typing import Any, Callable, List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL import Image + +from .utils import download_url +from .vision import VisionDataset + + +class PhotoTour(VisionDataset): + """`Multi-view Stereo Correspondence `_ Dataset. + + .. note:: + + We only provide the newer version of the dataset, since the authors state that it + + is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the + patches are centred on real interest point detections, rather than being projections of 3D points as is the + case in the old dataset. + + The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm. + + + Args: + root (string): Root directory where images are. + name (string): Name of the dataset to load. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + urls = { + "notredame_harris": [ + "http://matthewalunbrown.com/patchdata/notredame_harris.zip", + "notredame_harris.zip", + "69f8c90f78e171349abdf0307afefe4d", + ], + "yosemite_harris": [ + "http://matthewalunbrown.com/patchdata/yosemite_harris.zip", + "yosemite_harris.zip", + "a73253d1c6fbd3ba2613c45065c00d46", + ], + "liberty_harris": [ + "http://matthewalunbrown.com/patchdata/liberty_harris.zip", + "liberty_harris.zip", + "c731fcfb3abb4091110d0ae8c7ba182c", + ], + "notredame": [ + "http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip", + "notredame.zip", + "509eda8535847b8c0a90bbb210c83484", + ], + "yosemite": ["http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip", "yosemite.zip", "533b2e8eb7ede31be40abc317b2fd4f0"], + "liberty": ["http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip", "liberty.zip", "fdd9152f138ea5ef2091746689176414"], + } + means = { + "notredame": 0.4854, + "yosemite": 0.4844, + "liberty": 0.4437, + "notredame_harris": 0.4854, + "yosemite_harris": 0.4844, + "liberty_harris": 0.4437, + } + stds = { + "notredame": 0.1864, + "yosemite": 0.1818, + "liberty": 0.2019, + "notredame_harris": 0.1864, + "yosemite_harris": 0.1818, + "liberty_harris": 0.2019, + } + lens = { + "notredame": 468159, + "yosemite": 633587, + "liberty": 450092, + "liberty_harris": 379587, + "yosemite_harris": 450912, + "notredame_harris": 325295, + } + image_ext = "bmp" + info_file = "info.txt" + matches_files = "m50_100000_100000_0.txt" + + def __init__( + self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False + ) -> None: + super().__init__(root, transform=transform) + self.name = name + self.data_dir = os.path.join(self.root, name) + self.data_down = os.path.join(self.root, f"{name}.zip") + self.data_file = os.path.join(self.root, f"{name}.pt") + + self.train = train + self.mean = self.means[name] + self.std = self.stds[name] + + if download: + self.download() + + if not self._check_datafile_exists(): + self.cache() + + # load the serialized data + self.data, self.labels, self.matches = torch.load(self.data_file) + + def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]: + """ + Args: + index (int): Index + + Returns: + 
tuple: (data1, data2, matches) + """ + if self.train: + data = self.data[index] + if self.transform is not None: + data = self.transform(data) + return data + m = self.matches[index] + data1, data2 = self.data[m[0]], self.data[m[1]] + if self.transform is not None: + data1 = self.transform(data1) + data2 = self.transform(data2) + return data1, data2, m[2] + + def __len__(self) -> int: + return len(self.data if self.train else self.matches) + + def _check_datafile_exists(self) -> bool: + return os.path.exists(self.data_file) + + def _check_downloaded(self) -> bool: + return os.path.exists(self.data_dir) + + def download(self) -> None: + if self._check_datafile_exists(): + print(f"# Found cached data {self.data_file}") + return + + if not self._check_downloaded(): + # download files + url = self.urls[self.name][0] + filename = self.urls[self.name][1] + md5 = self.urls[self.name][2] + fpath = os.path.join(self.root, filename) + + download_url(url, self.root, filename, md5) + + print(f"# Extracting data {self.data_down}\n") + + import zipfile + + with zipfile.ZipFile(fpath, "r") as z: + z.extractall(self.data_dir) + + os.unlink(fpath) + + def cache(self) -> None: + # process and save as torch files + print(f"# Caching data {self.data_file}") + + dataset = ( + read_image_file(self.data_dir, self.image_ext, self.lens[self.name]), + read_info_file(self.data_dir, self.info_file), + read_matches_files(self.data_dir, self.matches_files), + ) + + with open(self.data_file, "wb") as f: + torch.save(dataset, f) + + def extra_repr(self) -> str: + split = "Train" if self.train is True else "Test" + return f"Split: {split}" + + +def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor: + """Return a Tensor containing the patches""" + + def PIL2array(_img: Image.Image) -> np.ndarray: + """Convert PIL image type to numpy 2D array""" + return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64) + + def find_files(_data_dir: str, _image_ext: str) -> List[str]: + """Return a list with the file names of the images containing the patches""" + files = [] + # find those files with the specified extension + for file_dir in os.listdir(_data_dir): + if file_dir.endswith(_image_ext): + files.append(os.path.join(_data_dir, file_dir)) + return sorted(files) # sort files in ascend order to keep relations + + patches = [] + list_files = find_files(data_dir, image_ext) + + for fpath in list_files: + img = Image.open(fpath) + for y in range(0, img.height, 64): + for x in range(0, img.width, 64): + patch = img.crop((x, y, x + 64, y + 64)) + patches.append(PIL2array(patch)) + return torch.ByteTensor(np.array(patches[:n])) + + +def read_info_file(data_dir: str, info_file: str) -> torch.Tensor: + """Return a Tensor containing the list of labels + Read the file and keep only the ID of the 3D point. + """ + with open(os.path.join(data_dir, info_file)) as f: + labels = [int(line.split()[0]) for line in f] + return torch.LongTensor(labels) + + +def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor: + """Return a Tensor containing the ground truth matches + Read the file and keep only 3D point ID. + Matches are represented with a 1, non matches with a 0. 
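# Usage sketch for the PhotoTour dataset defined in this file; the root path is
# an assumption, and the first run downloads and caches the patch archive:
from torchvision.datasets import PhotoTour

train_set = PhotoTour(root="./data", name="liberty", train=True, download=True)
patch = train_set[0]                 # a single 64x64 patch in train mode
test_set = PhotoTour(root="./data", name="liberty", train=False, download=True)
p1, p2, is_match = test_set[0]       # a patch pair plus its 0/1 match label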
+ """ + matches = [] + with open(os.path.join(data_dir, matches_file)) as f: + for line in f: + line_split = line.split() + matches.append([int(line_split[0]), int(line_split[3]), int(line_split[1] == line_split[4])]) + return torch.LongTensor(matches) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8bd6f10e2666ba74ec8c8dafd06abb1a93a63e Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/stl10.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/stl10.py new file mode 100644 index 0000000000000000000000000000000000000000..f47d0c32a2cd88cb83393eeb833e44838725efb6 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/stl10.py @@ -0,0 +1,174 @@ +import os.path +from typing import Any, Callable, cast, Optional, Tuple + +import numpy as np +from PIL import Image + +from .utils import check_integrity, download_and_extract_archive, verify_str_arg +from .vision import VisionDataset + + +class STL10(VisionDataset): + """`STL10 `_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``stl10_binary`` exists. + split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. + Accordingly, dataset is selected. + folds (int, optional): One of {0-9} or None. + For training, loads one of the 10 pre-defined folds of 1k samples for the + standard evaluation procedure. If no value is passed, loads the 5k samples. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + base_folder = "stl10_binary" + url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz" + filename = "stl10_binary.tar.gz" + tgz_md5 = "91f7769df0f17e558f3565bffb0c7dfb" + class_names_file = "class_names.txt" + folds_list_file = "fold_indices.txt" + train_list = [ + ["train_X.bin", "918c2871b30a85fa023e0c44e0bee87f"], + ["train_y.bin", "5a34089d4802c674881badbb80307741"], + ["unlabeled_X.bin", "5242ba1fed5e4be9e1e742405eb56ca4"], + ] + + test_list = [["test_X.bin", "7f263ba9f9e0b06b93213547f721ac82"], ["test_y.bin", "36f9794fa4beb8a2c72628de14fa638e"]] + splits = ("train", "train+unlabeled", "unlabeled", "test") + + def __init__( + self, + root: str, + split: str = "train", + folds: Optional[int] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + self.split = verify_str_arg(split, "split", self.splits) + self.folds = self._verify_folds(folds) + + if download: + self.download() + elif not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. 
You can use download=True to download it") + + # now load the picked numpy arrays + self.labels: Optional[np.ndarray] + if self.split == "train": + self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0]) + self.labels = cast(np.ndarray, self.labels) + self.__load_folds(folds) + + elif self.split == "train+unlabeled": + self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0]) + self.labels = cast(np.ndarray, self.labels) + self.__load_folds(folds) + unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) + self.data = np.concatenate((self.data, unlabeled_data)) + self.labels = np.concatenate((self.labels, np.asarray([-1] * unlabeled_data.shape[0]))) + + elif self.split == "unlabeled": + self.data, _ = self.__loadfile(self.train_list[2][0]) + self.labels = np.asarray([-1] * self.data.shape[0]) + else: # self.split == 'test': + self.data, self.labels = self.__loadfile(self.test_list[0][0], self.test_list[1][0]) + + class_file = os.path.join(self.root, self.base_folder, self.class_names_file) + if os.path.isfile(class_file): + with open(class_file) as f: + self.classes = f.read().splitlines() + + def _verify_folds(self, folds: Optional[int]) -> Optional[int]: + if folds is None: + return folds + elif isinstance(folds, int): + if folds in range(10): + return folds + msg = "Value for argument folds should be in the range [0, 10), but got {}." + raise ValueError(msg.format(folds)) + else: + msg = "Expected type None or int for argument folds, but got type {}." + raise ValueError(msg.format(type(folds))) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + target: Optional[int] + if self.labels is not None: + img, target = self.data[index], int(self.labels[index]) + else: + img, target = self.data[index], None + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return self.data.shape[0] + + def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + labels = None + if labels_file: + path_to_labels = os.path.join(self.root, self.base_folder, labels_file) + with open(path_to_labels, "rb") as f: + labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based + + path_to_data = os.path.join(self.root, self.base_folder, data_file) + with open(path_to_data, "rb") as f: + # read whole file in uint8 chunks + everything = np.fromfile(f, dtype=np.uint8) + images = np.reshape(everything, (-1, 3, 96, 96)) + images = np.transpose(images, (0, 1, 3, 2)) + + return images, labels + + def _check_integrity(self) -> bool: + for filename, md5 in self.train_list + self.test_list: + fpath = os.path.join(self.root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print("Files already downloaded and verified") + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + self._check_integrity() + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + def __load_folds(self, folds: Optional[int]) -> None: + # loads 
one of the folds if specified + if folds is None: + return + path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file) + with open(path_to_folds) as f: + str_idx = f.read().splitlines()[folds] + list_idx = np.fromstring(str_idx, dtype=np.int64, sep=" ") + self.data = self.data[list_idx, :, :, :] + if self.labels is not None: + self.labels = self.labels[list_idx] diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/sun397.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/sun397.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1ffef9b98c33ed6927aa164e70672b467c6609 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/sun397.py @@ -0,0 +1,76 @@ +from pathlib import Path +from typing import Any, Callable, Optional, Tuple + +import PIL.Image + +from .utils import download_and_extract_archive +from .vision import VisionDataset + + +class SUN397(VisionDataset): + """`The SUN397 Data Set `_. + + The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of + 397 categories with 108'754 images. + + Args: + root (string): Root directory of the dataset. + transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop``. + target_transform (callable, optional): A function/transform that takes in the target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + _DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz" + _DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a" + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + self._data_dir = Path(self.root) / "SUN397" + + if download: + self._download() + + if not self._check_exists(): + raise RuntimeError("Dataset not found. 
You can use download=True to download it") + + with open(self._data_dir / "ClassName.txt") as f: + self.classes = [c[3:].strip() for c in f] + + self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) + self._image_files = list(self._data_dir.rglob("sun_*.jpg")) + + self._labels = [ + self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files + ] + + def __len__(self) -> int: + return len(self._image_files) + + def __getitem__(self, idx: int) -> Tuple[Any, Any]: + image_file, label = self._image_files[idx], self._labels[idx] + image = PIL.Image.open(image_file).convert("RGB") + + if self.transform: + image = self.transform(image) + + if self.target_transform: + label = self.target_transform(label) + + return image, label + + def _check_exists(self) -> bool: + return self._data_dir.is_dir() + + def _download(self) -> None: + if self._check_exists(): + return + download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py new file mode 100644 index 0000000000000000000000000000000000000000..60e83e158a327706777fdd75c6fb29d3b6cd1e18 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py @@ -0,0 +1,130 @@ +import os +from typing import Any, Callable, Dict, List, Optional, Tuple + +from torch import Tensor + +from .folder import find_classes, make_dataset +from .video_utils import VideoClips +from .vision import VisionDataset + + +class UCF101(VisionDataset): + """ + `UCF101 `_ dataset. + + UCF101 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. The dataset itself can be downloaded from the dataset website; + annotations that ``annotation_path`` should be pointing to can be downloaded from `here + `_. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the UCF101 Dataset. + annotation_path (str): path to the folder containing the split files; + see docstring above for download instructions of these files + frames_per_clip (int): number of frames in a clip. + step_between_clips (int, optional): number of frames between each clip. + fold (int, optional): which fold to use. Should be between 1 and 3. + train (bool, optional): if ``True``, creates a dataset from the train split, + otherwise from the ``test`` split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. + output_format (str, optional): The format of the output video tensors (before transforms). + Can be either "THWC" (default) or "TCHW". 
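# The clip count sketched in the "(2 + 3) = 5" example above follows simple
# floor arithmetic per video; a standalone illustration (not the torchvision
# implementation itself, which delegates to VideoClips):
def clips_per_video(num_frames: int, frames_per_clip: int, step: int) -> int:
    if num_frames < frames_per_clip:
        return 0
    return (num_frames - frames_per_clip) // step + 1

assert clips_per_video(10, frames_per_clip=5, step=5) == 2
assert clips_per_video(15, frames_per_clip=5, step=5) == 3   # 2 + 3 = 5 clips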
+ + Returns: + tuple: A 3-tuple with the following entries: + + - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames + - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + - label (int): class of the video clip + """ + + def __init__( + self, + root: str, + annotation_path: str, + frames_per_clip: int, + step_between_clips: int = 1, + frame_rate: Optional[int] = None, + fold: int = 1, + train: bool = True, + transform: Optional[Callable] = None, + _precomputed_metadata: Optional[Dict[str, Any]] = None, + num_workers: int = 1, + _video_width: int = 0, + _video_height: int = 0, + _video_min_dimension: int = 0, + _audio_samples: int = 0, + output_format: str = "THWC", + ) -> None: + super().__init__(root) + if not 1 <= fold <= 3: + raise ValueError(f"fold should be between 1 and 3, got {fold}") + + extensions = ("avi",) + self.fold = fold + self.train = train + + self.classes, class_to_idx = find_classes(self.root) + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + video_list = [x[0] for x in self.samples] + video_clips = VideoClips( + video_list, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + output_format=output_format, + ) + # we bookkeep the full version of video clips because we want to be able + # to return the metadata of full version rather than the subset version of + # video clips + self.full_video_clips = video_clips + self.indices = self._select_fold(video_list, annotation_path, fold, train) + self.video_clips = video_clips.subset(self.indices) + self.transform = transform + + @property + def metadata(self) -> Dict[str, Any]: + return self.full_video_clips.metadata + + def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]: + name = "train" if train else "test" + name = f"{name}list{fold:02d}.txt" + f = os.path.join(annotation_path, name) + selected_files = set() + with open(f) as fid: + data = fid.readlines() + data = [x.strip().split(" ")[0] for x in data] + data = [os.path.join(self.root, *x.split("/")) for x in data] + selected_files.update(data) + indices = [i for i in range(len(video_list)) if video_list[i] in selected_files] + return indices + + def __len__(self) -> int: + return self.video_clips.num_clips() + + def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]: + video, audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[self.indices[video_idx]][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/usps.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/usps.py new file mode 100644 index 0000000000000000000000000000000000000000..d61d8c30368232479df308f5ae45d40f4ae07976 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/usps.py @@ -0,0 +1,95 @@ +import os +from typing import Any, Callable, Optional, Tuple + +import numpy as np +from PIL import Image + +from .utils import download_url +from .vision import VisionDataset + + +class USPS(VisionDataset): + """`USPS `_ Dataset. + The data-format is : [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``. + The value for each pixel lies in ``[-1, 1]``. 
Here we transform the ``label`` into ``[0, 9]`` + and make pixel values in ``[0, 255]``. + + Args: + root (string): Root directory of dataset to store``USPS`` data files. + train (bool, optional): If True, creates dataset from ``usps.bz2``, + otherwise from ``usps.t.bz2``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + split_list = { + "train": [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2", + "usps.bz2", + "ec16c51db3855ca6c91edd34d0e9b197", + ], + "test": [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2", + "usps.t.bz2", + "8ea070ee2aca1ac39742fdd1ef5ed118", + ], + } + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + split = "train" if train else "test" + url, filename, checksum = self.split_list[split] + full_path = os.path.join(self.root, filename) + + if download and not os.path.exists(full_path): + download_url(url, self.root, filename, md5=checksum) + + import bz2 + + with bz2.open(full_path) as fp: + raw_data = [line.decode().split() for line in fp.readlines()] + tmp_list = [[x.split(":")[-1] for x in data[1:]] for data in raw_data] + imgs = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16)) + imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8) + targets = [int(d[0]) - 1 for d in raw_data] + + self.data = imgs + self.targets = targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img, mode="L") + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bb1974b7a4f68b7e156f9b7e1355eaadfbb6c3f8 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py @@ -0,0 +1,419 @@ +import bisect +import math +import warnings +from fractions import Fraction +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, TypeVar, Union + +import torch +from torchvision.io import _probe_video_from_file, _read_video_from_file, read_video, read_video_timestamps + +from .utils import tqdm + +T = TypeVar("T") + + +def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int: + """convert pts between different time bases + Args: + pts: presentation timestamp, float + timebase_from: original timebase. Fraction + timebase_to: new timebase. 
Fraction + round_func: rounding function. + """ + new_pts = Fraction(pts, 1) * timebase_from / timebase_to + return round_func(new_pts) + + +def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor: + """ + similar to tensor.unfold, but with the dilation + and specialized for 1d tensors + + Returns all consecutive windows of `size` elements, with + `step` between windows. The distance between each element + in a window is given by `dilation`. + """ + if tensor.dim() != 1: + raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}") + o_stride = tensor.stride(0) + numel = tensor.numel() + new_stride = (step * o_stride, dilation * o_stride) + new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size) + if new_size[0] < 1: + new_size = (0, size) + return torch.as_strided(tensor, new_size, new_stride) + + +class _VideoTimestampsDataset: + """ + Dataset used to parallelize the reading of the timestamps + of a list of videos, given their paths in the filesystem. + + Used in VideoClips and defined at top level, so it can be + pickled when forking. + """ + + def __init__(self, video_paths: List[str]) -> None: + self.video_paths = video_paths + + def __len__(self) -> int: + return len(self.video_paths) + + def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]: + return read_video_timestamps(self.video_paths[idx]) + + +def _collate_fn(x: T) -> T: + """ + Dummy collate function to be used with _VideoTimestampsDataset + """ + return x + + +class VideoClips: + """ + Given a list of video files, computes all consecutive subvideos of size + `clip_length_in_frames`, where the distance between each subvideo in the + same video is defined by `frames_between_clips`. + If `frame_rate` is specified, it will also resample all the videos to have + the same frame rate, and the clips will refer to this frame rate. + + Creating this instance the first time is time-consuming, as it needs to + decode all the videos in `video_paths`. It is recommended that you + cache the results after instantiation of the class. + + Recreating the clips for different clip lengths is fast, and can be done + with the `compute_clips` method. + + Args: + video_paths (List[str]): paths to the video files + clip_length_in_frames (int): size of a clip in number of frames + frames_between_clips (int): step (in frames) between each clip + frame_rate (int, optional): if specified, it will resample the video + so that it has `frame_rate`, and then the clips will be defined + on the resampled video + num_workers (int): how many subprocesses to use for data loading. + 0 means that the data will be loaded in the main process. (default: 0) + output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW". 
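# Two small sketches for this module. First, the windowing performed by the
# `unfold` helper defined above (deterministic, runnable as-is):
import torch
from torchvision.datasets.video_utils import unfold, VideoClips

print(unfold(torch.arange(10), size=4, step=3))
# tensor([[0, 1, 2, 3],
#         [3, 4, 5, 6],
#         [6, 7, 8, 9]])

# Second, a typical VideoClips construction and query; the file names below are
# placeholders, and every listed video is decoded once to collect timestamps:
clips = VideoClips(["clip_a.mp4", "clip_b.mp4"], clip_length_in_frames=16, frames_between_clips=8)
video, audio, info, video_idx = clips.get_clip(0)
print(clips.num_clips(), video.shape, info["video_fps"])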
+ """ + + def __init__( + self, + video_paths: List[str], + clip_length_in_frames: int = 16, + frames_between_clips: int = 1, + frame_rate: Optional[int] = None, + _precomputed_metadata: Optional[Dict[str, Any]] = None, + num_workers: int = 0, + _video_width: int = 0, + _video_height: int = 0, + _video_min_dimension: int = 0, + _video_max_dimension: int = 0, + _audio_samples: int = 0, + _audio_channels: int = 0, + output_format: str = "THWC", + ) -> None: + + self.video_paths = video_paths + self.num_workers = num_workers + + # these options are not valid for pyav backend + self._video_width = _video_width + self._video_height = _video_height + self._video_min_dimension = _video_min_dimension + self._video_max_dimension = _video_max_dimension + self._audio_samples = _audio_samples + self._audio_channels = _audio_channels + self.output_format = output_format.upper() + if self.output_format not in ("THWC", "TCHW"): + raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.") + + if _precomputed_metadata is None: + self._compute_frame_pts() + else: + self._init_from_metadata(_precomputed_metadata) + self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate) + + def _compute_frame_pts(self) -> None: + self.video_pts = [] + self.video_fps = [] + + # strategy: use a DataLoader to parallelize read_video_timestamps + # so need to create a dummy dataset first + import torch.utils.data + + dl: torch.utils.data.DataLoader = torch.utils.data.DataLoader( + _VideoTimestampsDataset(self.video_paths), # type: ignore[arg-type] + batch_size=16, + num_workers=self.num_workers, + collate_fn=_collate_fn, + ) + + with tqdm(total=len(dl)) as pbar: + for batch in dl: + pbar.update(1) + clips, fps = list(zip(*batch)) + # we need to specify dtype=torch.long because for empty list, + # torch.as_tensor will use torch.float as default dtype. This + # happens when decoding fails and no pts is returned in the list. 
+ clips = [torch.as_tensor(c, dtype=torch.long) for c in clips] + self.video_pts.extend(clips) + self.video_fps.extend(fps) + + def _init_from_metadata(self, metadata: Dict[str, Any]) -> None: + self.video_paths = metadata["video_paths"] + assert len(self.video_paths) == len(metadata["video_pts"]) + self.video_pts = metadata["video_pts"] + assert len(self.video_paths) == len(metadata["video_fps"]) + self.video_fps = metadata["video_fps"] + + @property + def metadata(self) -> Dict[str, Any]: + _metadata = { + "video_paths": self.video_paths, + "video_pts": self.video_pts, + "video_fps": self.video_fps, + } + return _metadata + + def subset(self, indices: List[int]) -> "VideoClips": + video_paths = [self.video_paths[i] for i in indices] + video_pts = [self.video_pts[i] for i in indices] + video_fps = [self.video_fps[i] for i in indices] + metadata = { + "video_paths": video_paths, + "video_pts": video_pts, + "video_fps": video_fps, + } + return type(self)( + video_paths, + self.num_frames, + self.step, + self.frame_rate, + _precomputed_metadata=metadata, + num_workers=self.num_workers, + _video_width=self._video_width, + _video_height=self._video_height, + _video_min_dimension=self._video_min_dimension, + _video_max_dimension=self._video_max_dimension, + _audio_samples=self._audio_samples, + _audio_channels=self._audio_channels, + output_format=self.output_format, + ) + + @staticmethod + def compute_clips_for_video( + video_pts: torch.Tensor, num_frames: int, step: int, fps: int, frame_rate: Optional[int] = None + ) -> Tuple[torch.Tensor, Union[List[slice], torch.Tensor]]: + if fps is None: + # if for some reason the video doesn't have fps (because doesn't have a video stream) + # set the fps to 1. The value doesn't matter, because video_pts is empty anyway + fps = 1 + if frame_rate is None: + frame_rate = fps + total_frames = len(video_pts) * (float(frame_rate) / fps) + _idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate) + video_pts = video_pts[_idxs] + clips = unfold(video_pts, num_frames, step) + if not clips.numel(): + warnings.warn( + "There aren't enough frames in the current video to get a clip for the given clip length and " + "frames between clips. The video (and potentially others) will be skipped." + ) + idxs: Union[List[slice], torch.Tensor] + if isinstance(_idxs, slice): + idxs = [_idxs] * len(clips) + else: + idxs = unfold(_idxs, num_frames, step) + return clips, idxs + + def compute_clips(self, num_frames: int, step: int, frame_rate: Optional[int] = None) -> None: + """ + Compute all consecutive sequences of clips from video_pts. + Always returns clips of size `num_frames`, meaning that the + last few frames in a video can potentially be dropped. 
+ + Args: + num_frames (int): number of frames for the clip + step (int): distance between two clips + frame_rate (int, optional): The frame rate + """ + self.num_frames = num_frames + self.step = step + self.frame_rate = frame_rate + self.clips = [] + self.resampling_idxs = [] + for video_pts, fps in zip(self.video_pts, self.video_fps): + clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate) + self.clips.append(clips) + self.resampling_idxs.append(idxs) + clip_lengths = torch.as_tensor([len(v) for v in self.clips]) + self.cumulative_sizes = clip_lengths.cumsum(0).tolist() + + def __len__(self) -> int: + return self.num_clips() + + def num_videos(self) -> int: + return len(self.video_paths) + + def num_clips(self) -> int: + """ + Number of subclips that are available in the video list. + """ + return self.cumulative_sizes[-1] + + def get_clip_location(self, idx: int) -> Tuple[int, int]: + """ + Converts a flattened representation of the indices into a video_idx, clip_idx + representation. + """ + video_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if video_idx == 0: + clip_idx = idx + else: + clip_idx = idx - self.cumulative_sizes[video_idx - 1] + return video_idx, clip_idx + + @staticmethod + def _resample_video_idx(num_frames: int, original_fps: int, new_fps: int) -> Union[slice, torch.Tensor]: + step = float(original_fps) / new_fps + if step.is_integer(): + # optimization: if step is integer, don't need to perform + # advanced indexing + step = int(step) + return slice(None, None, step) + idxs = torch.arange(num_frames, dtype=torch.float32) * step + idxs = idxs.floor().to(torch.int64) + return idxs + + def get_clip(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any], int]: + """ + Gets a subclip from a list of videos. + + Args: + idx (int): index of the subclip. Must be between 0 and num_clips(). 
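# How a flat clip index maps to (video_idx, clip_idx) via the cumulative clip
# counts, mirroring get_clip_location above (standalone illustration with
# made-up sizes):
import bisect

cumulative_sizes = [3, 5, 9]        # e.g. videos holding 3, 2 and 4 clips
def locate(idx: int):
    video_idx = bisect.bisect_right(cumulative_sizes, idx)
    clip_idx = idx if video_idx == 0 else idx - cumulative_sizes[video_idx - 1]
    return video_idx, clip_idx

assert locate(0) == (0, 0)
assert locate(3) == (1, 0)
assert locate(8) == (2, 3)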
+ + Returns: + video (Tensor) + audio (Tensor) + info (Dict) + video_idx (int): index of the video in `video_paths` + """ + if idx >= self.num_clips(): + raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)") + video_idx, clip_idx = self.get_clip_location(idx) + video_path = self.video_paths[video_idx] + clip_pts = self.clips[video_idx][clip_idx] + + from torchvision import get_video_backend + + backend = get_video_backend() + + if backend == "pyav": + # check for invalid options + if self._video_width != 0: + raise ValueError("pyav backend doesn't support _video_width != 0") + if self._video_height != 0: + raise ValueError("pyav backend doesn't support _video_height != 0") + if self._video_min_dimension != 0: + raise ValueError("pyav backend doesn't support _video_min_dimension != 0") + if self._video_max_dimension != 0: + raise ValueError("pyav backend doesn't support _video_max_dimension != 0") + if self._audio_samples != 0: + raise ValueError("pyav backend doesn't support _audio_samples != 0") + + if backend == "pyav": + start_pts = clip_pts[0].item() + end_pts = clip_pts[-1].item() + video, audio, info = read_video(video_path, start_pts, end_pts) + else: + _info = _probe_video_from_file(video_path) + video_fps = _info.video_fps + audio_fps = None + + video_start_pts = cast(int, clip_pts[0].item()) + video_end_pts = cast(int, clip_pts[-1].item()) + + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase = Fraction(0, 1) + video_timebase = Fraction(_info.video_timebase.numerator, _info.video_timebase.denominator) + if _info.has_audio: + audio_timebase = Fraction(_info.audio_timebase.numerator, _info.audio_timebase.denominator) + audio_start_pts = pts_convert(video_start_pts, video_timebase, audio_timebase, math.floor) + audio_end_pts = pts_convert(video_end_pts, video_timebase, audio_timebase, math.ceil) + audio_fps = _info.audio_sample_rate + video, audio, _ = _read_video_from_file( + video_path, + video_width=self._video_width, + video_height=self._video_height, + video_min_dimension=self._video_min_dimension, + video_max_dimension=self._video_max_dimension, + video_pts_range=(video_start_pts, video_end_pts), + video_timebase=video_timebase, + audio_samples=self._audio_samples, + audio_channels=self._audio_channels, + audio_pts_range=(audio_start_pts, audio_end_pts), + audio_timebase=audio_timebase, + ) + + info = {"video_fps": video_fps} + if audio_fps is not None: + info["audio_fps"] = audio_fps + + if self.frame_rate is not None: + resampling_idx = self.resampling_idxs[video_idx][clip_idx] + if isinstance(resampling_idx, torch.Tensor): + resampling_idx = resampling_idx - resampling_idx[0] + video = video[resampling_idx] + info["video_fps"] = self.frame_rate + assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}" + + if self.output_format == "TCHW": + # [T,H,W,C] --> [T,C,H,W] + video = video.permute(0, 3, 1, 2) + + return video, audio, info, video_idx + + def __getstate__(self) -> Dict[str, Any]: + video_pts_sizes = [len(v) for v in self.video_pts] + # To be back-compatible, we convert data to dtype torch.long as needed + # because for empty list, in legacy implementation, torch.as_tensor will + # use torch.float as default dtype. This happens when decoding fails and + # no pts is returned in the list. 
+ video_pts = [x.to(torch.int64) for x in self.video_pts] + # video_pts can be an empty list if no frames have been decoded + if video_pts: + video_pts = torch.cat(video_pts) # type: ignore[assignment] + # avoid bug in https://github.com/pytorch/pytorch/issues/32351 + # TODO: Revert it once the bug is fixed. + video_pts = video_pts.numpy() # type: ignore[attr-defined] + + # make a copy of the fields of self + d = self.__dict__.copy() + d["video_pts_sizes"] = video_pts_sizes + d["video_pts"] = video_pts + # delete the following attributes to reduce the size of dictionary. They + # will be re-computed in "__setstate__()" + del d["clips"] + del d["resampling_idxs"] + del d["cumulative_sizes"] + + # for backwards-compatibility + d["_version"] = 2 + return d + + def __setstate__(self, d: Dict[str, Any]) -> None: + # for backwards-compatibility + if "_version" not in d: + self.__dict__ = d + return + + video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64) + video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0) + # don't need this info anymore + del d["video_pts_sizes"] + + d["video_pts"] = video_pts + self.__dict__ = d + # recompute attributes "clips", "resampling_idxs" and other derivative ones + self.compute_clips(self.num_frames, self.step, self.frame_rate) diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/voc.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/voc.py new file mode 100644 index 0000000000000000000000000000000000000000..dc29bca576691fb1fe09e833224f2e4a9c009fa5 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/voc.py @@ -0,0 +1,224 @@ +import collections +import os +from xml.etree.ElementTree import Element as ET_Element + +from .vision import VisionDataset + +try: + from defusedxml.ElementTree import parse as ET_parse +except ImportError: + from xml.etree.ElementTree import parse as ET_parse +from typing import Any, Callable, Dict, List, Optional, Tuple + +from PIL import Image + +from .utils import download_and_extract_archive, verify_str_arg + +DATASET_YEAR_DICT = { + "2012": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar", + "filename": "VOCtrainval_11-May-2012.tar", + "md5": "6cd6e144f989b92b3379bac3b3de84fd", + "base_dir": os.path.join("VOCdevkit", "VOC2012"), + }, + "2011": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar", + "filename": "VOCtrainval_25-May-2011.tar", + "md5": "6c3384ef61512963050cb5d687e5bf1e", + "base_dir": os.path.join("TrainVal", "VOCdevkit", "VOC2011"), + }, + "2010": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar", + "filename": "VOCtrainval_03-May-2010.tar", + "md5": "da459979d0c395079b5c75ee67908abb", + "base_dir": os.path.join("VOCdevkit", "VOC2010"), + }, + "2009": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar", + "filename": "VOCtrainval_11-May-2009.tar", + "md5": "a3e00b113cfcfebf17e343f59da3caa1", + "base_dir": os.path.join("VOCdevkit", "VOC2009"), + }, + "2008": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar", + "filename": "VOCtrainval_11-May-2012.tar", + "md5": "2629fa636546599198acfcfbfcf1904a", + "base_dir": os.path.join("VOCdevkit", "VOC2008"), + }, + "2007": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar", + "filename": "VOCtrainval_06-Nov-2007.tar", + "md5": "c52e279531787c972589f7e41ab4ae64", + "base_dir": 
os.path.join("VOCdevkit", "VOC2007"), + }, + "2007-test": { + "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar", + "filename": "VOCtest_06-Nov-2007.tar", + "md5": "b6e924de25625d8de591ea690078ad9f", + "base_dir": os.path.join("VOCdevkit", "VOC2007"), + }, +} + + +class _VOCBase(VisionDataset): + _SPLITS_DIR: str + _TARGET_DIR: str + _TARGET_FILE_EXT: str + + def __init__( + self, + root: str, + year: str = "2012", + image_set: str = "train", + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ): + super().__init__(root, transforms, transform, target_transform) + + self.year = verify_str_arg(year, "year", valid_values=[str(yr) for yr in range(2007, 2013)]) + + valid_image_sets = ["train", "trainval", "val"] + if year == "2007": + valid_image_sets.append("test") + self.image_set = verify_str_arg(image_set, "image_set", valid_image_sets) + + key = "2007-test" if year == "2007" and image_set == "test" else year + dataset_year_dict = DATASET_YEAR_DICT[key] + + self.url = dataset_year_dict["url"] + self.filename = dataset_year_dict["filename"] + self.md5 = dataset_year_dict["md5"] + + base_dir = dataset_year_dict["base_dir"] + voc_root = os.path.join(self.root, base_dir) + + if download: + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") + + splits_dir = os.path.join(voc_root, "ImageSets", self._SPLITS_DIR) + split_f = os.path.join(splits_dir, image_set.rstrip("\n") + ".txt") + with open(os.path.join(split_f)) as f: + file_names = [x.strip() for x in f.readlines()] + + image_dir = os.path.join(voc_root, "JPEGImages") + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + + target_dir = os.path.join(voc_root, self._TARGET_DIR) + self.targets = [os.path.join(target_dir, x + self._TARGET_FILE_EXT) for x in file_names] + + assert len(self.images) == len(self.targets) + + def __len__(self) -> int: + return len(self.images) + + +class VOCSegmentation(_VOCBase): + """`Pascal VOC `_ Segmentation Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``. + image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If + ``year=="2007"``, can also be ``"test"``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + _SPLITS_DIR = "Segmentation" + _TARGET_DIR = "SegmentationClass" + _TARGET_FILE_EXT = ".png" + + @property + def masks(self) -> List[str]: + return self.targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is the image segmentation. 
+ """ + img = Image.open(self.images[index]).convert("RGB") + target = Image.open(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + +class VOCDetection(_VOCBase): + """`Pascal VOC `_ Detection Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``. + image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If + ``year=="2007"``, can also be ``"test"``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + (default: alphabetic indexing of VOC's 20 classes). + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, required): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + _SPLITS_DIR = "Main" + _TARGET_DIR = "Annotations" + _TARGET_FILE_EXT = ".xml" + + @property + def annotations(self) -> List[str]: + return self.targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dictionary of the XML tree. + """ + img = Image.open(self.images[index]).convert("RGB") + target = self.parse_voc_xml(ET_parse(self.annotations[index]).getroot()) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + @staticmethod + def parse_voc_xml(node: ET_Element) -> Dict[str, Any]: + voc_dict: Dict[str, Any] = {} + children = list(node) + if children: + def_dic: Dict[str, Any] = collections.defaultdict(list) + for dc in map(VOCDetection.parse_voc_xml, children): + for ind, v in dc.items(): + def_dic[ind].append(v) + if node.tag == "annotation": + def_dic["object"] = [def_dic["object"]] + voc_dict = {node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}} + if node.text: + text = node.text.strip() + if not children: + voc_dict[node.tag] = text + return voc_dict diff --git a/wemm/lib/python3.10/site-packages/torchvision/datasets/widerface.py b/wemm/lib/python3.10/site-packages/torchvision/datasets/widerface.py new file mode 100644 index 0000000000000000000000000000000000000000..b46c7982d8bf6542b85a2706bedf0da821ec84ff --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/datasets/widerface.py @@ -0,0 +1,191 @@ +import os +from os.path import abspath, expanduser +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from PIL import Image + +from .utils import download_and_extract_archive, download_file_from_google_drive, extract_archive, verify_str_arg +from .vision import VisionDataset + + +class WIDERFace(VisionDataset): + """`WIDERFace `_ Dataset. + + Args: + root (string): Root directory where images and annotations are downloaded to. + Expects the following folder structure if download=False: + + .. code:: + + + └── widerface + ├── wider_face_split ('wider_face_split.zip' if compressed) + ├── WIDER_train ('WIDER_train.zip' if compressed) + ├── WIDER_val ('WIDER_val.zip' if compressed) + └── WIDER_test ('WIDER_test.zip' if compressed) + split (string): The dataset split to use. 
One of {``train``, ``val``, ``test``}. + Defaults to ``train``. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + BASE_FOLDER = "widerface" + FILE_LIST = [ + # File ID MD5 Hash Filename + ("15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"), + ("1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"), + ("1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"), + ] + ANNOTATIONS_FILE = ( + "http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip", + "0e3767bcf0e326556d407bf5bff5d27c", + "wider_face_split.zip", + ) + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super().__init__( + root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform + ) + # check arguments + self.split = verify_str_arg(split, "split", ("train", "val", "test")) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download and prepare it") + + self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = [] + if self.split in ("train", "val"): + self.parse_train_val_annotations_file() + else: + self.parse_test_annotations_file() + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dict of annotations for all faces in the image. + target=None for the test split. 
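A minimal sketch of consuming the annotation dict built by ``parse_train_val_annotations_file`` below (``"data_root"`` is a placeholder; it must contain the ``widerface`` folder described above):

>>> from torchvision.datasets import WIDERFace
>>> dataset = WIDERFace("data_root", split="train", download=True)
>>> img, target = dataset[0]
>>> target["bbox"]      # Tensor[num_faces, 4] in (x, y, width, height) format
>>> target["invalid"]   # per-face validity flag from the annotation file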
+ """ + + # stay consistent with other datasets and return a PIL Image + img = Image.open(self.img_info[index]["img_path"]) + + if self.transform is not None: + img = self.transform(img) + + target = None if self.split == "test" else self.img_info[index]["annotations"] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.img_info) + + def extra_repr(self) -> str: + lines = ["Split: {split}"] + return "\n".join(lines).format(**self.__dict__) + + def parse_train_val_annotations_file(self) -> None: + filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt" + filepath = os.path.join(self.root, "wider_face_split", filename) + + with open(filepath) as f: + lines = f.readlines() + file_name_line, num_boxes_line, box_annotation_line = True, False, False + num_boxes, box_counter = 0, 0 + labels = [] + for line in lines: + line = line.rstrip() + if file_name_line: + img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line) + img_path = abspath(expanduser(img_path)) + file_name_line = False + num_boxes_line = True + elif num_boxes_line: + num_boxes = int(line) + num_boxes_line = False + box_annotation_line = True + elif box_annotation_line: + box_counter += 1 + line_split = line.split(" ") + line_values = [int(x) for x in line_split] + labels.append(line_values) + if box_counter >= num_boxes: + box_annotation_line = False + file_name_line = True + labels_tensor = torch.tensor(labels) + self.img_info.append( + { + "img_path": img_path, + "annotations": { + "bbox": labels_tensor[:, 0:4], # x, y, width, height + "blur": labels_tensor[:, 4], + "expression": labels_tensor[:, 5], + "illumination": labels_tensor[:, 6], + "occlusion": labels_tensor[:, 7], + "pose": labels_tensor[:, 8], + "invalid": labels_tensor[:, 9], + }, + } + ) + box_counter = 0 + labels.clear() + else: + raise RuntimeError(f"Error parsing annotation file {filepath}") + + def parse_test_annotations_file(self) -> None: + filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt") + filepath = abspath(expanduser(filepath)) + with open(filepath) as f: + lines = f.readlines() + for line in lines: + line = line.rstrip() + img_path = os.path.join(self.root, "WIDER_test", "images", line) + img_path = abspath(expanduser(img_path)) + self.img_info.append({"img_path": img_path}) + + def _check_integrity(self) -> bool: + # Allow original archive to be deleted (zip). 
Only need the extracted images + all_files = self.FILE_LIST.copy() + all_files.append(self.ANNOTATIONS_FILE) + for (_, md5, filename) in all_files: + file, ext = os.path.splitext(filename) + extracted_dir = os.path.join(self.root, file) + if not os.path.exists(extracted_dir): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print("Files already downloaded and verified") + return + + # download and extract image data + for (file_id, md5, filename) in self.FILE_LIST: + download_file_from_google_drive(file_id, self.root, filename, md5) + filepath = os.path.join(self.root, filename) + extract_archive(filepath) + + # download and extract annotation files + download_and_extract_archive( + url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1] + ) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aaa2eebea3f16030f910998d97f6f93308db3b8 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a3c64934a48028a1e839bd03fe3f4075593a009 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bb04b262260db21181f84a6a11b023ebe85030f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..879945cb42ac883e56320925c5a05bb23f4dc10b Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b0f25e9c8e76ccfa942d3253e2f6ba23b010a98 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/googlenet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/googlenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a63151920033b1e10c132309ef3a0625d37e3326 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/googlenet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc 
b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab181bcc81eb0871364b7e30eaeb0a7ea904d3b8 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a165c8cb3c40a6ddd5968e15fa0870140af50fc Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e7fcbd21888a70ca183537df5a5eb327bbf0b14 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..679e10dff274d43e78cb14ae52673dc73c75cb79 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4146651c737971cc5a883b6750f2ded3051bc8ea --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py @@ -0,0 +1,7 @@ +from .faster_rcnn import * +from .fcos import * +from .keypoint_rcnn import * +from .mask_rcnn import * +from .retinanet import * +from .ssd import * +from .ssdlite import * diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df71fb718f3c5734c6905c40a53aefe844f06499 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/faster_rcnn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/faster_rcnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5194f646205682c0ec4de69811db32ee4e6027e Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/faster_rcnn.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e08bb165af5d35d7cbea63ddc4fdc13e1a66591 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/fcos.cpython-310.pyc differ diff --git 
a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/generalized_rcnn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/generalized_rcnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ff75a62a3f45ea7500eba3dfcec737fc1afbac7 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/generalized_rcnn.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7836439e3300a5d77a48f821642202fac57cbe35 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32c53b780bd1605cc6c25f8fe1693a1991d46f97 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60e34b4f055db3b2bf0600173d1d175356bee9a9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/rpn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/rpn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7df3dcc94beb0c6423c02f117ea5da9dbcb0c127 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/rpn.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551796110eca6f8665522f02bd6e25b7c47d94a1 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssd.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18f613666d1a8885bf0636a5f4214cf4b480999f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..253f6502a9b6344f5a3da239f2394179a256424e --- /dev/null +++ 
b/wemm/lib/python3.10/site-packages/torchvision/models/detection/anchor_utils.py @@ -0,0 +1,268 @@ +import math +from typing import List, Optional + +import torch +from torch import nn, Tensor + +from .image_list import ImageList + + +class AnchorGenerator(nn.Module): + """ + Module that generates anchors for a set of feature maps and + image sizes. + + The module support computing anchors at multiple sizes and aspect ratios + per feature map. This module assumes aspect ratio = height / width for + each anchor. + + sizes and aspect_ratios should have the same number of elements, and it should + correspond to the number of feature maps. + + sizes[i] and aspect_ratios[i] can have an arbitrary number of elements, + and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors + per spatial location for feature map i. + + Args: + sizes (Tuple[Tuple[int]]): + aspect_ratios (Tuple[Tuple[float]]): + """ + + __annotations__ = { + "cell_anchors": List[torch.Tensor], + } + + def __init__( + self, + sizes=((128, 256, 512),), + aspect_ratios=((0.5, 1.0, 2.0),), + ): + super().__init__() + + if not isinstance(sizes[0], (list, tuple)): + # TODO change this + sizes = tuple((s,) for s in sizes) + if not isinstance(aspect_ratios[0], (list, tuple)): + aspect_ratios = (aspect_ratios,) * len(sizes) + + self.sizes = sizes + self.aspect_ratios = aspect_ratios + self.cell_anchors = [ + self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(sizes, aspect_ratios) + ] + + # TODO: https://github.com/pytorch/pytorch/issues/26792 + # For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values. + # (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios) + # This method assumes aspect ratio = height / width for an anchor. + def generate_anchors( + self, + scales: List[int], + aspect_ratios: List[float], + dtype: torch.dtype = torch.float32, + device: torch.device = torch.device("cpu"), + ) -> Tensor: + scales = torch.as_tensor(scales, dtype=dtype, device=device) + aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device) + h_ratios = torch.sqrt(aspect_ratios) + w_ratios = 1 / h_ratios + + ws = (w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h_ratios[:, None] * scales[None, :]).view(-1) + + base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2 + return base_anchors.round() + + def set_cell_anchors(self, dtype: torch.dtype, device: torch.device): + self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors] + + def num_anchors_per_location(self) -> List[int]: + return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)] + + # For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2), + # output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a. + def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]: + anchors = [] + cell_anchors = self.cell_anchors + torch._assert(cell_anchors is not None, "cell_anchors should not be None") + torch._assert( + len(grid_sizes) == len(strides) == len(cell_anchors), + "Anchors should be Tuple[Tuple[int]] because each feature " + "map could potentially have different sizes and aspect ratios. 
" + "There needs to be a match between the number of " + "feature maps passed and the number of sizes / aspect ratios specified.", + ) + + for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors): + grid_height, grid_width = size + stride_height, stride_width = stride + device = base_anchors.device + + # For output anchor, compute [x_center, y_center, x_center, y_center] + shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width + shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij") + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + # For every (base anchor, output anchor) pair, + # offset each zero-centered base anchor by the center of the output anchor. + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]: + grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] + image_size = image_list.tensors.shape[-2:] + dtype, device = feature_maps[0].dtype, feature_maps[0].device + strides = [ + [ + torch.empty((), dtype=torch.int64, device=device).fill_(image_size[0] // g[0]), + torch.empty((), dtype=torch.int64, device=device).fill_(image_size[1] // g[1]), + ] + for g in grid_sizes + ] + self.set_cell_anchors(dtype, device) + anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides) + anchors: List[List[torch.Tensor]] = [] + for _ in range(len(image_list.image_sizes)): + anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps] + anchors.append(anchors_in_image) + anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors] + return anchors + + +class DefaultBoxGenerator(nn.Module): + """ + This module generates the default boxes of SSD for a set of feature maps and image sizes. + + Args: + aspect_ratios (List[List[int]]): A list with all the aspect ratios used in each feature map. + min_ratio (float): The minimum scale :math:`\text{s}_{\text{min}}` of the default boxes used in the estimation + of the scales of each feature map. It is used only if the ``scales`` parameter is not provided. + max_ratio (float): The maximum scale :math:`\text{s}_{\text{max}}` of the default boxes used in the estimation + of the scales of each feature map. It is used only if the ``scales`` parameter is not provided. + scales (List[float]], optional): The scales of the default boxes. If not provided it will be estimated using + the ``min_ratio`` and ``max_ratio`` parameters. + steps (List[int]], optional): It's a hyper-parameter that affects the tiling of default boxes. If not provided + it will be estimated from the data. + clip (bool): Whether the standardized values of default boxes should be clipped between 0 and 1. The clipping + is applied while the boxes are encoded in format ``(cx, cy, w, h)``. 
+ """ + + def __init__( + self, + aspect_ratios: List[List[int]], + min_ratio: float = 0.15, + max_ratio: float = 0.9, + scales: Optional[List[float]] = None, + steps: Optional[List[int]] = None, + clip: bool = True, + ): + super().__init__() + if steps is not None and len(aspect_ratios) != len(steps): + raise ValueError("aspect_ratios and steps should have the same length") + self.aspect_ratios = aspect_ratios + self.steps = steps + self.clip = clip + num_outputs = len(aspect_ratios) + + # Estimation of default boxes scales + if scales is None: + if num_outputs > 1: + range_ratio = max_ratio - min_ratio + self.scales = [min_ratio + range_ratio * k / (num_outputs - 1.0) for k in range(num_outputs)] + self.scales.append(1.0) + else: + self.scales = [min_ratio, max_ratio] + else: + self.scales = scales + + self._wh_pairs = self._generate_wh_pairs(num_outputs) + + def _generate_wh_pairs( + self, num_outputs: int, dtype: torch.dtype = torch.float32, device: torch.device = torch.device("cpu") + ) -> List[Tensor]: + _wh_pairs: List[Tensor] = [] + for k in range(num_outputs): + # Adding the 2 default width-height pairs for aspect ratio 1 and scale s'k + s_k = self.scales[k] + s_prime_k = math.sqrt(self.scales[k] * self.scales[k + 1]) + wh_pairs = [[s_k, s_k], [s_prime_k, s_prime_k]] + + # Adding 2 pairs for each aspect ratio of the feature map k + for ar in self.aspect_ratios[k]: + sq_ar = math.sqrt(ar) + w = self.scales[k] * sq_ar + h = self.scales[k] / sq_ar + wh_pairs.extend([[w, h], [h, w]]) + + _wh_pairs.append(torch.as_tensor(wh_pairs, dtype=dtype, device=device)) + return _wh_pairs + + def num_anchors_per_location(self) -> List[int]: + # Estimate num of anchors based on aspect ratios: 2 default boxes + 2 * ratios of feaure map. + return [2 + 2 * len(r) for r in self.aspect_ratios] + + # Default Boxes calculation based on page 6 of SSD paper + def _grid_default_boxes( + self, grid_sizes: List[List[int]], image_size: List[int], dtype: torch.dtype = torch.float32 + ) -> Tensor: + default_boxes = [] + for k, f_k in enumerate(grid_sizes): + # Now add the default boxes for each width-height pair + if self.steps is not None: + x_f_k = image_size[1] / self.steps[k] + y_f_k = image_size[0] / self.steps[k] + else: + y_f_k, x_f_k = f_k + + shifts_x = ((torch.arange(0, f_k[1]) + 0.5) / x_f_k).to(dtype=dtype) + shifts_y = ((torch.arange(0, f_k[0]) + 0.5) / y_f_k).to(dtype=dtype) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij") + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + + shifts = torch.stack((shift_x, shift_y) * len(self._wh_pairs[k]), dim=-1).reshape(-1, 2) + # Clipping the default boxes while the boxes are encoded in format (cx, cy, w, h) + _wh_pair = self._wh_pairs[k].clamp(min=0, max=1) if self.clip else self._wh_pairs[k] + wh_pairs = _wh_pair.repeat((f_k[0] * f_k[1]), 1) + + default_box = torch.cat((shifts, wh_pairs), dim=1) + + default_boxes.append(default_box) + + return torch.cat(default_boxes, dim=0) + + def __repr__(self) -> str: + s = ( + f"{self.__class__.__name__}(" + f"aspect_ratios={self.aspect_ratios}" + f", clip={self.clip}" + f", scales={self.scales}" + f", steps={self.steps}" + ")" + ) + return s + + def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]: + grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] + image_size = image_list.tensors.shape[-2:] + dtype, device = feature_maps[0].dtype, feature_maps[0].device + default_boxes = self._grid_default_boxes(grid_sizes, image_size, 
dtype=dtype) + default_boxes = default_boxes.to(device) + + dboxes = [] + x_y_size = torch.tensor([image_size[1], image_size[0]], device=default_boxes.device) + for _ in image_list.image_sizes: + dboxes_in_image = default_boxes + dboxes_in_image = torch.cat( + [ + (dboxes_in_image[:, :2] - 0.5 * dboxes_in_image[:, 2:]) * x_y_size, + (dboxes_in_image[:, :2] + 0.5 * dboxes_in_image[:, 2:]) * x_y_size, + ], + -1, + ) + dboxes.append(dboxes_in_image) + return dboxes diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/faster_rcnn.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/faster_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..bda05211106db127fe4d63ef9c8c112bb3b54627 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/faster_rcnn.py @@ -0,0 +1,843 @@ +from typing import Any, Callable, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import nn +from torchvision.ops import MultiScaleRoIAlign + +from ...ops import misc as misc_nn_ops +from ...transforms._presets import ObjectDetection +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights +from ..resnet import resnet50, ResNet50_Weights +from ._utils import overwrite_eps +from .anchor_utils import AnchorGenerator +from .backbone_utils import _mobilenet_extractor, _resnet_fpn_extractor, _validate_trainable_layers +from .generalized_rcnn import GeneralizedRCNN +from .roi_heads import RoIHeads +from .rpn import RegionProposalNetwork, RPNHead +from .transform import GeneralizedRCNNTransform + + +__all__ = [ + "FasterRCNN", + "FasterRCNN_ResNet50_FPN_Weights", + "FasterRCNN_ResNet50_FPN_V2_Weights", + "FasterRCNN_MobileNet_V3_Large_FPN_Weights", + "FasterRCNN_MobileNet_V3_Large_320_FPN_Weights", + "fasterrcnn_resnet50_fpn", + "fasterrcnn_resnet50_fpn_v2", + "fasterrcnn_mobilenet_v3_large_fpn", + "fasterrcnn_mobilenet_v3_large_320_fpn", +] + + +def _default_anchorgen(): + anchor_sizes = ((32,), (64,), (128,), (256,), (512,)) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + return AnchorGenerator(anchor_sizes, aspect_ratios) + + +class FasterRCNN(GeneralizedRCNN): + """ + Implements Faster R-CNN. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses for both the RPN and the R-CNN. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores or each prediction + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or and OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + If box_predictor is specified, num_classes should be None. + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN + rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training + rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing + rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training + rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing + rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. + rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + rpn_score_thresh (float): during inference, only return proposals with a classification score + greater than rpn_score_thresh + box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes + box_head (nn.Module): module that takes the cropped feature maps as input + box_predictor (nn.Module): module that takes the output of box_head and returns the + classification logits and box regression deltas. + box_score_thresh (float): during inference, only return proposals with a classification score + greater than box_score_thresh + box_nms_thresh (float): NMS threshold for the prediction head. Used during inference + box_detections_per_img (int): maximum number of detections per image, for all classes. 
+ box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be + considered as positive during training of the classification head + box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be + considered as negative during training of the classification head + box_batch_size_per_image (int): number of proposals that are sampled during training of the + classification head + box_positive_fraction (float): proportion of positive proposals in a mini-batch during training + of the classification head + bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the + bounding boxes + + Example:: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import FasterRCNN + >>> from torchvision.models.detection.rpn import AnchorGenerator + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features + >>> # FasterRCNN needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280, + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the RPN generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),)) + >>> + >>> # let's define what are the feature maps that we will + >>> # use to perform the region of interest cropping, as well as + >>> # the size of the crop after rescaling. + >>> # if your backbone returns a Tensor, featmap_names is expected to + >>> # be ['0']. More generally, the backbone should return an + >>> # OrderedDict[Tensor], and in featmap_names you can choose which + >>> # feature maps to use. 
+ >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=7, + >>> sampling_ratio=2) + >>> + >>> # put the pieces together inside a FasterRCNN model + >>> model = FasterRCNN(backbone, + >>> num_classes=2, + >>> rpn_anchor_generator=anchor_generator, + >>> box_roi_pool=roi_pooler) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + + def __init__( + self, + backbone, + num_classes=None, + # transform parameters + min_size=800, + max_size=1333, + image_mean=None, + image_std=None, + # RPN parameters + rpn_anchor_generator=None, + rpn_head=None, + rpn_pre_nms_top_n_train=2000, + rpn_pre_nms_top_n_test=1000, + rpn_post_nms_top_n_train=2000, + rpn_post_nms_top_n_test=1000, + rpn_nms_thresh=0.7, + rpn_fg_iou_thresh=0.7, + rpn_bg_iou_thresh=0.3, + rpn_batch_size_per_image=256, + rpn_positive_fraction=0.5, + rpn_score_thresh=0.0, + # Box parameters + box_roi_pool=None, + box_head=None, + box_predictor=None, + box_score_thresh=0.05, + box_nms_thresh=0.5, + box_detections_per_img=100, + box_fg_iou_thresh=0.5, + box_bg_iou_thresh=0.5, + box_batch_size_per_image=512, + box_positive_fraction=0.25, + bbox_reg_weights=None, + **kwargs, + ): + + if not hasattr(backbone, "out_channels"): + raise ValueError( + "backbone should contain an attribute out_channels " + "specifying the number of output channels (assumed to be the " + "same for all the levels)" + ) + + if not isinstance(rpn_anchor_generator, (AnchorGenerator, type(None))): + raise TypeError( + f"rpn_anchor_generator should be of type AnchorGenerator or None instead of {type(rpn_anchor_generator)}" + ) + if not isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None))): + raise TypeError( + f"box_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(box_roi_pool)}" + ) + + if num_classes is not None: + if box_predictor is not None: + raise ValueError("num_classes should be None when box_predictor is specified") + else: + if box_predictor is None: + raise ValueError("num_classes should not be None when box_predictor is not specified") + + out_channels = backbone.out_channels + + if rpn_anchor_generator is None: + rpn_anchor_generator = _default_anchorgen() + if rpn_head is None: + rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0]) + + rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test) + rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test) + + rpn = RegionProposalNetwork( + rpn_anchor_generator, + rpn_head, + rpn_fg_iou_thresh, + rpn_bg_iou_thresh, + rpn_batch_size_per_image, + rpn_positive_fraction, + rpn_pre_nms_top_n, + rpn_post_nms_top_n, + rpn_nms_thresh, + score_thresh=rpn_score_thresh, + ) + + if box_roi_pool is None: + box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2) + + if box_head is None: + resolution = box_roi_pool.output_size[0] + representation_size = 1024 + box_head = TwoMLPHead(out_channels * resolution**2, representation_size) + + if box_predictor is None: + representation_size = 1024 + box_predictor = FastRCNNPredictor(representation_size, num_classes) + + roi_heads = RoIHeads( + # Box + box_roi_pool, + box_head, + box_predictor, + box_fg_iou_thresh, + box_bg_iou_thresh, + box_batch_size_per_image, + box_positive_fraction, + bbox_reg_weights, + box_score_thresh, + box_nms_thresh, + box_detections_per_img, + ) + + if image_mean is None: + image_mean = 
[0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std, **kwargs) + + super().__init__(backbone, rpn, roi_heads, transform) + + +class TwoMLPHead(nn.Module): + """ + Standard heads for FPN-based models + + Args: + in_channels (int): number of input channels + representation_size (int): size of the intermediate representation + """ + + def __init__(self, in_channels, representation_size): + super().__init__() + + self.fc6 = nn.Linear(in_channels, representation_size) + self.fc7 = nn.Linear(representation_size, representation_size) + + def forward(self, x): + x = x.flatten(start_dim=1) + + x = F.relu(self.fc6(x)) + x = F.relu(self.fc7(x)) + + return x + + +class FastRCNNConvFCHead(nn.Sequential): + def __init__( + self, + input_size: Tuple[int, int, int], + conv_layers: List[int], + fc_layers: List[int], + norm_layer: Optional[Callable[..., nn.Module]] = None, + ): + """ + Args: + input_size (Tuple[int, int, int]): the input size in CHW format. + conv_layers (list): feature dimensions of each Convolution layer + fc_layers (list): feature dimensions of each FCN layer + norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None + """ + in_channels, in_height, in_width = input_size + + blocks = [] + previous_channels = in_channels + for current_channels in conv_layers: + blocks.append(misc_nn_ops.Conv2dNormActivation(previous_channels, current_channels, norm_layer=norm_layer)) + previous_channels = current_channels + blocks.append(nn.Flatten()) + previous_channels = previous_channels * in_height * in_width + for current_channels in fc_layers: + blocks.append(nn.Linear(previous_channels, current_channels)) + blocks.append(nn.ReLU(inplace=True)) + previous_channels = current_channels + + super().__init__(*blocks) + for layer in self.modules(): + if isinstance(layer, nn.Conv2d): + nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu") + if layer.bias is not None: + nn.init.zeros_(layer.bias) + + +class FastRCNNPredictor(nn.Module): + """ + Standard classification + bounding box regression layers + for Fast R-CNN. 
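A common fine-tuning sketch: swap the predictor on a pretrained detector for a new number of classes. This assumes ``RoIHeads`` (defined in roi_heads.py, not part of this excerpt) stores the predictor as ``box_predictor``; the 3-class setting is an arbitrary example.

>>> import torchvision
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")
>>> in_features = model.roi_heads.box_predictor.cls_score.in_features
>>> model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes=3)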
+ + Args: + in_channels (int): number of input channels + num_classes (int): number of output classes (including background) + """ + + def __init__(self, in_channels, num_classes): + super().__init__() + self.cls_score = nn.Linear(in_channels, num_classes) + self.bbox_pred = nn.Linear(in_channels, num_classes * 4) + + def forward(self, x): + if x.dim() == 4: + torch._assert( + list(x.shape[2:]) == [1, 1], + f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}", + ) + x = x.flatten(start_dim=1) + scores = self.cls_score(x) + bbox_deltas = self.bbox_pred(x) + + return scores, bbox_deltas + + +_COMMON_META = { + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), +} + + +class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 41755286, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-resnet-50-fpn", + "_metrics": { + "COCO-val2017": { + "box_map": 37.0, + } + }, + "_ops": 134.38, + "_file_size": 159.743, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_v2_coco-dd69338a.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 43712278, + "recipe": "https://github.com/pytorch/vision/pull/5763", + "_metrics": { + "COCO-val2017": { + "box_map": 46.7, + } + }, + "_ops": 280.371, + "_file_size": 167.104, + "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", + }, + ) + DEFAULT = COCO_V1 + + +class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 19386354, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-fpn", + "_metrics": { + "COCO-val2017": { + "box_map": 32.8, + } + }, + "_ops": 4.494, + "_file_size": 74.239, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 19386354, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-320-fpn", + "_metrics": { + "COCO-val2017": { + "box_map": 22.8, + } + }, + "_ops": 0.719, + "_file_size": 74.239, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", FasterRCNN_ResNet50_FPN_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def fasterrcnn_resnet50_fpn( + *, + weights: Optional[FasterRCNN_ResNet50_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1, + 
trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> FasterRCNN: + """ + Faster R-CNN model with a ResNet-50-FPN backbone from the `Faster R-CNN: Towards Real-Time Object + Detection with Region Proposal Networks `__ + paper. + + .. betastatus:: detection module + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each detection + - scores (``Tensor[N]``): the scores of each detection + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. + + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT) + >>> # For training + >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4) + >>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4] + >>> labels = torch.randint(1, 91, (4, 11)) + >>> images = list(image for image in images) + >>> targets = [] + >>> for i in range(len(images)): + >>> d = {} + >>> d['boxes'] = boxes[i] + >>> d['labels'] = labels[i] + >>> targets.append(d) + >>> output = model(images, targets) + >>> # For inference + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11) + + Args: + weights (:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. 
+ **kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.FasterRCNN_ResNet50_FPN_Weights + :members: + """ + weights = FasterRCNN_ResNet50_FPN_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d + + backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer) + backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers) + model = FasterRCNN(backbone, num_classes=num_classes, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + if weights == FasterRCNN_ResNet50_FPN_Weights.COCO_V1: + overwrite_eps(model, 0.0) + + return model + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", FasterRCNN_ResNet50_FPN_V2_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def fasterrcnn_resnet50_fpn_v2( + *, + weights: Optional[FasterRCNN_ResNet50_FPN_V2_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = None, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> FasterRCNN: + """ + Constructs an improved Faster R-CNN model with a ResNet-50-FPN backbone from `Benchmarking Detection + Transfer Learning with Vision Transformers `__ paper. + + .. betastatus:: detection module + + It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See + :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more + details. + + Args: + weights (:class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 5, with 5 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.FasterRCNN_ResNet50_FPN_V2_Weights + :members: + """ + weights = FasterRCNN_ResNet50_FPN_V2_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + + backbone = resnet50(weights=weights_backbone, progress=progress) + backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers, norm_layer=nn.BatchNorm2d) + rpn_anchor_generator = _default_anchorgen() + rpn_head = RPNHead(backbone.out_channels, rpn_anchor_generator.num_anchors_per_location()[0], conv_depth=2) + box_head = FastRCNNConvFCHead( + (backbone.out_channels, 7, 7), [256, 256, 256, 256], [1024], norm_layer=nn.BatchNorm2d + ) + model = FasterRCNN( + backbone, + num_classes=num_classes, + rpn_anchor_generator=rpn_anchor_generator, + rpn_head=rpn_head, + box_head=box_head, + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +def _fasterrcnn_mobilenet_v3_large_fpn( + *, + weights: Optional[Union[FasterRCNN_MobileNet_V3_Large_FPN_Weights, FasterRCNN_MobileNet_V3_Large_320_FPN_Weights]], + progress: bool, + num_classes: Optional[int], + weights_backbone: Optional[MobileNet_V3_Large_Weights], + trainable_backbone_layers: Optional[int], + **kwargs: Any, +) -> FasterRCNN: + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3) + norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d + + backbone = mobilenet_v3_large(weights=weights_backbone, progress=progress, norm_layer=norm_layer) + backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers) + anchor_sizes = ( + ( + 32, + 64, + 128, + 256, + 512, + ), + ) * 3 + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + model = FasterRCNN( + backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1), +) +def fasterrcnn_mobilenet_v3_large_320_fpn( + *, + weights: Optional[FasterRCNN_MobileNet_V3_Large_320_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> FasterRCNN: + """ + Low resolution Faster R-CNN model with a MobileNetV3-Large backbone tuned for mobile use cases. + + .. betastatus:: detection module + + It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See + :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more + details. 
+ + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 6, with 6 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_320_FPN_Weights + :members: + """ + weights = FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.verify(weights) + weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone) + + defaults = { + "min_size": 320, + "max_size": 640, + "rpn_pre_nms_top_n_test": 150, + "rpn_post_nms_top_n_test": 150, + "rpn_score_thresh": 0.05, + } + + kwargs = {**defaults, **kwargs} + return _fasterrcnn_mobilenet_v3_large_fpn( + weights=weights, + progress=progress, + num_classes=num_classes, + weights_backbone=weights_backbone, + trainable_backbone_layers=trainable_backbone_layers, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1), +) +def fasterrcnn_mobilenet_v3_large_fpn( + *, + weights: Optional[FasterRCNN_MobileNet_V3_Large_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> FasterRCNN: + """ + Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone. + + .. betastatus:: detection module + + It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See + :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more + details. + + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(weights=FasterRCNN_MobileNet_V3_Large_FPN_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. 
+ progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The + pretrained weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from + final block. Valid values are between 0 and 6, with 6 meaning all backbone layers are + trainable. If ``None`` is passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.faster_rcnn.FasterRCNN`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.FasterRCNN_MobileNet_V3_Large_FPN_Weights + :members: + """ + weights = FasterRCNN_MobileNet_V3_Large_FPN_Weights.verify(weights) + weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone) + + defaults = { + "rpn_score_thresh": 0.05, + } + + kwargs = {**defaults, **kwargs} + return _fasterrcnn_mobilenet_v3_large_fpn( + weights=weights, + progress=progress, + num_classes=num_classes, + weights_backbone=weights_backbone, + trainable_backbone_layers=trainable_backbone_layers, + **kwargs, + ) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/generalized_rcnn.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/generalized_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b481265077fb5a582402d81aeb3516ffca063653 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/generalized_rcnn.py @@ -0,0 +1,118 @@ +""" +Implements the Generalized R-CNN framework +""" + +import warnings +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple, Union + +import torch +from torch import nn, Tensor + +from ...utils import _log_api_usage_once + + +class GeneralizedRCNN(nn.Module): + """ + Main class for Generalized R-CNN. + + Args: + backbone (nn.Module): + rpn (nn.Module): + roi_heads (nn.Module): takes the features + the proposals from the RPN and computes + detections / masks from it. + transform (nn.Module): performs the data transformation from the inputs to feed into + the model + """ + + def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None: + super().__init__() + _log_api_usage_once(self) + self.transform = transform + self.backbone = backbone + self.rpn = rpn + self.roi_heads = roi_heads + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs(self, losses, detections): + # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]] + if self.training: + return losses + + return detections + + def forward(self, images, targets=None): + # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + """ + Args: + images (list[Tensor]): images to be processed + targets (list[Dict[str, Tensor]]): ground-truth boxes present in the image (optional) + + Returns: + result (list[BoxList] or dict[Tensor]): the output from the model. + During training, it returns a dict[Tensor] which contains the losses. + During testing, it returns list[BoxList] contains additional fields + like `scores`, `labels` and `mask` (for Mask R-CNN models). 
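# Illustrative sketch of the return convention described in the docstring above
# (losses dict in training, per-image detection dicts in eval). Model, classes
# and targets below are hypothetical toy values; no pretrained weights are used.
import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn

model = fasterrcnn_resnet50_fpn(weights=None, weights_backbone=None, num_classes=3)

images = [torch.rand(3, 320, 320)]
targets = [{
    "boxes": torch.tensor([[10.0, 20.0, 100.0, 120.0]]),  # [x1, y1, x2, y2]
    "labels": torch.tensor([1]),
}]

model.train()
losses = model(images, targets)   # dict of loss tensors, e.g. "loss_classifier", ...

model.eval()
with torch.no_grad():
    detections = model(images)    # [{"boxes": ..., "labels": ..., "scores": ...}]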
+ + """ + if self.training: + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) + else: + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) + original_image_sizes.append((val[0], val[1])) + + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + # TODO: Move this to a function + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + # print the first degenerate box + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + torch._assert( + False, + "All bounding boxes should have positive height and width." + f" Found invalid box {degen_bb} for target at index {target_idx}.", + ) + + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([("0", features)]) + proposals, proposal_losses = self.rpn(images, features, targets) + detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) # type: ignore[operator] + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + else: + return self.eager_outputs(losses, detections) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/image_list.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/image_list.py new file mode 100644 index 0000000000000000000000000000000000000000..583866557e4c9ec178e7cc268272db3de1698e41 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/image_list.py @@ -0,0 +1,25 @@ +from typing import List, Tuple + +import torch +from torch import Tensor + + +class ImageList: + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size, + and storing in a field the original sizes of each image + + Args: + tensors (tensor): Tensor containing images. + image_sizes (list[tuple[int, int]]): List of Tuples each containing size of images. 
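# Illustrative sketch (not part of the patch above): GeneralizedRCNNTransform,
# used by the detection models in this diff, batches variable-size images into
# one padded tensor and records the resized per-image sizes in an ImageList.
# Values below are hypothetical toy inputs.
import torch
from torchvision.models.detection.transform import GeneralizedRCNNTransform

transform = GeneralizedRCNNTransform(
    min_size=800, max_size=1333,
    image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225],
)
images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
image_list, _ = transform(images)    # targets are optional, hence the "_"
print(image_list.tensors.shape)      # a single padded batch tensor (N, C, H_pad, W_pad)
print(image_list.image_sizes)        # resized (H, W) of each image before padding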
+ """ + + def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]) -> None: + self.tensors = tensors + self.image_sizes = image_sizes + + def to(self, device: torch.device) -> "ImageList": + cast_tensor = self.tensors.to(device) + return ImageList(cast_tensor, self.image_sizes) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/retinanet.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/retinanet.py new file mode 100644 index 0000000000000000000000000000000000000000..3a14c983a6450a63c777bf8114bef06577506a2c --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/retinanet.py @@ -0,0 +1,899 @@ +import math +import warnings +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, Dict, List, Optional, Tuple + +import torch +from torch import nn, Tensor + +from ...ops import boxes as box_ops, misc as misc_nn_ops, sigmoid_focal_loss +from ...ops.feature_pyramid_network import LastLevelP6P7 +from ...transforms._presets import ObjectDetection +from ...utils import _log_api_usage_once +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..resnet import resnet50, ResNet50_Weights +from . import _utils as det_utils +from ._utils import _box_loss, overwrite_eps +from .anchor_utils import AnchorGenerator +from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers +from .transform import GeneralizedRCNNTransform + + +__all__ = [ + "RetinaNet", + "RetinaNet_ResNet50_FPN_Weights", + "RetinaNet_ResNet50_FPN_V2_Weights", + "retinanet_resnet50_fpn", + "retinanet_resnet50_fpn_v2", +] + + +def _sum(x: List[Tensor]) -> Tensor: + res = x[0] + for i in x[1:]: + res = res + i + return res + + +def _v1_to_v2_weights(state_dict, prefix): + for i in range(4): + for type in ["weight", "bias"]: + old_key = f"{prefix}conv.{2*i}.{type}" + new_key = f"{prefix}conv.{i}.0.{type}" + if old_key in state_dict: + state_dict[new_key] = state_dict.pop(old_key) + + +def _default_anchorgen(): + anchor_sizes = tuple((x, int(x * 2 ** (1.0 / 3)), int(x * 2 ** (2.0 / 3))) for x in [32, 64, 128, 256, 512]) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) + return anchor_generator + + +class RetinaNetHead(nn.Module): + """ + A regression and classification head for use in RetinaNet. + + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + num_classes (int): number of classes to be predicted + norm_layer (callable, optional): Module specifying the normalization layer to use. 
Default: None + """ + + def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[Callable[..., nn.Module]] = None): + super().__init__() + self.classification_head = RetinaNetClassificationHead( + in_channels, num_anchors, num_classes, norm_layer=norm_layer + ) + self.regression_head = RetinaNetRegressionHead(in_channels, num_anchors, norm_layer=norm_layer) + + def compute_loss(self, targets, head_outputs, anchors, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Dict[str, Tensor] + return { + "classification": self.classification_head.compute_loss(targets, head_outputs, matched_idxs), + "bbox_regression": self.regression_head.compute_loss(targets, head_outputs, anchors, matched_idxs), + } + + def forward(self, x): + # type: (List[Tensor]) -> Dict[str, Tensor] + return {"cls_logits": self.classification_head(x), "bbox_regression": self.regression_head(x)} + + +class RetinaNetClassificationHead(nn.Module): + """ + A classification head for use in RetinaNet. + + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + num_classes (int): number of classes to be predicted + norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None + """ + + _version = 2 + + def __init__( + self, + in_channels, + num_anchors, + num_classes, + prior_probability=0.01, + norm_layer: Optional[Callable[..., nn.Module]] = None, + ): + super().__init__() + + conv = [] + for _ in range(4): + conv.append(misc_nn_ops.Conv2dNormActivation(in_channels, in_channels, norm_layer=norm_layer)) + self.conv = nn.Sequential(*conv) + + for layer in self.conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, std=0.01) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0) + + self.cls_logits = nn.Conv2d(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1) + torch.nn.init.normal_(self.cls_logits.weight, std=0.01) + torch.nn.init.constant_(self.cls_logits.bias, -math.log((1 - prior_probability) / prior_probability)) + + self.num_classes = num_classes + self.num_anchors = num_anchors + + # This is to fix using det_utils.Matcher.BETWEEN_THRESHOLDS in TorchScript. + # TorchScript doesn't support class attributes. 
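# Illustrative arithmetic for the cls_logits bias initialization a few lines
# above: setting the bias to -log((1 - p) / p) makes every anchor's initial
# foreground probability roughly p (0.01 by default), the prior used by the
# RetinaNet/focal-loss recipe to keep early training stable. Standalone math only.
import math
import torch

prior_probability = 0.01
bias = -math.log((1 - prior_probability) / prior_probability)
print(bias)                               # ~ -4.595
print(torch.sigmoid(torch.tensor(bias)))  # ~ 0.01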
+ # https://github.com/pytorch/vision/pull/1697#issuecomment-630255584 + self.BETWEEN_THRESHOLDS = det_utils.Matcher.BETWEEN_THRESHOLDS + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + _v1_to_v2_weights(state_dict, prefix) + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def compute_loss(self, targets, head_outputs, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Tensor + losses = [] + + cls_logits = head_outputs["cls_logits"] + + for targets_per_image, cls_logits_per_image, matched_idxs_per_image in zip(targets, cls_logits, matched_idxs): + # determine only the foreground + foreground_idxs_per_image = matched_idxs_per_image >= 0 + num_foreground = foreground_idxs_per_image.sum() + + # create the target classification + gt_classes_target = torch.zeros_like(cls_logits_per_image) + gt_classes_target[ + foreground_idxs_per_image, + targets_per_image["labels"][matched_idxs_per_image[foreground_idxs_per_image]], + ] = 1.0 + + # find indices for which anchors should be ignored + valid_idxs_per_image = matched_idxs_per_image != self.BETWEEN_THRESHOLDS + + # compute the classification loss + losses.append( + sigmoid_focal_loss( + cls_logits_per_image[valid_idxs_per_image], + gt_classes_target[valid_idxs_per_image], + reduction="sum", + ) + / max(1, num_foreground) + ) + + return _sum(losses) / len(targets) + + def forward(self, x): + # type: (List[Tensor]) -> Tensor + all_cls_logits = [] + + for features in x: + cls_logits = self.conv(features) + cls_logits = self.cls_logits(cls_logits) + + # Permute classification output from (N, A * K, H, W) to (N, HWA, K). + N, _, H, W = cls_logits.shape + cls_logits = cls_logits.view(N, -1, self.num_classes, H, W) + cls_logits = cls_logits.permute(0, 3, 4, 1, 2) + cls_logits = cls_logits.reshape(N, -1, self.num_classes) # Size=(N, HWA, 4) + + all_cls_logits.append(cls_logits) + + return torch.cat(all_cls_logits, dim=1) + + +class RetinaNetRegressionHead(nn.Module): + """ + A regression head for use in RetinaNet. + + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + norm_layer (callable, optional): Module specifying the normalization layer to use. 
Default: None + """ + + _version = 2 + + __annotations__ = { + "box_coder": det_utils.BoxCoder, + } + + def __init__(self, in_channels, num_anchors, norm_layer: Optional[Callable[..., nn.Module]] = None): + super().__init__() + + conv = [] + for _ in range(4): + conv.append(misc_nn_ops.Conv2dNormActivation(in_channels, in_channels, norm_layer=norm_layer)) + self.conv = nn.Sequential(*conv) + + self.bbox_reg = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1) + torch.nn.init.normal_(self.bbox_reg.weight, std=0.01) + torch.nn.init.zeros_(self.bbox_reg.bias) + + for layer in self.conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, std=0.01) + if layer.bias is not None: + torch.nn.init.zeros_(layer.bias) + + self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) + self._loss_type = "l1" + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + _v1_to_v2_weights(state_dict, prefix) + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def compute_loss(self, targets, head_outputs, anchors, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Tensor + losses = [] + + bbox_regression = head_outputs["bbox_regression"] + + for targets_per_image, bbox_regression_per_image, anchors_per_image, matched_idxs_per_image in zip( + targets, bbox_regression, anchors, matched_idxs + ): + # determine only the foreground indices, ignore the rest + foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0] + num_foreground = foreground_idxs_per_image.numel() + + # select only the foreground boxes + matched_gt_boxes_per_image = targets_per_image["boxes"][matched_idxs_per_image[foreground_idxs_per_image]] + bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :] + anchors_per_image = anchors_per_image[foreground_idxs_per_image, :] + + # compute the loss + losses.append( + _box_loss( + self._loss_type, + self.box_coder, + anchors_per_image, + matched_gt_boxes_per_image, + bbox_regression_per_image, + ) + / max(1, num_foreground) + ) + + return _sum(losses) / max(1, len(targets)) + + def forward(self, x): + # type: (List[Tensor]) -> Tensor + all_bbox_regression = [] + + for features in x: + bbox_regression = self.conv(features) + bbox_regression = self.bbox_reg(bbox_regression) + + # Permute bbox regression output from (N, 4 * A, H, W) to (N, HWA, 4). + N, _, H, W = bbox_regression.shape + bbox_regression = bbox_regression.view(N, -1, 4, H, W) + bbox_regression = bbox_regression.permute(0, 3, 4, 1, 2) + bbox_regression = bbox_regression.reshape(N, -1, 4) # Size=(N, HWA, 4) + + all_bbox_regression.append(bbox_regression) + + return torch.cat(all_bbox_regression, dim=1) + + +class RetinaNet(nn.Module): + """ + Implements RetinaNet. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. 
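# Illustrative sketch (not part of the patch above): _default_anchorgen, defined
# earlier in this file, uses 3 scales per pyramid level (steps of 2^(1/3)) and
# 3 aspect ratios, i.e. 9 anchors per spatial location. The sizes can be
# reproduced standalone:
anchor_sizes = tuple(
    (x, int(x * 2 ** (1.0 / 3)), int(x * 2 ** (2.0 / 3))) for x in [32, 64, 128, 256, 512]
)
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
print(anchor_sizes[0])    # (32, 40, 50)
print(len(anchor_sizes))  # 5 pyramid levels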
+ + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores for each prediction + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or an OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + head (nn.Module): Module run on top of the feature pyramid. + Defaults to a module containing a classification and regression module. + score_thresh (float): Score threshold used for postprocessing the detections. + nms_thresh (float): NMS threshold used for postprocessing the detections. + detections_per_img (int): Number of best detections to keep after NMS. + fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training. + bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training. + topk_candidates (int): Number of best detections to keep before NMS. + + Example: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import RetinaNet + >>> from torchvision.models.detection.anchor_utils import AnchorGenerator + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features + >>> # RetinaNet needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280, + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the network generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. 
We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator( + >>> sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),) + >>> ) + >>> + >>> # put the pieces together inside a RetinaNet model + >>> model = RetinaNet(backbone, + >>> num_classes=2, + >>> anchor_generator=anchor_generator) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + + __annotations__ = { + "box_coder": det_utils.BoxCoder, + "proposal_matcher": det_utils.Matcher, + } + + def __init__( + self, + backbone, + num_classes, + # transform parameters + min_size=800, + max_size=1333, + image_mean=None, + image_std=None, + # Anchor parameters + anchor_generator=None, + head=None, + proposal_matcher=None, + score_thresh=0.05, + nms_thresh=0.5, + detections_per_img=300, + fg_iou_thresh=0.5, + bg_iou_thresh=0.4, + topk_candidates=1000, + **kwargs, + ): + super().__init__() + _log_api_usage_once(self) + + if not hasattr(backbone, "out_channels"): + raise ValueError( + "backbone should contain an attribute out_channels " + "specifying the number of output channels (assumed to be the " + "same for all the levels)" + ) + self.backbone = backbone + + if not isinstance(anchor_generator, (AnchorGenerator, type(None))): + raise TypeError( + f"anchor_generator should be of type AnchorGenerator or None instead of {type(anchor_generator)}" + ) + + if anchor_generator is None: + anchor_generator = _default_anchorgen() + self.anchor_generator = anchor_generator + + if head is None: + head = RetinaNetHead(backbone.out_channels, anchor_generator.num_anchors_per_location()[0], num_classes) + self.head = head + + if proposal_matcher is None: + proposal_matcher = det_utils.Matcher( + fg_iou_thresh, + bg_iou_thresh, + allow_low_quality_matches=True, + ) + self.proposal_matcher = proposal_matcher + + self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) + + if image_mean is None: + image_mean = [0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std, **kwargs) + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + self.topk_candidates = topk_candidates + + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs(self, losses, detections): + # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + if self.training: + return losses + + return detections + + def compute_loss(self, targets, head_outputs, anchors): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Dict[str, Tensor] + matched_idxs = [] + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image["boxes"].numel() == 0: + matched_idxs.append( + torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device) + ) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + return self.head.compute_loss(targets, head_outputs, anchors, matched_idxs) + + def postprocess_detections(self, head_outputs, anchors, image_shapes): + # type: (Dict[str, List[Tensor]], List[List[Tensor]], List[Tuple[int, int]]) -> List[Dict[str, Tensor]] + 
class_logits = head_outputs["cls_logits"] + box_regression = head_outputs["bbox_regression"] + + num_images = len(image_shapes) + + detections: List[Dict[str, Tensor]] = [] + + for index in range(num_images): + box_regression_per_image = [br[index] for br in box_regression] + logits_per_image = [cl[index] for cl in class_logits] + anchors_per_image, image_shape = anchors[index], image_shapes[index] + + image_boxes = [] + image_scores = [] + image_labels = [] + + for box_regression_per_level, logits_per_level, anchors_per_level in zip( + box_regression_per_image, logits_per_image, anchors_per_image + ): + num_classes = logits_per_level.shape[-1] + + # remove low scoring boxes + scores_per_level = torch.sigmoid(logits_per_level).flatten() + keep_idxs = scores_per_level > self.score_thresh + scores_per_level = scores_per_level[keep_idxs] + topk_idxs = torch.where(keep_idxs)[0] + + # keep only topk scoring predictions + num_topk = det_utils._topk_min(topk_idxs, self.topk_candidates, 0) + scores_per_level, idxs = scores_per_level.topk(num_topk) + topk_idxs = topk_idxs[idxs] + + anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode="floor") + labels_per_level = topk_idxs % num_classes + + boxes_per_level = self.box_coder.decode_single( + box_regression_per_level[anchor_idxs], anchors_per_level[anchor_idxs] + ) + boxes_per_level = box_ops.clip_boxes_to_image(boxes_per_level, image_shape) + + image_boxes.append(boxes_per_level) + image_scores.append(scores_per_level) + image_labels.append(labels_per_level) + + image_boxes = torch.cat(image_boxes, dim=0) + image_scores = torch.cat(image_scores, dim=0) + image_labels = torch.cat(image_labels, dim=0) + + # non-maximum suppression + keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh) + keep = keep[: self.detections_per_img] + + detections.append( + { + "boxes": image_boxes[keep], + "scores": image_scores[keep], + "labels": image_labels[keep], + } + ) + + return detections + + def forward(self, images, targets=None): + # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + """ + Args: + images (list[Tensor]): images to be processed + targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional) + + Returns: + result (list[BoxList] or dict[Tensor]): the output from the model. + During training, it returns a dict[Tensor] which contains the losses. + During testing, it returns list[BoxList] contains additional fields + like `scores`, `labels` and `mask` (for Mask R-CNN models). 
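# Illustrative sketch of the index arithmetic used in postprocess_detections
# above: the per-level logits of shape (HWA, K) are flattened, so one flattened
# index encodes both the anchor and the class (anchor = idx // K, label = idx % K).
# Tiny hypothetical numbers below.
import torch

num_anchors, num_classes = 4, 3                       # HWA = 4, K = 3
logits = torch.randn(num_anchors, num_classes)
scores = torch.sigmoid(logits).flatten()              # shape (HWA * K,)

topk_scores, topk_idxs = scores.topk(2)
anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode="floor")
labels = topk_idxs % num_classes
# anchor_idxs selects rows of the decoded boxes; labels are the class ids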
+ + """ + if self.training: + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + for target in targets: + boxes = target["boxes"] + torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + "Expected target boxes to be a tensor of shape [N, 4].", + ) + + # get the original image sizes + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) + original_image_sizes.append((val[0], val[1])) + + # transform the input + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + # TODO: Move this to a function + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + # print the first degenerate box + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + torch._assert( + False, + "All bounding boxes should have positive height and width." + f" Found invalid box {degen_bb} for target at index {target_idx}.", + ) + + # get the features from the backbone + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([("0", features)]) + + # TODO: Do we want a list or a dict? + features = list(features.values()) + + # compute the retinanet heads outputs using the features + head_outputs = self.head(features) + + # create the set of anchors + anchors = self.anchor_generator(images, features) + + losses = {} + detections: List[Dict[str, Tensor]] = [] + if self.training: + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + # compute the losses + losses = self.compute_loss(targets, head_outputs, anchors) + else: + # recover level sizes + num_anchors_per_level = [x.size(2) * x.size(3) for x in features] + HW = 0 + for v in num_anchors_per_level: + HW += v + HWA = head_outputs["cls_logits"].size(1) + A = HWA // HW + num_anchors_per_level = [hw * A for hw in num_anchors_per_level] + + # split outputs per level + split_head_outputs: Dict[str, List[Tensor]] = {} + for k in head_outputs: + split_head_outputs[k] = list(head_outputs[k].split(num_anchors_per_level, dim=1)) + split_anchors = [list(a.split(num_anchors_per_level)) for a in anchors] + + # compute the detections + detections = self.postprocess_detections(split_head_outputs, split_anchors, images.image_sizes) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("RetinaNet always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) + + +_COMMON_META = { + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), +} + + +class RetinaNet_ResNet50_FPN_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/retinanet_resnet50_fpn_coco-eeacb38b.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 34014999, + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#retinanet", + "_metrics": { + "COCO-val2017": { + "box_map": 36.4, + } + }, + "_ops": 
151.54, + "_file_size": 130.267, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/retinanet_resnet50_fpn_v2_coco-5905b1c5.pth", + transforms=ObjectDetection, + meta={ + **_COMMON_META, + "num_params": 38198935, + "recipe": "https://github.com/pytorch/vision/pull/5756", + "_metrics": { + "COCO-val2017": { + "box_map": 41.5, + } + }, + "_ops": 152.238, + "_file_size": 146.037, + "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", RetinaNet_ResNet50_FPN_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def retinanet_resnet50_fpn( + *, + weights: Optional[RetinaNet_ResNet50_FPN_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> RetinaNet: + """ + Constructs a RetinaNet model with a ResNet-50-FPN backbone. + + .. betastatus:: detection module + + Reference: `Focal Loss for Dense Object Detection `_. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each detection + - scores (``Tensor[N]``): the scores of each detection + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Example:: + + >>> model = torchvision.models.detection.retinanet_resnet50_fpn(weights=RetinaNet_ResNet50_FPN_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool): If True, displays a progress bar of the download to stderr. Default is True. 
+ num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for + the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is + passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.RetinaNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.RetinaNet_ResNet50_FPN_Weights + :members: + """ + weights = RetinaNet_ResNet50_FPN_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d + + backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer) + # skip P2 because it generates too many anchors (according to their paper) + backbone = _resnet_fpn_extractor( + backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(256, 256) + ) + model = RetinaNet(backbone, num_classes, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + if weights == RetinaNet_ResNet50_FPN_Weights.COCO_V1: + overwrite_eps(model, 0.0) + + return model + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", RetinaNet_ResNet50_FPN_V2_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1), +) +def retinanet_resnet50_fpn_v2( + *, + weights: Optional[RetinaNet_ResNet50_FPN_V2_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[ResNet50_Weights] = None, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> RetinaNet: + """ + Constructs an improved RetinaNet model with a ResNet-50-FPN backbone. + + .. betastatus:: detection module + + Reference: `Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection + `_. + + :func:`~torchvision.models.detection.retinanet_resnet50_fpn` for more details. + + Args: + weights (:class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool): If True, displays a progress bar of the download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for + the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. 
If ``None`` is + passed (the default) this value is set to 3. + **kwargs: parameters passed to the ``torchvision.models.detection.RetinaNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.RetinaNet_ResNet50_FPN_V2_Weights + :members: + """ + weights = RetinaNet_ResNet50_FPN_V2_Weights.verify(weights) + weights_backbone = ResNet50_Weights.verify(weights_backbone) + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + is_trained = weights is not None or weights_backbone is not None + trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3) + + backbone = resnet50(weights=weights_backbone, progress=progress) + backbone = _resnet_fpn_extractor( + backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=LastLevelP6P7(2048, 256) + ) + anchor_generator = _default_anchorgen() + head = RetinaNetHead( + backbone.out_channels, + anchor_generator.num_anchors_per_location()[0], + num_classes, + norm_layer=partial(nn.GroupNorm, 32), + ) + head.regression_head._loss_type = "giou" + model = RetinaNet(backbone, num_classes, anchor_generator=anchor_generator, head=head, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..51b210cb6f368c1f4914ffe99287efef6057cba4 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/roi_heads.py @@ -0,0 +1,876 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn, Tensor +from torchvision.ops import boxes as box_ops, roi_align + +from . import _utils as det_utils + + +def fastrcnn_loss(class_logits, box_regression, labels, regression_targets): + # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] + """ + Computes the loss for Faster R-CNN. 
+ + Args: + class_logits (Tensor) + box_regression (Tensor) + labels (list[BoxList]) + regression_targets (Tensor) + + Returns: + classification_loss (Tensor) + box_loss (Tensor) + """ + + labels = torch.cat(labels, dim=0) + regression_targets = torch.cat(regression_targets, dim=0) + + classification_loss = F.cross_entropy(class_logits, labels) + + # get indices that correspond to the regression targets for + # the corresponding ground truth labels, to be used with + # advanced indexing + sampled_pos_inds_subset = torch.where(labels > 0)[0] + labels_pos = labels[sampled_pos_inds_subset] + N, num_classes = class_logits.shape + box_regression = box_regression.reshape(N, box_regression.size(-1) // 4, 4) + + box_loss = F.smooth_l1_loss( + box_regression[sampled_pos_inds_subset, labels_pos], + regression_targets[sampled_pos_inds_subset], + beta=1 / 9, + reduction="sum", + ) + box_loss = box_loss / labels.numel() + + return classification_loss, box_loss + + +def maskrcnn_inference(x, labels): + # type: (Tensor, List[Tensor]) -> List[Tensor] + """ + From the results of the CNN, post process the masks + by taking the mask corresponding to the class with max + probability (which are of fixed size and directly output + by the CNN) and return the masks in the mask field of the BoxList. + + Args: + x (Tensor): the mask logits + labels (list[BoxList]): bounding boxes that are used as + reference, one for ech image + + Returns: + results (list[BoxList]): one BoxList for each image, containing + the extra field mask + """ + mask_prob = x.sigmoid() + + # select masks corresponding to the predicted classes + num_masks = x.shape[0] + boxes_per_image = [label.shape[0] for label in labels] + labels = torch.cat(labels) + index = torch.arange(num_masks, device=labels.device) + mask_prob = mask_prob[index, labels][:, None] + mask_prob = mask_prob.split(boxes_per_image, dim=0) + + return mask_prob + + +def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M): + # type: (Tensor, Tensor, Tensor, int) -> Tensor + """ + Given segmentation masks and the bounding boxes corresponding + to the location of the masks in the image, this function + crops and resizes the masks in the position defined by the + boxes. This prepares the masks for them to be fed to the + loss computation as the targets. 
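# Illustrative sketch of the class-specific box indexing in fastrcnn_loss above:
# box_regression carries one 4-vector per class for every proposal; reshaping to
# (N, num_classes, 4) and indexing with (positive proposal, its gt label) picks
# the regression output for the matched class. Hypothetical toy values below.
import torch

N, num_classes = 5, 3
box_regression = torch.randn(N, num_classes * 4).reshape(N, num_classes, 4)
labels = torch.tensor([0, 2, 1, 0, 2])             # 0 = background
sampled_pos_inds = torch.where(labels > 0)[0]      # proposals matched to a gt box
labels_pos = labels[sampled_pos_inds]
per_class_deltas = box_regression[sampled_pos_inds, labels_pos]  # (num_pos, 4)
print(per_class_deltas.shape)                      # torch.Size([3, 4])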
+ """ + matched_idxs = matched_idxs.to(boxes) + rois = torch.cat([matched_idxs[:, None], boxes], dim=1) + gt_masks = gt_masks[:, None].to(rois) + return roi_align(gt_masks, rois, (M, M), 1.0)[:, 0] + + +def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs): + # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor + """ + Args: + proposals (list[BoxList]) + mask_logits (Tensor) + targets (list[BoxList]) + + Return: + mask_loss (Tensor): scalar tensor containing the loss + """ + + discretization_size = mask_logits.shape[-1] + labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)] + mask_targets = [ + project_masks_on_boxes(m, p, i, discretization_size) for m, p, i in zip(gt_masks, proposals, mask_matched_idxs) + ] + + labels = torch.cat(labels, dim=0) + mask_targets = torch.cat(mask_targets, dim=0) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if mask_targets.numel() == 0: + return mask_logits.sum() * 0 + + mask_loss = F.binary_cross_entropy_with_logits( + mask_logits[torch.arange(labels.shape[0], device=labels.device), labels], mask_targets + ) + return mask_loss + + +def keypoints_to_heatmap(keypoints, rois, heatmap_size): + # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor] + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +def _onnx_heatmaps_to_keypoints( + maps, maps_i, roi_map_width, roi_map_height, widths_i, heights_i, offset_x_i, offset_y_i +): + num_keypoints = torch.scalar_tensor(maps.size(1), dtype=torch.int64) + + width_correction = widths_i / roi_map_width + height_correction = heights_i / roi_map_height + + roi_map = F.interpolate( + maps_i[:, None], size=(int(roi_map_height), int(roi_map_width)), mode="bicubic", align_corners=False + )[:, 0] + + w = torch.scalar_tensor(roi_map.size(2), dtype=torch.int64) + pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1) + + x_int = pos % w + y_int = (pos - x_int) // w + + x = (torch.tensor(0.5, dtype=torch.float32) + x_int.to(dtype=torch.float32)) * width_correction.to( + dtype=torch.float32 + ) + y = (torch.tensor(0.5, dtype=torch.float32) + y_int.to(dtype=torch.float32)) * height_correction.to( + dtype=torch.float32 + ) + + xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32) + xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32) + xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32) + xy_preds_i = torch.stack( + [ + xy_preds_i_0.to(dtype=torch.float32), + xy_preds_i_1.to(dtype=torch.float32), + xy_preds_i_2.to(dtype=torch.float32), + ], + 0, + ) + + # TODO: simplify when indexing without rank will be supported by ONNX + base = num_keypoints * num_keypoints + num_keypoints + 1 + ind 
= torch.arange(num_keypoints) + ind = ind.to(dtype=torch.int64) * base + end_scores_i = ( + roi_map.index_select(1, y_int.to(dtype=torch.int64)) + .index_select(2, x_int.to(dtype=torch.int64)) + .view(-1) + .index_select(0, ind.to(dtype=torch.int64)) + ) + + return xy_preds_i, end_scores_i + + +@torch.jit._script_if_tracing +def _onnx_heatmaps_to_keypoints_loop( + maps, rois, widths_ceil, heights_ceil, widths, heights, offset_x, offset_y, num_keypoints +): + xy_preds = torch.zeros((0, 3, int(num_keypoints)), dtype=torch.float32, device=maps.device) + end_scores = torch.zeros((0, int(num_keypoints)), dtype=torch.float32, device=maps.device) + + for i in range(int(rois.size(0))): + xy_preds_i, end_scores_i = _onnx_heatmaps_to_keypoints( + maps, maps[i], widths_ceil[i], heights_ceil[i], widths[i], heights[i], offset_x[i], offset_y[i] + ) + xy_preds = torch.cat((xy_preds.to(dtype=torch.float32), xy_preds_i.unsqueeze(0).to(dtype=torch.float32)), 0) + end_scores = torch.cat( + (end_scores.to(dtype=torch.float32), end_scores_i.to(dtype=torch.float32).unsqueeze(0)), 0 + ) + return xy_preds, end_scores + + +def heatmaps_to_keypoints(maps, rois): + """Extract predicted keypoint locations from heatmaps. Output has shape + (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) + for each keypoint. + """ + # This function converts a discrete image coordinate in a HEATMAP_SIZE x + # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain + # consistency with keypoints_to_heatmap_labels by using the conversion from + # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a + # continuous coordinate. + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = rois[:, 2] - rois[:, 0] + heights = rois[:, 3] - rois[:, 1] + widths = widths.clamp(min=1) + heights = heights.clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_keypoints = maps.shape[1] + + if torchvision._is_tracing(): + xy_preds, end_scores = _onnx_heatmaps_to_keypoints_loop( + maps, + rois, + widths_ceil, + heights_ceil, + widths, + heights, + offset_x, + offset_y, + torch.scalar_tensor(num_keypoints, dtype=torch.int64), + ) + return xy_preds.permute(0, 2, 1), end_scores + + xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device) + end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device) + for i in range(len(rois)): + roi_map_width = int(widths_ceil[i].item()) + roi_map_height = int(heights_ceil[i].item()) + width_correction = widths[i] / roi_map_width + height_correction = heights[i] / roi_map_height + roi_map = F.interpolate( + maps[i][:, None], size=(roi_map_height, roi_map_width), mode="bicubic", align_corners=False + )[:, 0] + # roi_map_probs = scores_to_probs(roi_map.copy()) + w = roi_map.shape[2] + pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1) + + x_int = pos % w + y_int = torch.div(pos - x_int, w, rounding_mode="floor") + # assert (roi_map_probs[k, y_int, x_int] == + # roi_map_probs[k, :, :].max()) + x = (x_int.float() + 0.5) * width_correction + y = (y_int.float() + 0.5) * height_correction + xy_preds[i, 0, :] = x + offset_x[i] + xy_preds[i, 1, :] = y + offset_y[i] + xy_preds[i, 2, :] = 1 + end_scores[i, :] = roi_map[torch.arange(num_keypoints, device=roi_map.device), y_int, x_int] + + return xy_preds.permute(0, 2, 1), end_scores + + +def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs): + # type: (Tensor, List[Tensor], List[Tensor], 
List[Tensor]) -> Tensor + N, K, H, W = keypoint_logits.shape + if H != W: + raise ValueError( + f"keypoint_logits height and width (last two elements of shape) should be equal. Instead got H = {H} and W = {W}" + ) + discretization_size = H + heatmaps = [] + valid = [] + for proposals_per_image, gt_kp_in_image, midx in zip(proposals, gt_keypoints, keypoint_matched_idxs): + kp = gt_kp_in_image[midx] + heatmaps_per_image, valid_per_image = keypoints_to_heatmap(kp, proposals_per_image, discretization_size) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + keypoint_targets = torch.cat(heatmaps, dim=0) + valid = torch.cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.where(valid)[0] + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it sepaartely + if keypoint_targets.numel() == 0 or len(valid) == 0: + return keypoint_logits.sum() * 0 + + keypoint_logits = keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid]) + return keypoint_loss + + +def keypointrcnn_inference(x, boxes): + # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + kp_probs = [] + kp_scores = [] + + boxes_per_image = [box.size(0) for box in boxes] + x2 = x.split(boxes_per_image, dim=0) + + for xx, bb in zip(x2, boxes): + kp_prob, scores = heatmaps_to_keypoints(xx, bb) + kp_probs.append(kp_prob) + kp_scores.append(scores) + + return kp_probs, kp_scores + + +def _onnx_expand_boxes(boxes, scale): + # type: (Tensor, float) -> Tensor + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half = w_half.to(dtype=torch.float32) * scale + h_half = h_half.to(dtype=torch.float32) * scale + + boxes_exp0 = x_c - w_half + boxes_exp1 = y_c - h_half + boxes_exp2 = x_c + w_half + boxes_exp3 = y_c + h_half + boxes_exp = torch.stack((boxes_exp0, boxes_exp1, boxes_exp2, boxes_exp3), 1) + return boxes_exp + + +# the next two functions should be merged inside Masker +# but are kept here for the moment while we need them +# temporarily for paste_mask_in_image +def expand_boxes(boxes, scale): + # type: (Tensor, float) -> Tensor + if torchvision._is_tracing(): + return _onnx_expand_boxes(boxes, scale) + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + boxes_exp = torch.zeros_like(boxes) + boxes_exp[:, 0] = x_c - w_half + boxes_exp[:, 2] = x_c + w_half + boxes_exp[:, 1] = y_c - h_half + boxes_exp[:, 3] = y_c + h_half + return boxes_exp + + +@torch.jit.unused +def expand_masks_tracing_scale(M, padding): + # type: (int, int) -> float + return torch.tensor(M + 2 * padding).to(torch.float32) / torch.tensor(M).to(torch.float32) + + +def expand_masks(mask, padding): + # type: (Tensor, int) -> Tuple[Tensor, float] + M = mask.shape[-1] + if torch._C._get_tracing_state(): # could not import is_tracing(), not sure why + scale = expand_masks_tracing_scale(M, padding) + else: + scale = float(M + 2 * padding) / M + padded_mask = F.pad(mask, (padding,) * 4) + return padded_mask, scale + + +def paste_mask_in_image(mask, box, im_h, im_w): + # type: (Tensor, Tensor, int, int) -> Tensor + TO_REMOVE = 1 + w = int(box[2] - box[0] + TO_REMOVE) + h = int(box[3] - box[1] + TO_REMOVE) + w = max(w, 1) + h = max(h, 1) + + # 
Set shape to [batchxCxHxW] + mask = mask.expand((1, 1, -1, -1)) + + # Resize mask + mask = F.interpolate(mask, size=(h, w), mode="bilinear", align_corners=False) + mask = mask[0][0] + + im_mask = torch.zeros((im_h, im_w), dtype=mask.dtype, device=mask.device) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, im_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, im_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])] + return im_mask + + +def _onnx_paste_mask_in_image(mask, box, im_h, im_w): + one = torch.ones(1, dtype=torch.int64) + zero = torch.zeros(1, dtype=torch.int64) + + w = box[2] - box[0] + one + h = box[3] - box[1] + one + w = torch.max(torch.cat((w, one))) + h = torch.max(torch.cat((h, one))) + + # Set shape to [batchxCxHxW] + mask = mask.expand((1, 1, mask.size(0), mask.size(1))) + + # Resize mask + mask = F.interpolate(mask, size=(int(h), int(w)), mode="bilinear", align_corners=False) + mask = mask[0][0] + + x_0 = torch.max(torch.cat((box[0].unsqueeze(0), zero))) + x_1 = torch.min(torch.cat((box[2].unsqueeze(0) + one, im_w.unsqueeze(0)))) + y_0 = torch.max(torch.cat((box[1].unsqueeze(0), zero))) + y_1 = torch.min(torch.cat((box[3].unsqueeze(0) + one, im_h.unsqueeze(0)))) + + unpaded_im_mask = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])] + + # TODO : replace below with a dynamic padding when support is added in ONNX + + # pad y + zeros_y0 = torch.zeros(y_0, unpaded_im_mask.size(1)) + zeros_y1 = torch.zeros(im_h - y_1, unpaded_im_mask.size(1)) + concat_0 = torch.cat((zeros_y0, unpaded_im_mask.to(dtype=torch.float32), zeros_y1), 0)[0:im_h, :] + # pad x + zeros_x0 = torch.zeros(concat_0.size(0), x_0) + zeros_x1 = torch.zeros(concat_0.size(0), im_w - x_1) + im_mask = torch.cat((zeros_x0, concat_0, zeros_x1), 1)[:, :im_w] + return im_mask + + +@torch.jit._script_if_tracing +def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w): + res_append = torch.zeros(0, im_h, im_w) + for i in range(masks.size(0)): + mask_res = _onnx_paste_mask_in_image(masks[i][0], boxes[i], im_h, im_w) + mask_res = mask_res.unsqueeze(0) + res_append = torch.cat((res_append, mask_res)) + return res_append + + +def paste_masks_in_image(masks, boxes, img_shape, padding=1): + # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor + masks, scale = expand_masks(masks, padding=padding) + boxes = expand_boxes(boxes, scale).to(dtype=torch.int64) + im_h, im_w = img_shape + + if torchvision._is_tracing(): + return _onnx_paste_masks_in_image_loop( + masks, boxes, torch.scalar_tensor(im_h, dtype=torch.int64), torch.scalar_tensor(im_w, dtype=torch.int64) + )[:, None] + res = [paste_mask_in_image(m[0], b, im_h, im_w) for m, b in zip(masks, boxes)] + if len(res) > 0: + ret = torch.stack(res, dim=0)[:, None] + else: + ret = masks.new_empty((0, 1, im_h, im_w)) + return ret + + +class RoIHeads(nn.Module): + __annotations__ = { + "box_coder": det_utils.BoxCoder, + "proposal_matcher": det_utils.Matcher, + "fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler, + } + + def __init__( + self, + box_roi_pool, + box_head, + box_predictor, + # Faster R-CNN training + fg_iou_thresh, + bg_iou_thresh, + batch_size_per_image, + positive_fraction, + bbox_reg_weights, + # Faster R-CNN inference + score_thresh, + nms_thresh, + detections_per_img, + # Mask + mask_roi_pool=None, + mask_head=None, + mask_predictor=None, + keypoint_roi_pool=None, + keypoint_head=None, + keypoint_predictor=None, + ): + super().__init__() + + self.box_similarity = box_ops.box_iou 
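# Illustrative sketch of paste_masks_in_image defined earlier in this file:
# fixed-size per-box mask probabilities (e.g. 28x28) are resized into their
# boxes and written onto a full-resolution canvas. Shapes and boxes below are
# hypothetical toy values.
import torch
from torchvision.models.detection.roi_heads import paste_masks_in_image

masks = torch.rand(2, 1, 28, 28)                        # per-detection mask probs
boxes = torch.tensor([[10.0, 10.0, 60.0, 80.0],
                      [100.0, 40.0, 180.0, 90.0]])      # [x1, y1, x2, y2]
full_masks = paste_masks_in_image(masks, boxes, img_shape=(200, 200))
print(full_masks.shape)                                 # torch.Size([2, 1, 200, 200])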
+ # assign ground-truth boxes for each proposal + self.proposal_matcher = det_utils.Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False) + + self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction) + + if bbox_reg_weights is None: + bbox_reg_weights = (10.0, 10.0, 5.0, 5.0) + self.box_coder = det_utils.BoxCoder(bbox_reg_weights) + + self.box_roi_pool = box_roi_pool + self.box_head = box_head + self.box_predictor = box_predictor + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + + self.mask_roi_pool = mask_roi_pool + self.mask_head = mask_head + self.mask_predictor = mask_predictor + + self.keypoint_roi_pool = keypoint_roi_pool + self.keypoint_head = keypoint_head + self.keypoint_predictor = keypoint_predictor + + def has_mask(self): + if self.mask_roi_pool is None: + return False + if self.mask_head is None: + return False + if self.mask_predictor is None: + return False + return True + + def has_keypoint(self): + if self.keypoint_roi_pool is None: + return False + if self.keypoint_head is None: + return False + if self.keypoint_predictor is None: + return False + return True + + def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels): + # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + matched_idxs = [] + labels = [] + for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels): + + if gt_boxes_in_image.numel() == 0: + # Background image + device = proposals_in_image.device + clamped_matched_idxs_in_image = torch.zeros( + (proposals_in_image.shape[0],), dtype=torch.int64, device=device + ) + labels_in_image = torch.zeros((proposals_in_image.shape[0],), dtype=torch.int64, device=device) + else: + # set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands + match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image) + matched_idxs_in_image = self.proposal_matcher(match_quality_matrix) + + clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0) + + labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image] + labels_in_image = labels_in_image.to(dtype=torch.int64) + + # Label background (below the low threshold) + bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD + labels_in_image[bg_inds] = 0 + + # Label ignore proposals (between low and high thresholds) + ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS + labels_in_image[ignore_inds] = -1 # -1 is ignored by sampler + + matched_idxs.append(clamped_matched_idxs_in_image) + labels.append(labels_in_image) + return matched_idxs, labels + + def subsample(self, labels): + # type: (List[Tensor]) -> List[Tensor] + sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) + sampled_inds = [] + for img_idx, (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)): + img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0] + sampled_inds.append(img_sampled_inds) + return sampled_inds + + def add_gt_proposals(self, proposals, gt_boxes): + # type: (List[Tensor], List[Tensor]) -> List[Tensor] + proposals = [torch.cat((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes)] + + return proposals + + def check_targets(self, targets): + # type: (Optional[List[Dict[str, Tensor]]]) -> None + if targets is None: + raise ValueError("targets should not be None") + if 
not all(["boxes" in t for t in targets]): + raise ValueError("Every element of targets should have a boxes key") + if not all(["labels" in t for t in targets]): + raise ValueError("Every element of targets should have a labels key") + if self.has_mask(): + if not all(["masks" in t for t in targets]): + raise ValueError("Every element of targets should have a masks key") + + def select_training_samples( + self, + proposals, # type: List[Tensor] + targets, # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]] + self.check_targets(targets) + if targets is None: + raise ValueError("targets should not be None") + dtype = proposals[0].dtype + device = proposals[0].device + + gt_boxes = [t["boxes"].to(dtype) for t in targets] + gt_labels = [t["labels"] for t in targets] + + # append ground-truth bboxes to propos + proposals = self.add_gt_proposals(proposals, gt_boxes) + + # get matching gt indices for each proposal + matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels) + # sample a fixed proportion of positive-negative proposals + sampled_inds = self.subsample(labels) + matched_gt_boxes = [] + num_images = len(proposals) + for img_id in range(num_images): + img_sampled_inds = sampled_inds[img_id] + proposals[img_id] = proposals[img_id][img_sampled_inds] + labels[img_id] = labels[img_id][img_sampled_inds] + matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds] + + gt_boxes_in_image = gt_boxes[img_id] + if gt_boxes_in_image.numel() == 0: + gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device) + matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]]) + + regression_targets = self.box_coder.encode(matched_gt_boxes, proposals) + return proposals, matched_idxs, labels, regression_targets + + def postprocess_detections( + self, + class_logits, # type: Tensor + box_regression, # type: Tensor + proposals, # type: List[Tensor] + image_shapes, # type: List[Tuple[int, int]] + ): + # type: (...) 
-> Tuple[List[Tensor], List[Tensor], List[Tensor]] + device = class_logits.device + num_classes = class_logits.shape[-1] + + boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals] + pred_boxes = self.box_coder.decode(box_regression, proposals) + + pred_scores = F.softmax(class_logits, -1) + + pred_boxes_list = pred_boxes.split(boxes_per_image, 0) + pred_scores_list = pred_scores.split(boxes_per_image, 0) + + all_boxes = [] + all_scores = [] + all_labels = [] + for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes): + boxes = box_ops.clip_boxes_to_image(boxes, image_shape) + + # create labels for each prediction + labels = torch.arange(num_classes, device=device) + labels = labels.view(1, -1).expand_as(scores) + + # remove predictions with the background label + boxes = boxes[:, 1:] + scores = scores[:, 1:] + labels = labels[:, 1:] + + # batch everything, by making every class prediction be a separate instance + boxes = boxes.reshape(-1, 4) + scores = scores.reshape(-1) + labels = labels.reshape(-1) + + # remove low scoring boxes + inds = torch.where(scores > self.score_thresh)[0] + boxes, scores, labels = boxes[inds], scores[inds], labels[inds] + + # remove empty boxes + keep = box_ops.remove_small_boxes(boxes, min_size=1e-2) + boxes, scores, labels = boxes[keep], scores[keep], labels[keep] + + # non-maximum suppression, independently done per class + keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh) + # keep only topk scoring predictions + keep = keep[: self.detections_per_img] + boxes, scores, labels = boxes[keep], scores[keep], labels[keep] + + all_boxes.append(boxes) + all_scores.append(scores) + all_labels.append(labels) + + return all_boxes, all_scores, all_labels + + def forward( + self, + features, # type: Dict[str, Tensor] + proposals, # type: List[Tensor] + image_shapes, # type: List[Tuple[int, int]] + targets=None, # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) 
-> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]] + """ + Args: + features (List[Tensor]) + proposals (List[Tensor[N, 4]]) + image_shapes (List[Tuple[H, W]]) + targets (List[Dict]) + """ + if targets is not None: + for t in targets: + # TODO: https://github.com/pytorch/pytorch/issues/26731 + floating_point_types = (torch.float, torch.double, torch.half) + if not t["boxes"].dtype in floating_point_types: + raise TypeError(f"target boxes must of float type, instead got {t['boxes'].dtype}") + if not t["labels"].dtype == torch.int64: + raise TypeError(f"target labels must of int64 type, instead got {t['labels'].dtype}") + if self.has_keypoint(): + if not t["keypoints"].dtype == torch.float32: + raise TypeError(f"target keypoints must of float type, instead got {t['keypoints'].dtype}") + + if self.training: + proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets) + else: + labels = None + regression_targets = None + matched_idxs = None + + box_features = self.box_roi_pool(features, proposals, image_shapes) + box_features = self.box_head(box_features) + class_logits, box_regression = self.box_predictor(box_features) + + result: List[Dict[str, torch.Tensor]] = [] + losses = {} + if self.training: + if labels is None: + raise ValueError("labels cannot be None") + if regression_targets is None: + raise ValueError("regression_targets cannot be None") + loss_classifier, loss_box_reg = fastrcnn_loss(class_logits, box_regression, labels, regression_targets) + losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg} + else: + boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes) + num_images = len(boxes) + for i in range(num_images): + result.append( + { + "boxes": boxes[i], + "labels": labels[i], + "scores": scores[i], + } + ) + + if self.has_mask(): + mask_proposals = [p["boxes"] for p in result] + if self.training: + if matched_idxs is None: + raise ValueError("if in training, matched_idxs should not be None") + + # during training, only focus on positive boxes + num_images = len(proposals) + mask_proposals = [] + pos_matched_idxs = [] + for img_id in range(num_images): + pos = torch.where(labels[img_id] > 0)[0] + mask_proposals.append(proposals[img_id][pos]) + pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + if self.mask_roi_pool is not None: + mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes) + mask_features = self.mask_head(mask_features) + mask_logits = self.mask_predictor(mask_features) + else: + raise Exception("Expected mask_roi_pool to be not None") + + loss_mask = {} + if self.training: + if targets is None or pos_matched_idxs is None or mask_logits is None: + raise ValueError("targets, pos_matched_idxs, mask_logits cannot be None when training") + + gt_masks = [t["masks"] for t in targets] + gt_labels = [t["labels"] for t in targets] + rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs) + loss_mask = {"loss_mask": rcnn_loss_mask} + else: + labels = [r["labels"] for r in result] + masks_probs = maskrcnn_inference(mask_logits, labels) + for mask_prob, r in zip(masks_probs, result): + r["masks"] = mask_prob + + losses.update(loss_mask) + + # keep none checks in if conditional so torchscript will conditionally + # compile each branch + if ( + self.keypoint_roi_pool is not None + and self.keypoint_head is not None + and self.keypoint_predictor is not None + ): + 
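            # Keypoint branch: mirrors the mask branch above. During training it is
            # restricted to positive (foreground) proposals; at inference it reuses
            # the boxes already stored in `result` by the box branch.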
keypoint_proposals = [p["boxes"] for p in result] + if self.training: + # during training, only focus on positive boxes + num_images = len(proposals) + keypoint_proposals = [] + pos_matched_idxs = [] + if matched_idxs is None: + raise ValueError("if in training, matched_idxs should not be None") + + for img_id in range(num_images): + pos = torch.where(labels[img_id] > 0)[0] + keypoint_proposals.append(proposals[img_id][pos]) + pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes) + keypoint_features = self.keypoint_head(keypoint_features) + keypoint_logits = self.keypoint_predictor(keypoint_features) + + loss_keypoint = {} + if self.training: + if targets is None or pos_matched_idxs is None: + raise ValueError("both targets and pos_matched_idxs should not be None when in training mode") + + gt_keypoints = [t["keypoints"] for t in targets] + rcnn_loss_keypoint = keypointrcnn_loss( + keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs + ) + loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint} + else: + if keypoint_logits is None or keypoint_proposals is None: + raise ValueError( + "both keypoint_logits and keypoint_proposals should not be None when not in training mode" + ) + + keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals) + for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result): + r["keypoints"] = keypoint_prob + r["keypoints_scores"] = kps + losses.update(loss_keypoint) + + return result, losses diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py new file mode 100644 index 0000000000000000000000000000000000000000..584798df7c191db426f0ec8c3e4ab737dad334de --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssd.py @@ -0,0 +1,682 @@ +import warnings +from collections import OrderedDict +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from ...ops import boxes as box_ops +from ...transforms._presets import ObjectDetection +from ...utils import _log_api_usage_once +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..vgg import VGG, vgg16, VGG16_Weights +from . 
import _utils as det_utils +from .anchor_utils import DefaultBoxGenerator +from .backbone_utils import _validate_trainable_layers +from .transform import GeneralizedRCNNTransform + + +__all__ = [ + "SSD300_VGG16_Weights", + "ssd300_vgg16", +] + + +class SSD300_VGG16_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/ssd300_vgg16_coco-b556d3b4.pth", + transforms=ObjectDetection, + meta={ + "num_params": 35641826, + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssd300-vgg16", + "_metrics": { + "COCO-val2017": { + "box_map": 25.1, + } + }, + "_ops": 34.858, + "_file_size": 135.988, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +def _xavier_init(conv: nn.Module): + for layer in conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.xavier_uniform_(layer.weight) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0.0) + + +class SSDHead(nn.Module): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + super().__init__() + self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes) + self.regression_head = SSDRegressionHead(in_channels, num_anchors) + + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: + return { + "bbox_regression": self.regression_head(x), + "cls_logits": self.classification_head(x), + } + + +class SSDScoringHead(nn.Module): + def __init__(self, module_list: nn.ModuleList, num_columns: int): + super().__init__() + self.module_list = module_list + self.num_columns = num_columns + + def _get_result_from_module_list(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.module_list[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.module_list) + if idx < 0: + idx += num_blocks + out = x + for i, module in enumerate(self.module_list): + if i == idx: + out = module(x) + return out + + def forward(self, x: List[Tensor]) -> Tensor: + all_results = [] + + for i, features in enumerate(x): + results = self._get_result_from_module_list(features, i) + + # Permute output from (N, A * K, H, W) to (N, HWA, K). + N, _, H, W = results.shape + results = results.view(N, -1, self.num_columns, H, W) + results = results.permute(0, 3, 4, 1, 2) + results = results.reshape(N, -1, self.num_columns) # Size=(N, HWA, K) + + all_results.append(results) + + return torch.cat(all_results, dim=1) + + +class SSDClassificationHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + cls_logits = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1)) + _xavier_init(cls_logits) + super().__init__(cls_logits, num_classes) + + +class SSDRegressionHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int]): + bbox_reg = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1)) + _xavier_init(bbox_reg) + super().__init__(bbox_reg, 4) + + +class SSD(nn.Module): + """ + Implements SSD architecture from `"SSD: Single Shot MultiBox Detector" `_. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. 
Different images can have different sizes, but they will be resized + to a fixed size before passing it to the backbone. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute with the list of the output channels of + each feature map. The backbone should return a single Tensor or an OrderedDict[Tensor]. + anchor_generator (DefaultBoxGenerator): module that generates the default boxes for a + set of feature maps. + size (Tuple[int, int]): the width and height to which images will be rescaled before feeding them + to the backbone. + num_classes (int): number of output classes of the model (including the background). + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + head (nn.Module, optional): Module run on top of the backbone features. Defaults to a module containing + a classification and regression module. + score_thresh (float): Score threshold used for postprocessing the detections. + nms_thresh (float): NMS threshold used for postprocessing the detections. + detections_per_img (int): Number of best detections to keep after NMS. + iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training. + topk_candidates (int): Number of best detections to keep before NMS. + positive_fraction (float): a number between 0 and 1 which indicates the proportion of positive + proposals used during the training of the classification head. It is used to estimate the negative to + positive ratio. 
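    Example:

        A minimal construction sketch (for illustration only; in practice the
        ``ssd300_vgg16`` builder defined later in this file wires these pieces
        together, including dataset-specific ``image_mean``/``image_std``):

        >>> backbone = _vgg_extractor(vgg16(weights=None), highres=False, trainable_layers=0)
        >>> anchor_generator = DefaultBoxGenerator(
        ...     [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        ...     scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
        ...     steps=[8, 16, 32, 64, 100, 300],
        ... )
        >>> model = SSD(backbone, anchor_generator, size=(300, 300), num_classes=91)
        >>> model.eval()
        >>> predictions = model([torch.rand(3, 300, 300)])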
+ """ + + __annotations__ = { + "box_coder": det_utils.BoxCoder, + "proposal_matcher": det_utils.Matcher, + } + + def __init__( + self, + backbone: nn.Module, + anchor_generator: DefaultBoxGenerator, + size: Tuple[int, int], + num_classes: int, + image_mean: Optional[List[float]] = None, + image_std: Optional[List[float]] = None, + head: Optional[nn.Module] = None, + score_thresh: float = 0.01, + nms_thresh: float = 0.45, + detections_per_img: int = 200, + iou_thresh: float = 0.5, + topk_candidates: int = 400, + positive_fraction: float = 0.25, + **kwargs: Any, + ): + super().__init__() + _log_api_usage_once(self) + + self.backbone = backbone + + self.anchor_generator = anchor_generator + + self.box_coder = det_utils.BoxCoder(weights=(10.0, 10.0, 5.0, 5.0)) + + if head is None: + if hasattr(backbone, "out_channels"): + out_channels = backbone.out_channels + else: + out_channels = det_utils.retrieve_out_channels(backbone, size) + + if len(out_channels) != len(anchor_generator.aspect_ratios): + raise ValueError( + f"The length of the output channels from the backbone ({len(out_channels)}) do not match the length of the anchor generator aspect ratios ({len(anchor_generator.aspect_ratios)})" + ) + + num_anchors = self.anchor_generator.num_anchors_per_location() + head = SSDHead(out_channels, num_anchors, num_classes) + self.head = head + + self.proposal_matcher = det_utils.SSDMatcher(iou_thresh) + + if image_mean is None: + image_mean = [0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + self.transform = GeneralizedRCNNTransform( + min(size), max(size), image_mean, image_std, size_divisible=1, fixed_size=size, **kwargs + ) + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + self.topk_candidates = topk_candidates + self.neg_to_pos_ratio = (1.0 - positive_fraction) / positive_fraction + + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs( + self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]] + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: + if self.training: + return losses + + return detections + + def compute_loss( + self, + targets: List[Dict[str, Tensor]], + head_outputs: Dict[str, Tensor], + anchors: List[Tensor], + matched_idxs: List[Tensor], + ) -> Dict[str, Tensor]: + bbox_regression = head_outputs["bbox_regression"] + cls_logits = head_outputs["cls_logits"] + + # Match original targets with default boxes + num_foreground = 0 + bbox_loss = [] + cls_targets = [] + for ( + targets_per_image, + bbox_regression_per_image, + cls_logits_per_image, + anchors_per_image, + matched_idxs_per_image, + ) in zip(targets, bbox_regression, cls_logits, anchors, matched_idxs): + # produce the matching between boxes and targets + foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0] + foreground_matched_idxs_per_image = matched_idxs_per_image[foreground_idxs_per_image] + num_foreground += foreground_matched_idxs_per_image.numel() + + # Calculate regression loss + matched_gt_boxes_per_image = targets_per_image["boxes"][foreground_matched_idxs_per_image] + bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :] + anchors_per_image = anchors_per_image[foreground_idxs_per_image, :] + target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + bbox_loss.append( + torch.nn.functional.smooth_l1_loss(bbox_regression_per_image, target_regression, 
reduction="sum") + ) + + # Estimate ground truth for class targets + gt_classes_target = torch.zeros( + (cls_logits_per_image.size(0),), + dtype=targets_per_image["labels"].dtype, + device=targets_per_image["labels"].device, + ) + gt_classes_target[foreground_idxs_per_image] = targets_per_image["labels"][ + foreground_matched_idxs_per_image + ] + cls_targets.append(gt_classes_target) + + bbox_loss = torch.stack(bbox_loss) + cls_targets = torch.stack(cls_targets) + + # Calculate classification loss + num_classes = cls_logits.size(-1) + cls_loss = F.cross_entropy(cls_logits.view(-1, num_classes), cls_targets.view(-1), reduction="none").view( + cls_targets.size() + ) + + # Hard Negative Sampling + foreground_idxs = cls_targets > 0 + num_negative = self.neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True) + # num_negative[num_negative < self.neg_to_pos_ratio] = self.neg_to_pos_ratio + negative_loss = cls_loss.clone() + negative_loss[foreground_idxs] = -float("inf") # use -inf to detect positive values that creeped in the sample + values, idx = negative_loss.sort(1, descending=True) + # background_idxs = torch.logical_and(idx.sort(1)[1] < num_negative, torch.isfinite(values)) + background_idxs = idx.sort(1)[1] < num_negative + + N = max(1, num_foreground) + return { + "bbox_regression": bbox_loss.sum() / N, + "classification": (cls_loss[foreground_idxs].sum() + cls_loss[background_idxs].sum()) / N, + } + + def forward( + self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: + if self.training: + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) + else: + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + + # get the original image sizes + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) + original_image_sizes.append((val[0], val[1])) + + # transform the input + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + torch._assert( + False, + "All bounding boxes should have positive height and width." 
+ f" Found invalid box {degen_bb} for target at index {target_idx}.", + ) + + # get the features from the backbone + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([("0", features)]) + + features = list(features.values()) + + # compute the ssd heads outputs using the features + head_outputs = self.head(features) + + # create the set of anchors + anchors = self.anchor_generator(images, features) + + losses = {} + detections: List[Dict[str, Tensor]] = [] + if self.training: + matched_idxs = [] + if targets is None: + torch._assert(False, "targets should not be none when in training mode") + else: + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image["boxes"].numel() == 0: + matched_idxs.append( + torch.full( + (anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device + ) + ) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs) + else: + detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("SSD always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) + + def postprocess_detections( + self, head_outputs: Dict[str, Tensor], image_anchors: List[Tensor], image_shapes: List[Tuple[int, int]] + ) -> List[Dict[str, Tensor]]: + bbox_regression = head_outputs["bbox_regression"] + pred_scores = F.softmax(head_outputs["cls_logits"], dim=-1) + + num_classes = pred_scores.size(-1) + device = pred_scores.device + + detections: List[Dict[str, Tensor]] = [] + + for boxes, scores, anchors, image_shape in zip(bbox_regression, pred_scores, image_anchors, image_shapes): + boxes = self.box_coder.decode_single(boxes, anchors) + boxes = box_ops.clip_boxes_to_image(boxes, image_shape) + + image_boxes = [] + image_scores = [] + image_labels = [] + for label in range(1, num_classes): + score = scores[:, label] + + keep_idxs = score > self.score_thresh + score = score[keep_idxs] + box = boxes[keep_idxs] + + # keep only topk scoring predictions + num_topk = det_utils._topk_min(score, self.topk_candidates, 0) + score, idxs = score.topk(num_topk) + box = box[idxs] + + image_boxes.append(box) + image_scores.append(score) + image_labels.append(torch.full_like(score, fill_value=label, dtype=torch.int64, device=device)) + + image_boxes = torch.cat(image_boxes, dim=0) + image_scores = torch.cat(image_scores, dim=0) + image_labels = torch.cat(image_labels, dim=0) + + # non-maximum suppression + keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh) + keep = keep[: self.detections_per_img] + + detections.append( + { + "boxes": image_boxes[keep], + "scores": image_scores[keep], + "labels": image_labels[keep], + } + ) + return detections + + +class SSDFeatureExtractorVGG(nn.Module): + def __init__(self, backbone: nn.Module, highres: bool): + super().__init__() + + _, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d)) + + # Patch ceil_mode for maxpool3 to get the same WxH output sizes as the paper + backbone[maxpool3_pos].ceil_mode = 
True + + # parameters used for L2 regularization + rescaling + self.scale_weight = nn.Parameter(torch.ones(512) * 20) + + # Multiple Feature maps - page 4, Fig 2 of SSD paper + self.features = nn.Sequential(*backbone[:maxpool4_pos]) # until conv4_3 + + # SSD300 case - page 4, Fig 2 of SSD paper + extra = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2), # conv8_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(512, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2), # conv9_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv10_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv11_2 + nn.ReLU(inplace=True), + ), + ] + ) + if highres: + # Additional layers for the SSD512 case. See page 11, footernote 5. + extra.append( + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=4), # conv12_2 + nn.ReLU(inplace=True), + ) + ) + _xavier_init(extra) + + fc = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=False), # add modified maxpool5 + nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6), # FC6 with atrous + nn.ReLU(inplace=True), + nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1), # FC7 + nn.ReLU(inplace=True), + ) + _xavier_init(fc) + extra.insert( + 0, + nn.Sequential( + *backbone[maxpool4_pos:-1], # until conv5_3, skip maxpool5 + fc, + ), + ) + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # L2 regularization + Rescaling of 1st block's feature map + x = self.features(x) + rescaled = self.scale_weight.view(1, -1, 1, 1) * F.normalize(x) + output = [rescaled] + + # Calculating Feature maps for the rest blocks + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _vgg_extractor(backbone: VGG, highres: bool, trainable_layers: int): + backbone = backbone.features + # Gather the indices of maxpools. These are the locations of output blocks. + stage_indices = [0] + [i for i, b in enumerate(backbone) if isinstance(b, nn.MaxPool2d)][:-1] + num_stages = len(stage_indices) + + # find the index of the layer from which we won't freeze + torch._assert( + 0 <= trainable_layers <= num_stages, + f"trainable_layers should be in the range [0, {num_stages}]. 
Instead got {trainable_layers}", + ) + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDFeatureExtractorVGG(backbone, highres) + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", SSD300_VGG16_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", VGG16_Weights.IMAGENET1K_FEATURES), +) +def ssd300_vgg16( + *, + weights: Optional[SSD300_VGG16_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[VGG16_Weights] = VGG16_Weights.IMAGENET1K_FEATURES, + trainable_backbone_layers: Optional[int] = None, + **kwargs: Any, +) -> SSD: + """The SSD300 model is based on the `SSD: Single Shot MultiBox Detector + `_ paper. + + .. betastatus:: detection module + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes, but they will be resized + to a fixed size before passing it to the backbone. + + The behavior of the model changes depending on if it is in training or evaluation mode. + + During training, the model expects both the input tensors and targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Example: + + >>> model = torchvision.models.detection.ssd300_vgg16(weights=SSD300_VGG16_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.SSD300_VGG16_Weights`, optional): The pretrained + weights to use. See + :class:`~torchvision.models.detection.SSD300_VGG16_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool, optional): If True, displays a progress bar of the download to stderr + Default is True. + num_classes (int, optional): number of output classes of the model (including the background) + weights_backbone (:class:`~torchvision.models.VGG16_Weights`, optional): The pretrained weights for the + backbone + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is + passed (the default) this value is set to 4. + **kwargs: parameters passed to the ``torchvision.models.detection.SSD`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.detection.SSD300_VGG16_Weights + :members: + """ + weights = SSD300_VGG16_Weights.verify(weights) + weights_backbone = VGG16_Weights.verify(weights_backbone) + + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the parameter.") + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + trainable_backbone_layers = _validate_trainable_layers( + weights is not None or weights_backbone is not None, trainable_backbone_layers, 5, 4 + ) + + # Use custom backbones more appropriate for SSD + backbone = vgg16(weights=weights_backbone, progress=progress) + backbone = _vgg_extractor(backbone, False, trainable_backbone_layers) + anchor_generator = DefaultBoxGenerator( + [[2], [2, 3], [2, 3], [2, 3], [2], [2]], + scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05], + steps=[8, 16, 32, 64, 100, 300], + ) + + defaults = { + # Rescale the input in a way compatible to the backbone + "image_mean": [0.48235, 0.45882, 0.40784], + "image_std": [1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0], # undo the 0-1 scaling of toTensor + } + kwargs: Any = {**defaults, **kwargs} + model = SSD(backbone, anchor_generator, (300, 300), num_classes, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py new file mode 100644 index 0000000000000000000000000000000000000000..b1ef24ef14d6bde102b0a9a80291a50dbfac030f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/ssdlite.py @@ -0,0 +1,331 @@ +import warnings +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from torch import nn, Tensor + +from ...ops.misc import Conv2dNormActivation +from ...transforms._presets import ObjectDetection +from ...utils import _log_api_usage_once +from .. import mobilenet +from .._api import register_model, Weights, WeightsEnum +from .._meta import _COCO_CATEGORIES +from .._utils import _ovewrite_value_param, handle_legacy_interface +from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights +from . 
import _utils as det_utils +from .anchor_utils import DefaultBoxGenerator +from .backbone_utils import _validate_trainable_layers +from .ssd import SSD, SSDScoringHead + + +__all__ = [ + "SSDLite320_MobileNet_V3_Large_Weights", + "ssdlite320_mobilenet_v3_large", +] + + +# Building blocks of SSDlite as described in section 6.2 of MobileNetV2 paper +def _prediction_block( + in_channels: int, out_channels: int, kernel_size: int, norm_layer: Callable[..., nn.Module] +) -> nn.Sequential: + return nn.Sequential( + # 3x3 depthwise with stride 1 and padding 1 + Conv2dNormActivation( + in_channels, + in_channels, + kernel_size=kernel_size, + groups=in_channels, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ), + # 1x1 projetion to output channels + nn.Conv2d(in_channels, out_channels, 1), + ) + + +def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[..., nn.Module]) -> nn.Sequential: + activation = nn.ReLU6 + intermediate_channels = out_channels // 2 + return nn.Sequential( + # 1x1 projection to half output channels + Conv2dNormActivation( + in_channels, intermediate_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation + ), + # 3x3 depthwise with stride 2 and padding 1 + Conv2dNormActivation( + intermediate_channels, + intermediate_channels, + kernel_size=3, + stride=2, + groups=intermediate_channels, + norm_layer=norm_layer, + activation_layer=activation, + ), + # 1x1 projetion to output channels + Conv2dNormActivation( + intermediate_channels, out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation + ), + ) + + +def _normal_init(conv: nn.Module): + for layer in conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0.0, std=0.03) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0.0) + + +class SSDLiteHead(nn.Module): + def __init__( + self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] + ): + super().__init__() + self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer) + self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer) + + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: + return { + "bbox_regression": self.regression_head(x), + "cls_logits": self.classification_head(x), + } + + +class SSDLiteClassificationHead(SSDScoringHead): + def __init__( + self, in_channels: List[int], num_anchors: List[int], num_classes: int, norm_layer: Callable[..., nn.Module] + ): + cls_logits = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer)) + _normal_init(cls_logits) + super().__init__(cls_logits, num_classes) + + +class SSDLiteRegressionHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]): + bbox_reg = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer)) + _normal_init(bbox_reg) + super().__init__(bbox_reg, 4) + + +class SSDLiteFeatureExtractorMobileNet(nn.Module): + def __init__( + self, + backbone: nn.Module, + c4_pos: int, + norm_layer: Callable[..., nn.Module], + width_mult: float = 1.0, + min_depth: int = 16, + ): + super().__init__() + _log_api_usage_once(self) + + if backbone[c4_pos].use_res_connect: + raise ValueError("backbone[c4_pos].use_res_connect 
should be False") + + self.features = nn.Sequential( + # As described in section 6.3 of MobileNetV3 paper + nn.Sequential(*backbone[:c4_pos], backbone[c4_pos].block[0]), # from start until C4 expansion layer + nn.Sequential(backbone[c4_pos].block[1:], *backbone[c4_pos + 1 :]), # from C4 depthwise until end + ) + + get_depth = lambda d: max(min_depth, int(d * width_mult)) # noqa: E731 + extra = nn.ModuleList( + [ + _extra_block(backbone[-1].out_channels, get_depth(512), norm_layer), + _extra_block(get_depth(512), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(128), norm_layer), + ] + ) + _normal_init(extra) + + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # Get feature maps from backbone and extra. Can't be refactored due to JIT limitations. + output = [] + for block in self.features: + x = block(x) + output.append(x) + + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _mobilenet_extractor( + backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3], + trainable_layers: int, + norm_layer: Callable[..., nn.Module], +): + backbone = backbone.features + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. + stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + num_stages = len(stage_indices) + + # find the index of the layer from which we won't freeze + if not 0 <= trainable_layers <= num_stages: + raise ValueError(f"trainable_layers should be in the range [0, {num_stages}], instead got {trainable_layers}") + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDLiteFeatureExtractorMobileNet(backbone, stage_indices[-2], norm_layer) + + +class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): + COCO_V1 = Weights( + url="https://download.pytorch.org/models/ssdlite320_mobilenet_v3_large_coco-a79551df.pth", + transforms=ObjectDetection, + meta={ + "num_params": 3440060, + "categories": _COCO_CATEGORIES, + "min_size": (1, 1), + "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssdlite320-mobilenetv3-large", + "_metrics": { + "COCO-val2017": { + "box_map": 21.3, + } + }, + "_ops": 0.583, + "_file_size": 13.418, + "_docs": """These weights were produced by following a similar training recipe as on the paper.""", + }, + ) + DEFAULT = COCO_V1 + + +@register_model() +@handle_legacy_interface( + weights=("pretrained", SSDLite320_MobileNet_V3_Large_Weights.COCO_V1), + weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1), +) +def ssdlite320_mobilenet_v3_large( + *, + weights: Optional[SSDLite320_MobileNet_V3_Large_Weights] = None, + progress: bool = True, + num_classes: Optional[int] = None, + weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1, + trainable_backbone_layers: Optional[int] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None, + **kwargs: Any, +) -> SSD: + """SSDlite model architecture with input size 320x320 and a MobileNetV3 Large backbone, as + described at `Searching for MobileNetV3 `__ and + `MobileNetV2: 
Inverted Residuals and Linear Bottlenecks `__. + + .. betastatus:: detection module + + See :func:`~torchvision.models.detection.ssd300_vgg16` for more details. + + Example: + + >>> model = torchvision.models.detection.ssdlite320_mobilenet_v3_large(weights=SSDLite320_MobileNet_V3_Large_Weights.DEFAULT) + >>> model.eval() + >>> x = [torch.rand(3, 320, 320), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + weights (:class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + num_classes (int, optional): number of output classes of the model + (including the background). + weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained + weights for the backbone. + trainable_backbone_layers (int, optional): number of trainable (not frozen) layers + starting from final block. Valid values are between 0 and 6, with 6 meaning all + backbone layers are trainable. If ``None`` is passed (the default) this value is + set to 6. + norm_layer (callable, optional): Module specifying the normalization layer to use. + **kwargs: parameters passed to the ``torchvision.models.detection.ssd.SSD`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.detection.SSDLite320_MobileNet_V3_Large_Weights + :members: + """ + + weights = SSDLite320_MobileNet_V3_Large_Weights.verify(weights) + weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone) + + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the parameter.") + + if weights is not None: + weights_backbone = None + num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"])) + elif num_classes is None: + num_classes = 91 + + trainable_backbone_layers = _validate_trainable_layers( + weights is not None or weights_backbone is not None, trainable_backbone_layers, 6, 6 + ) + + # Enable reduced tail if no pretrained backbone is selected. See Table 6 of MobileNetV3 paper. 
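    # (reduced_tail halves the channel counts of the last backbone blocks; it is
    # only enabled when no pretrained backbone is requested, since the ImageNet
    # weights above were trained with the full-width tail.)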
+ reduce_tail = weights_backbone is None + + if norm_layer is None: + norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03) + + backbone = mobilenet_v3_large( + weights=weights_backbone, progress=progress, norm_layer=norm_layer, reduced_tail=reduce_tail, **kwargs + ) + if weights_backbone is None: + # Change the default initialization scheme if not pretrained + _normal_init(backbone) + backbone = _mobilenet_extractor( + backbone, + trainable_backbone_layers, + norm_layer, + ) + + size = (320, 320) + anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95) + out_channels = det_utils.retrieve_out_channels(backbone, size) + num_anchors = anchor_generator.num_anchors_per_location() + if len(out_channels) != len(anchor_generator.aspect_ratios): + raise ValueError( + f"The length of the output channels from the backbone {len(out_channels)} do not match the length of the anchor generator aspect ratios {len(anchor_generator.aspect_ratios)}" + ) + + defaults = { + "score_thresh": 0.001, + "nms_thresh": 0.55, + "detections_per_img": 300, + "topk_candidates": 300, + # Rescale the input in a way compatible to the backbone: + # The following mean/std rescale the data from [0, 1] to [-1, 1] + "image_mean": [0.5, 0.5, 0.5], + "image_std": [0.5, 0.5, 0.5], + } + kwargs: Any = {**defaults, **kwargs} + model = SSD( + backbone, + anchor_generator, + size, + num_classes, + head=SSDLiteHead(out_channels, num_anchors, num_classes, norm_layer), + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/detection/transform.py b/wemm/lib/python3.10/site-packages/torchvision/models/detection/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..589d5e45bdc7eef8c86951c4125f1dee232def1a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/detection/transform.py @@ -0,0 +1,309 @@ +import math +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torchvision +from torch import nn, Tensor + +from .image_list import ImageList +from .roi_heads import paste_masks_in_image + + +@torch.jit.unused +def _get_shape_onnx(image: Tensor) -> Tensor: + from torch.onnx import operators + + return operators.shape_as_tensor(image)[-2:] + + +@torch.jit.unused +def _fake_cast_onnx(v: Tensor) -> float: + # ONNX requires a tensor but here we fake its type for JIT. 
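    # (the annotated return type is float, but under ONNX tracing the value is
    # deliberately left as a 0-dim Tensor so the resize scale stays in the traced graph)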
+ return v + + +def _resize_image_and_masks( + image: Tensor, + self_min_size: float, + self_max_size: float, + target: Optional[Dict[str, Tensor]] = None, + fixed_size: Optional[Tuple[int, int]] = None, +) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if torchvision._is_tracing(): + im_shape = _get_shape_onnx(image) + else: + im_shape = torch.tensor(image.shape[-2:]) + + size: Optional[List[int]] = None + scale_factor: Optional[float] = None + recompute_scale_factor: Optional[bool] = None + if fixed_size is not None: + size = [fixed_size[1], fixed_size[0]] + else: + min_size = torch.min(im_shape).to(dtype=torch.float32) + max_size = torch.max(im_shape).to(dtype=torch.float32) + scale = torch.min(self_min_size / min_size, self_max_size / max_size) + + if torchvision._is_tracing(): + scale_factor = _fake_cast_onnx(scale) + else: + scale_factor = scale.item() + recompute_scale_factor = True + + image = torch.nn.functional.interpolate( + image[None], + size=size, + scale_factor=scale_factor, + mode="bilinear", + recompute_scale_factor=recompute_scale_factor, + align_corners=False, + )[0] + + if target is None: + return image, target + + if "masks" in target: + mask = target["masks"] + mask = torch.nn.functional.interpolate( + mask[:, None].float(), size=size, scale_factor=scale_factor, recompute_scale_factor=recompute_scale_factor + )[:, 0].byte() + target["masks"] = mask + return image, target + + +class GeneralizedRCNNTransform(nn.Module): + """ + Performs input / target transformation before feeding the data to a GeneralizedRCNN + model. + + The transformations it performs are: + - input normalization (mean subtraction and std division) + - input / target resizing to match min_size / max_size + + It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets + """ + + def __init__( + self, + min_size: int, + max_size: int, + image_mean: List[float], + image_std: List[float], + size_divisible: int = 32, + fixed_size: Optional[Tuple[int, int]] = None, + **kwargs: Any, + ): + super().__init__() + if not isinstance(min_size, (list, tuple)): + min_size = (min_size,) + self.min_size = min_size + self.max_size = max_size + self.image_mean = image_mean + self.image_std = image_std + self.size_divisible = size_divisible + self.fixed_size = fixed_size + self._skip_resize = kwargs.pop("_skip_resize", False) + + def forward( + self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None + ) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]: + images = [img for img in images] + if targets is not None: + # make a copy of targets to avoid modifying it in-place + # once torchscript supports dict comprehension + # this can be simplified as follows + # targets = [{k: v for k,v in t.items()} for t in targets] + targets_copy: List[Dict[str, Tensor]] = [] + for t in targets: + data: Dict[str, Tensor] = {} + for k, v in t.items(): + data[k] = v + targets_copy.append(data) + targets = targets_copy + for i in range(len(images)): + image = images[i] + target_index = targets[i] if targets is not None else None + + if image.dim() != 3: + raise ValueError(f"images is expected to be a list of 3d tensors of shape [C, H, W], got {image.shape}") + image = self.normalize(image) + image, target_index = self.resize(image, target_index) + images[i] = image + if targets is not None and target_index is not None: + targets[i] = target_index + + image_sizes = [img.shape[-2:] for img in images] + images = self.batch_images(images, size_divisible=self.size_divisible) + image_sizes_list: 
List[Tuple[int, int]] = [] + for image_size in image_sizes: + torch._assert( + len(image_size) == 2, + f"Input tensors expected to have in the last two elements H and W, instead got {image_size}", + ) + image_sizes_list.append((image_size[0], image_size[1])) + + image_list = ImageList(images, image_sizes_list) + return image_list, targets + + def normalize(self, image: Tensor) -> Tensor: + if not image.is_floating_point(): + raise TypeError( + f"Expected input images to be of floating type (in range [0, 1]), " + f"but found type {image.dtype} instead" + ) + dtype, device = image.dtype, image.device + mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device) + std = torch.as_tensor(self.image_std, dtype=dtype, device=device) + return (image - mean[:, None, None]) / std[:, None, None] + + def torch_choice(self, k: List[int]) -> int: + """ + Implements `random.choice` via torch ops, so it can be compiled with + TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803 + is fixed. + """ + index = int(torch.empty(1).uniform_(0.0, float(len(k))).item()) + return k[index] + + def resize( + self, + image: Tensor, + target: Optional[Dict[str, Tensor]] = None, + ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + h, w = image.shape[-2:] + if self.training: + if self._skip_resize: + return image, target + size = float(self.torch_choice(self.min_size)) + else: + # FIXME assume for now that testing uses the largest scale + size = float(self.min_size[-1]) + image, target = _resize_image_and_masks(image, size, float(self.max_size), target, self.fixed_size) + + if target is None: + return image, target + + bbox = target["boxes"] + bbox = resize_boxes(bbox, (h, w), image.shape[-2:]) + target["boxes"] = bbox + + if "keypoints" in target: + keypoints = target["keypoints"] + keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:]) + target["keypoints"] = keypoints + return image, target + + # _onnx_batch_images() is an implementation of + # batch_images() that is supported by ONNX tracing. 
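    # Both batching paths zero-pad every image up to the per-dimension maximum of
    # the batch, rounded up to a multiple of `size_divisible`, so differently sized
    # images can share one dense batch tensor.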
+ @torch.jit.unused + def _onnx_batch_images(self, images: List[Tensor], size_divisible: int = 32) -> Tensor: + max_size = [] + for i in range(images[0].dim()): + max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64) + max_size.append(max_size_i) + stride = size_divisible + max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64) + max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # which is not yet supported in onnx + padded_imgs = [] + for img in images: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + return torch.stack(padded_imgs) + + def max_by_axis(self, the_list: List[List[int]]) -> List[int]: + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + def batch_images(self, images: List[Tensor], size_divisible: int = 32) -> Tensor: + if torchvision._is_tracing(): + # batch_images() does not export well to ONNX + # call _onnx_batch_images() instead + return self._onnx_batch_images(images, size_divisible) + + max_size = self.max_by_axis([list(img.shape) for img in images]) + stride = float(size_divisible) + max_size = list(max_size) + max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride) + max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride) + + batch_shape = [len(images)] + max_size + batched_imgs = images[0].new_full(batch_shape, 0) + for i in range(batched_imgs.shape[0]): + img = images[i] + batched_imgs[i, : img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + + return batched_imgs + + def postprocess( + self, + result: List[Dict[str, Tensor]], + image_shapes: List[Tuple[int, int]], + original_image_sizes: List[Tuple[int, int]], + ) -> List[Dict[str, Tensor]]: + if self.training: + return result + for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)): + boxes = pred["boxes"] + boxes = resize_boxes(boxes, im_s, o_im_s) + result[i]["boxes"] = boxes + if "masks" in pred: + masks = pred["masks"] + masks = paste_masks_in_image(masks, boxes, o_im_s) + result[i]["masks"] = masks + if "keypoints" in pred: + keypoints = pred["keypoints"] + keypoints = resize_keypoints(keypoints, im_s, o_im_s) + result[i]["keypoints"] = keypoints + return result + + def __repr__(self) -> str: + format_string = f"{self.__class__.__name__}(" + _indent = "\n " + format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})" + format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')" + format_string += "\n)" + return format_string + + +def resize_keypoints(keypoints: Tensor, original_size: List[int], new_size: List[int]) -> Tensor: + ratios = [ + torch.tensor(s, dtype=torch.float32, device=keypoints.device) + / torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device) + for s, s_orig in zip(new_size, original_size) + ] + ratio_h, ratio_w = ratios + resized_data = keypoints.clone() + if torch._C._get_tracing_state(): + resized_data_0 = resized_data[:, :, 0] * ratio_w + resized_data_1 = resized_data[:, :, 1] * ratio_h + resized_data = torch.stack((resized_data_0, resized_data_1, 
resized_data[:, :, 2]), dim=2) + else: + resized_data[..., 0] *= ratio_w + resized_data[..., 1] *= ratio_h + return resized_data + + +def resize_boxes(boxes: Tensor, original_size: List[int], new_size: List[int]) -> Tensor: + ratios = [ + torch.tensor(s, dtype=torch.float32, device=boxes.device) + / torch.tensor(s_orig, dtype=torch.float32, device=boxes.device) + for s, s_orig in zip(new_size, original_size) + ] + ratio_height, ratio_width = ratios + xmin, ymin, xmax, ymax = boxes.unbind(1) + + xmin = xmin * ratio_width + xmax = xmax * ratio_width + ymin = ymin * ratio_height + ymax = ymax * ratio_height + return torch.stack((xmin, ymin, xmax, ymax), dim=1) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c74a8fb5bb498e20916267a9862b99cbdba88a77 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py b/wemm/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..18ccff6696408c5f07c0eee24e6d66aec46617b3 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py @@ -0,0 +1,484 @@ +from functools import partial +from typing import Any, List, Optional, Type, Union + +import torch +import torch.nn as nn +from torch import Tensor +from torchvision.models.resnet import ( + BasicBlock, + Bottleneck, + ResNet, + ResNet18_Weights, + ResNet50_Weights, + ResNeXt101_32X8D_Weights, + ResNeXt101_64X4D_Weights, +) + +from ...transforms._presets import ImageClassification +from .._api import register_model, Weights, WeightsEnum +from .._meta import _IMAGENET_CATEGORIES +from .._utils import _ovewrite_named_param, handle_legacy_interface +from .utils import _fuse_modules, _replace_relu, quantize_model + + +__all__ = [ + "QuantizableResNet", + "ResNet18_QuantizedWeights", + "ResNet50_QuantizedWeights", + "ResNeXt101_32X8D_QuantizedWeights", + "ResNeXt101_64X4D_QuantizedWeights", + "resnet18", + "resnet50", + "resnext101_32x8d", + "resnext101_64x4d", +] + + +class QuantizableBasicBlock(BasicBlock): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.add_relu = torch.nn.quantized.FloatFunctional() + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.add_relu.add_relu(out, identity) + + return out + + def fuse_model(self, is_qat: Optional[bool] = None) -> None: + _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True) + if self.downsample: + _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True) + + +class QuantizableBottleneck(Bottleneck): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.skip_add_relu = nn.quantized.FloatFunctional() + self.relu1 = nn.ReLU(inplace=False) + self.relu2 = nn.ReLU(inplace=False) + + def forward(self, x: Tensor) -> Tensor: + identity = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + out = 
self.conv2(out) + out = self.bn2(out) + out = self.relu2(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + out = self.skip_add_relu.add_relu(out, identity) + + return out + + def fuse_model(self, is_qat: Optional[bool] = None) -> None: + _fuse_modules( + self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True + ) + if self.downsample: + _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True) + + +class QuantizableResNet(ResNet): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + self.quant = torch.ao.quantization.QuantStub() + self.dequant = torch.ao.quantization.DeQuantStub() + + def forward(self, x: Tensor) -> Tensor: + x = self.quant(x) + # Ensure scriptability + # super(QuantizableResNet,self).forward(x) + # is not scriptable + x = self._forward_impl(x) + x = self.dequant(x) + return x + + def fuse_model(self, is_qat: Optional[bool] = None) -> None: + r"""Fuse conv/bn/relu modules in resnet models + + Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization. + Model is modified in place. Note that this operation does not change numerics + and the model after modification is in floating point + """ + _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True) + for m in self.modules(): + if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock: + m.fuse_model(is_qat) + + +def _resnet( + block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]], + layers: List[int], + weights: Optional[WeightsEnum], + progress: bool, + quantize: bool, + **kwargs: Any, +) -> QuantizableResNet: + if weights is not None: + _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) + if "backend" in weights.meta: + _ovewrite_named_param(kwargs, "backend", weights.meta["backend"]) + backend = kwargs.pop("backend", "fbgemm") + + model = QuantizableResNet(block, layers, **kwargs) + _replace_relu(model) + if quantize: + quantize_model(model, backend) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +_COMMON_META = { + "min_size": (1, 1), + "categories": _IMAGENET_CATEGORIES, + "backend": "fbgemm", + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models", + "_docs": """ + These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized + weights listed below. 
+ """, +} + + +class ResNet18_QuantizedWeights(WeightsEnum): + IMAGENET1K_FBGEMM_V1 = Weights( + url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth", + transforms=partial(ImageClassification, crop_size=224), + meta={ + **_COMMON_META, + "num_params": 11689512, + "unquantized": ResNet18_Weights.IMAGENET1K_V1, + "_metrics": { + "ImageNet-1K": { + "acc@1": 69.494, + "acc@5": 88.882, + } + }, + "_ops": 1.814, + "_file_size": 11.238, + }, + ) + DEFAULT = IMAGENET1K_FBGEMM_V1 + + +class ResNet50_QuantizedWeights(WeightsEnum): + IMAGENET1K_FBGEMM_V1 = Weights( + url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth", + transforms=partial(ImageClassification, crop_size=224), + meta={ + **_COMMON_META, + "num_params": 25557032, + "unquantized": ResNet50_Weights.IMAGENET1K_V1, + "_metrics": { + "ImageNet-1K": { + "acc@1": 75.920, + "acc@5": 92.814, + } + }, + "_ops": 4.089, + "_file_size": 24.759, + }, + ) + IMAGENET1K_FBGEMM_V2 = Weights( + url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth", + transforms=partial(ImageClassification, crop_size=224, resize_size=232), + meta={ + **_COMMON_META, + "num_params": 25557032, + "unquantized": ResNet50_Weights.IMAGENET1K_V2, + "_metrics": { + "ImageNet-1K": { + "acc@1": 80.282, + "acc@5": 94.976, + } + }, + "_ops": 4.089, + "_file_size": 24.953, + }, + ) + DEFAULT = IMAGENET1K_FBGEMM_V2 + + +class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): + IMAGENET1K_FBGEMM_V1 = Weights( + url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth", + transforms=partial(ImageClassification, crop_size=224), + meta={ + **_COMMON_META, + "num_params": 88791336, + "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1, + "_metrics": { + "ImageNet-1K": { + "acc@1": 78.986, + "acc@5": 94.480, + } + }, + "_ops": 16.414, + "_file_size": 86.034, + }, + ) + IMAGENET1K_FBGEMM_V2 = Weights( + url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth", + transforms=partial(ImageClassification, crop_size=224, resize_size=232), + meta={ + **_COMMON_META, + "num_params": 88791336, + "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2, + "_metrics": { + "ImageNet-1K": { + "acc@1": 82.574, + "acc@5": 96.132, + } + }, + "_ops": 16.414, + "_file_size": 86.645, + }, + ) + DEFAULT = IMAGENET1K_FBGEMM_V2 + + +class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum): + IMAGENET1K_FBGEMM_V1 = Weights( + url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth", + transforms=partial(ImageClassification, crop_size=224, resize_size=232), + meta={ + **_COMMON_META, + "num_params": 83455272, + "recipe": "https://github.com/pytorch/vision/pull/5935", + "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1, + "_metrics": { + "ImageNet-1K": { + "acc@1": 82.898, + "acc@5": 96.326, + } + }, + "_ops": 15.46, + "_file_size": 81.556, + }, + ) + DEFAULT = IMAGENET1K_FBGEMM_V1 + + +@register_model(name="quantized_resnet18") +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1 + if kwargs.get("quantize", False) + else ResNet18_Weights.IMAGENET1K_V1, + ) +) +def resnet18( + *, + weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None, + progress: bool = True, + quantize: bool = False, + **kwargs: Any, +) -> QuantizableResNet: + """ResNet-18 model from + `Deep Residual Learning for Image Recognition `_ + + .. 
note:: + Note that ``quantize = True`` returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported. + + Args: + weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The + pretrained weights for the model. See + :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + quantize (bool, optional): If True, return a quantized version of the model. Default is False. + **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights + :members: + + .. autoclass:: torchvision.models.ResNet18_Weights + :members: + :noindex: + """ + weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights) + + return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs) + + +@register_model(name="quantized_resnet50") +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1 + if kwargs.get("quantize", False) + else ResNet50_Weights.IMAGENET1K_V1, + ) +) +def resnet50( + *, + weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None, + progress: bool = True, + quantize: bool = False, + **kwargs: Any, +) -> QuantizableResNet: + """ResNet-50 model from + `Deep Residual Learning for Image Recognition `_ + + .. note:: + Note that ``quantize = True`` returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported. + + Args: + weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The + pretrained weights for the model. See + :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + quantize (bool, optional): If True, return a quantized version of the model. Default is False. + **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights + :members: + + .. 
autoclass:: torchvision.models.ResNet50_Weights + :members: + :noindex: + """ + weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights) + + return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs) + + +@register_model(name="quantized_resnext101_32x8d") +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1 + if kwargs.get("quantize", False) + else ResNeXt101_32X8D_Weights.IMAGENET1K_V1, + ) +) +def resnext101_32x8d( + *, + weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None, + progress: bool = True, + quantize: bool = False, + **kwargs: Any, +) -> QuantizableResNet: + """ResNeXt-101 32x8d model from + `Aggregated Residual Transformation for Deep Neural Networks `_ + + .. note:: + Note that ``quantize = True`` returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported. + + Args: + weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The + pretrained weights for the model. See + :class:`~torchvision.models.quantization.ResNet101_32X8D_QuantizedWeights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + quantize (bool, optional): If True, return a quantized version of the model. Default is False. + **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights + :members: + + .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights + :members: + :noindex: + """ + weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights) + + _ovewrite_named_param(kwargs, "groups", 32) + _ovewrite_named_param(kwargs, "width_per_group", 8) + return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs) + + +@register_model(name="quantized_resnext101_64x4d") +@handle_legacy_interface( + weights=( + "pretrained", + lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1 + if kwargs.get("quantize", False) + else ResNeXt101_64X4D_Weights.IMAGENET1K_V1, + ) +) +def resnext101_64x4d( + *, + weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None, + progress: bool = True, + quantize: bool = False, + **kwargs: Any, +) -> QuantizableResNet: + """ResNeXt-101 64x4d model from + `Aggregated Residual Transformation for Deep Neural Networks `_ + + .. note:: + Note that ``quantize = True`` returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported. + + Args: + weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The + pretrained weights for the model. See + :class:`~torchvision.models.quantization.ResNet101_64X4D_QuantizedWeights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. 
Default is True. + quantize (bool, optional): If True, return a quantized version of the model. Default is False. + **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights + :members: + + .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights + :members: + :noindex: + """ + weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights) + + _ovewrite_named_param(kwargs, "groups", 64) + _ovewrite_named_param(kwargs, "width_per_group", 4) + return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/swin_transformer.py b/wemm/lib/python3.10/site-packages/torchvision/models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..249ca37b9d29170fc50462b940a578e7eb7e4bb9 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/swin_transformer.py @@ -0,0 +1,1033 @@ +import math +from functools import partial +from typing import Any, Callable, List, Optional + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from ..ops.misc import MLP, Permute +from ..ops.stochastic_depth import StochasticDepth +from ..transforms._presets import ImageClassification, InterpolationMode +from ..utils import _log_api_usage_once +from ._api import register_model, Weights, WeightsEnum +from ._meta import _IMAGENET_CATEGORIES +from ._utils import _ovewrite_named_param, handle_legacy_interface + + +__all__ = [ + "SwinTransformer", + "Swin_T_Weights", + "Swin_S_Weights", + "Swin_B_Weights", + "Swin_V2_T_Weights", + "Swin_V2_S_Weights", + "Swin_V2_B_Weights", + "swin_t", + "swin_s", + "swin_b", + "swin_v2_t", + "swin_v2_s", + "swin_v2_b", +] + + +def _patch_merging_pad(x: torch.Tensor) -> torch.Tensor: + H, W, _ = x.shape[-3:] + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + x0 = x[..., 0::2, 0::2, :] # ... H/2 W/2 C + x1 = x[..., 1::2, 0::2, :] # ... H/2 W/2 C + x2 = x[..., 0::2, 1::2, :] # ... H/2 W/2 C + x3 = x[..., 1::2, 1::2, :] # ... H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # ... H/2 W/2 4*C + return x + + +torch.fx.wrap("_patch_merging_pad") + + +def _get_relative_position_bias( + relative_position_bias_table: torch.Tensor, relative_position_index: torch.Tensor, window_size: List[int] +) -> torch.Tensor: + N = window_size[0] * window_size[1] + relative_position_bias = relative_position_bias_table[relative_position_index] # type: ignore[index] + relative_position_bias = relative_position_bias.view(N, N, -1) + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(0) + return relative_position_bias + + +torch.fx.wrap("_get_relative_position_bias") + + +class PatchMerging(nn.Module): + """Patch Merging Layer. + Args: + dim (int): Number of input channels. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. 
+ """ + + def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm): + super().__init__() + _log_api_usage_once(self) + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x: Tensor): + """ + Args: + x (Tensor): input tensor with expected layout of [..., H, W, C] + Returns: + Tensor with layout of [..., H/2, W/2, 2*C] + """ + x = _patch_merging_pad(x) + x = self.norm(x) + x = self.reduction(x) # ... H/2 W/2 2*C + return x + + +class PatchMergingV2(nn.Module): + """Patch Merging Layer for Swin Transformer V2. + Args: + dim (int): Number of input channels. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + """ + + def __init__(self, dim: int, norm_layer: Callable[..., nn.Module] = nn.LayerNorm): + super().__init__() + _log_api_usage_once(self) + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(2 * dim) # difference + + def forward(self, x: Tensor): + """ + Args: + x (Tensor): input tensor with expected layout of [..., H, W, C] + Returns: + Tensor with layout of [..., H/2, W/2, 2*C] + """ + x = _patch_merging_pad(x) + x = self.reduction(x) # ... H/2 W/2 2*C + x = self.norm(x) + return x + + +def shifted_window_attention( + input: Tensor, + qkv_weight: Tensor, + proj_weight: Tensor, + relative_position_bias: Tensor, + window_size: List[int], + num_heads: int, + shift_size: List[int], + attention_dropout: float = 0.0, + dropout: float = 0.0, + qkv_bias: Optional[Tensor] = None, + proj_bias: Optional[Tensor] = None, + logit_scale: Optional[torch.Tensor] = None, + training: bool = True, +) -> Tensor: + """ + Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + input (Tensor[N, H, W, C]): The input tensor or 4-dimensions. + qkv_weight (Tensor[in_dim, out_dim]): The weight tensor of query, key, value. + proj_weight (Tensor[out_dim, out_dim]): The weight tensor of projection. + relative_position_bias (Tensor): The learned relative position bias added to attention. + window_size (List[int]): Window size. + num_heads (int): Number of attention heads. + shift_size (List[int]): Shift size for shifted window attention. + attention_dropout (float): Dropout ratio of attention weight. Default: 0.0. + dropout (float): Dropout ratio of output. Default: 0.0. + qkv_bias (Tensor[out_dim], optional): The bias tensor of query, key, value. Default: None. + proj_bias (Tensor[out_dim], optional): The bias tensor of projection. Default: None. + logit_scale (Tensor[out_dim], optional): Logit scale of cosine attention for Swin Transformer V2. Default: None. + training (bool, optional): Training flag used by the dropout parameters. Default: True. + Returns: + Tensor[N, H, W, C]: The output tensor after shifted window attention. 
+ """ + B, H, W, C = input.shape + # pad feature maps to multiples of window size + pad_r = (window_size[1] - W % window_size[1]) % window_size[1] + pad_b = (window_size[0] - H % window_size[0]) % window_size[0] + x = F.pad(input, (0, 0, 0, pad_r, 0, pad_b)) + _, pad_H, pad_W, _ = x.shape + + shift_size = shift_size.copy() + # If window size is larger than feature size, there is no need to shift window + if window_size[0] >= pad_H: + shift_size[0] = 0 + if window_size[1] >= pad_W: + shift_size[1] = 0 + + # cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2)) + + # partition windows + num_windows = (pad_H // window_size[0]) * (pad_W // window_size[1]) + x = x.view(B, pad_H // window_size[0], window_size[0], pad_W // window_size[1], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).reshape(B * num_windows, window_size[0] * window_size[1], C) # B*nW, Ws*Ws, C + + # multi-head attention + if logit_scale is not None and qkv_bias is not None: + qkv_bias = qkv_bias.clone() + length = qkv_bias.numel() // 3 + qkv_bias[length : 2 * length].zero_() + qkv = F.linear(x, qkv_weight, qkv_bias) + qkv = qkv.reshape(x.size(0), x.size(1), 3, num_heads, C // num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + if logit_scale is not None: + # cosine attention + attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) + logit_scale = torch.clamp(logit_scale, max=math.log(100.0)).exp() + attn = attn * logit_scale + else: + q = q * (C // num_heads) ** -0.5 + attn = q.matmul(k.transpose(-2, -1)) + # add relative position bias + attn = attn + relative_position_bias + + if sum(shift_size) > 0: + # generate attention mask + attn_mask = x.new_zeros((pad_H, pad_W)) + h_slices = ((0, -window_size[0]), (-window_size[0], -shift_size[0]), (-shift_size[0], None)) + w_slices = ((0, -window_size[1]), (-window_size[1], -shift_size[1]), (-shift_size[1], None)) + count = 0 + for h in h_slices: + for w in w_slices: + attn_mask[h[0] : h[1], w[0] : w[1]] = count + count += 1 + attn_mask = attn_mask.view(pad_H // window_size[0], window_size[0], pad_W // window_size[1], window_size[1]) + attn_mask = attn_mask.permute(0, 2, 1, 3).reshape(num_windows, window_size[0] * window_size[1]) + attn_mask = attn_mask.unsqueeze(1) - attn_mask.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + attn = attn.view(x.size(0) // num_windows, num_windows, num_heads, x.size(1), x.size(1)) + attn = attn + attn_mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, num_heads, x.size(1), x.size(1)) + + attn = F.softmax(attn, dim=-1) + attn = F.dropout(attn, p=attention_dropout, training=training) + + x = attn.matmul(v).transpose(1, 2).reshape(x.size(0), x.size(1), C) + x = F.linear(x, proj_weight, proj_bias) + x = F.dropout(x, p=dropout, training=training) + + # reverse windows + x = x.view(B, pad_H // window_size[0], pad_W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, pad_H, pad_W, C) + + # reverse cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2)) + + # unpad features + x = x[:, :H, :W, :].contiguous() + return x + + +torch.fx.wrap("shifted_window_attention") + + +class ShiftedWindowAttention(nn.Module): + """ + See :func:`shifted_window_attention`. 
+ """ + + def __init__( + self, + dim: int, + window_size: List[int], + shift_size: List[int], + num_heads: int, + qkv_bias: bool = True, + proj_bias: bool = True, + attention_dropout: float = 0.0, + dropout: float = 0.0, + ): + super().__init__() + if len(window_size) != 2 or len(shift_size) != 2: + raise ValueError("window_size and shift_size must be of length 2") + self.window_size = window_size + self.shift_size = shift_size + self.num_heads = num_heads + self.attention_dropout = attention_dropout + self.dropout = dropout + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim, bias=proj_bias) + + self.define_relative_position_bias_table() + self.define_relative_position_index() + + def define_relative_position_bias_table(self): + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), self.num_heads) + ) # 2*Wh-1 * 2*Ww-1, nH + nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02) + + def define_relative_position_index(self): + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1).flatten() # Wh*Ww*Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + def get_relative_position_bias(self) -> torch.Tensor: + return _get_relative_position_bias( + self.relative_position_bias_table, self.relative_position_index, self.window_size # type: ignore[arg-type] + ) + + def forward(self, x: Tensor) -> Tensor: + """ + Args: + x (Tensor): Tensor with layout of [B, H, W, C] + Returns: + Tensor with same layout as input, i.e. [B, H, W, C] + """ + relative_position_bias = self.get_relative_position_bias() + return shifted_window_attention( + x, + self.qkv.weight, + self.proj.weight, + relative_position_bias, + self.window_size, + self.num_heads, + shift_size=self.shift_size, + attention_dropout=self.attention_dropout, + dropout=self.dropout, + qkv_bias=self.qkv.bias, + proj_bias=self.proj.bias, + training=self.training, + ) + + +class ShiftedWindowAttentionV2(ShiftedWindowAttention): + """ + See :func:`shifted_window_attention_v2`. 
+ """ + + def __init__( + self, + dim: int, + window_size: List[int], + shift_size: List[int], + num_heads: int, + qkv_bias: bool = True, + proj_bias: bool = True, + attention_dropout: float = 0.0, + dropout: float = 0.0, + ): + super().__init__( + dim, + window_size, + shift_size, + num_heads, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + attention_dropout=attention_dropout, + dropout=dropout, + ) + + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) + ) + if qkv_bias: + length = self.qkv.bias.numel() // 3 + self.qkv.bias[length : 2 * length].data.zero_() + + def define_relative_position_bias_table(self): + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) + relative_coords_table = torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w], indexing="ij")) + relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + + relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 + relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 + + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = ( + torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / 3.0 + ) + self.register_buffer("relative_coords_table", relative_coords_table) + + def get_relative_position_bias(self) -> torch.Tensor: + relative_position_bias = _get_relative_position_bias( + self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads), + self.relative_position_index, # type: ignore[arg-type] + self.window_size, + ) + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + return relative_position_bias + + def forward(self, x: Tensor): + """ + Args: + x (Tensor): Tensor with layout of [B, H, W, C] + Returns: + Tensor with same layout as input, i.e. [B, H, W, C] + """ + relative_position_bias = self.get_relative_position_bias() + return shifted_window_attention( + x, + self.qkv.weight, + self.proj.weight, + relative_position_bias, + self.window_size, + self.num_heads, + shift_size=self.shift_size, + attention_dropout=self.attention_dropout, + dropout=self.dropout, + qkv_bias=self.qkv.bias, + proj_bias=self.proj.bias, + logit_scale=self.logit_scale, + training=self.training, + ) + + +class SwinTransformerBlock(nn.Module): + """ + Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (List[int]): Window size. + shift_size (List[int]): Shift size for shifted window attention. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0. + dropout (float): Dropout rate. Default: 0.0. + attention_dropout (float): Attention dropout rate. Default: 0.0. + stochastic_depth_prob: (float): Stochastic depth rate. Default: 0.0. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + attn_layer (nn.Module): Attention layer. 
Default: ShiftedWindowAttention + """ + + def __init__( + self, + dim: int, + num_heads: int, + window_size: List[int], + shift_size: List[int], + mlp_ratio: float = 4.0, + dropout: float = 0.0, + attention_dropout: float = 0.0, + stochastic_depth_prob: float = 0.0, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + attn_layer: Callable[..., nn.Module] = ShiftedWindowAttention, + ): + super().__init__() + _log_api_usage_once(self) + + self.norm1 = norm_layer(dim) + self.attn = attn_layer( + dim, + window_size, + shift_size, + num_heads, + attention_dropout=attention_dropout, + dropout=dropout, + ) + self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row") + self.norm2 = norm_layer(dim) + self.mlp = MLP(dim, [int(dim * mlp_ratio), dim], activation_layer=nn.GELU, inplace=None, dropout=dropout) + + for m in self.mlp.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x: Tensor): + x = x + self.stochastic_depth(self.attn(self.norm1(x))) + x = x + self.stochastic_depth(self.mlp(self.norm2(x))) + return x + + +class SwinTransformerBlockV2(SwinTransformerBlock): + """ + Swin Transformer V2 Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (List[int]): Window size. + shift_size (List[int]): Shift size for shifted window attention. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0. + dropout (float): Dropout rate. Default: 0.0. + attention_dropout (float): Attention dropout rate. Default: 0.0. + stochastic_depth_prob: (float): Stochastic depth rate. Default: 0.0. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + attn_layer (nn.Module): Attention layer. Default: ShiftedWindowAttentionV2. + """ + + def __init__( + self, + dim: int, + num_heads: int, + window_size: List[int], + shift_size: List[int], + mlp_ratio: float = 4.0, + dropout: float = 0.0, + attention_dropout: float = 0.0, + stochastic_depth_prob: float = 0.0, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + attn_layer: Callable[..., nn.Module] = ShiftedWindowAttentionV2, + ): + super().__init__( + dim, + num_heads, + window_size, + shift_size, + mlp_ratio=mlp_ratio, + dropout=dropout, + attention_dropout=attention_dropout, + stochastic_depth_prob=stochastic_depth_prob, + norm_layer=norm_layer, + attn_layer=attn_layer, + ) + + def forward(self, x: Tensor): + # Here is the difference, we apply norm after the attention in V2. + # In V1 we applied norm before the attention. + x = x + self.stochastic_depth(self.norm1(self.attn(x))) + x = x + self.stochastic_depth(self.norm2(self.mlp(x))) + return x + + +class SwinTransformer(nn.Module): + """ + Implements Swin Transformer from the `"Swin Transformer: Hierarchical Vision Transformer using + Shifted Windows" `_ paper. + Args: + patch_size (List[int]): Patch size. + embed_dim (int): Patch embedding dimension. + depths (List(int)): Depth of each Swin Transformer layer. + num_heads (List(int)): Number of attention heads in different layers. + window_size (List[int]): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0. + dropout (float): Dropout rate. Default: 0.0. + attention_dropout (float): Attention dropout rate. Default: 0.0. + stochastic_depth_prob (float): Stochastic depth rate. Default: 0.1. + num_classes (int): Number of classes for classification head. Default: 1000. 
+ block (nn.Module, optional): SwinTransformer Block. Default: None. + norm_layer (nn.Module, optional): Normalization layer. Default: None. + downsample_layer (nn.Module): Downsample layer (patch merging). Default: PatchMerging. + """ + + def __init__( + self, + patch_size: List[int], + embed_dim: int, + depths: List[int], + num_heads: List[int], + window_size: List[int], + mlp_ratio: float = 4.0, + dropout: float = 0.0, + attention_dropout: float = 0.0, + stochastic_depth_prob: float = 0.1, + num_classes: int = 1000, + norm_layer: Optional[Callable[..., nn.Module]] = None, + block: Optional[Callable[..., nn.Module]] = None, + downsample_layer: Callable[..., nn.Module] = PatchMerging, + ): + super().__init__() + _log_api_usage_once(self) + self.num_classes = num_classes + + if block is None: + block = SwinTransformerBlock + if norm_layer is None: + norm_layer = partial(nn.LayerNorm, eps=1e-5) + + layers: List[nn.Module] = [] + # split image into non-overlapping patches + layers.append( + nn.Sequential( + nn.Conv2d( + 3, embed_dim, kernel_size=(patch_size[0], patch_size[1]), stride=(patch_size[0], patch_size[1]) + ), + Permute([0, 2, 3, 1]), + norm_layer(embed_dim), + ) + ) + + total_stage_blocks = sum(depths) + stage_block_id = 0 + # build SwinTransformer blocks + for i_stage in range(len(depths)): + stage: List[nn.Module] = [] + dim = embed_dim * 2**i_stage + for i_layer in range(depths[i_stage]): + # adjust stochastic depth probability based on the depth of the stage block + sd_prob = stochastic_depth_prob * float(stage_block_id) / (total_stage_blocks - 1) + stage.append( + block( + dim, + num_heads[i_stage], + window_size=window_size, + shift_size=[0 if i_layer % 2 == 0 else w // 2 for w in window_size], + mlp_ratio=mlp_ratio, + dropout=dropout, + attention_dropout=attention_dropout, + stochastic_depth_prob=sd_prob, + norm_layer=norm_layer, + ) + ) + stage_block_id += 1 + layers.append(nn.Sequential(*stage)) + # add patch merging layer + if i_stage < (len(depths) - 1): + layers.append(downsample_layer(dim, norm_layer)) + self.features = nn.Sequential(*layers) + + num_features = embed_dim * 2 ** (len(depths) - 1) + self.norm = norm_layer(num_features) + self.permute = Permute([0, 3, 1, 2]) # B H W C -> B C H W + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.flatten = nn.Flatten(1) + self.head = nn.Linear(num_features, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.norm(x) + x = self.permute(x) + x = self.avgpool(x) + x = self.flatten(x) + x = self.head(x) + return x + + +def _swin_transformer( + patch_size: List[int], + embed_dim: int, + depths: List[int], + num_heads: List[int], + window_size: List[int], + stochastic_depth_prob: float, + weights: Optional[WeightsEnum], + progress: bool, + **kwargs: Any, +) -> SwinTransformer: + if weights is not None: + _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) + + model = SwinTransformer( + patch_size=patch_size, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + window_size=window_size, + stochastic_depth_prob=stochastic_depth_prob, + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +_COMMON_META = { + "categories": _IMAGENET_CATEGORIES, +} + + +class Swin_T_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + 
url="https://download.pytorch.org/models/swin_t-704ceda3.pth", + transforms=partial( + ImageClassification, crop_size=224, resize_size=232, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 28288354, + "min_size": (224, 224), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer", + "_metrics": { + "ImageNet-1K": { + "acc@1": 81.474, + "acc@5": 95.776, + } + }, + "_ops": 4.491, + "_file_size": 108.19, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +class Swin_S_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + url="https://download.pytorch.org/models/swin_s-5e29d889.pth", + transforms=partial( + ImageClassification, crop_size=224, resize_size=246, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 49606258, + "min_size": (224, 224), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer", + "_metrics": { + "ImageNet-1K": { + "acc@1": 83.196, + "acc@5": 96.360, + } + }, + "_ops": 8.741, + "_file_size": 189.786, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +class Swin_B_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + url="https://download.pytorch.org/models/swin_b-68c6b09e.pth", + transforms=partial( + ImageClassification, crop_size=224, resize_size=238, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 87768224, + "min_size": (224, 224), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer", + "_metrics": { + "ImageNet-1K": { + "acc@1": 83.582, + "acc@5": 96.640, + } + }, + "_ops": 15.431, + "_file_size": 335.364, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +class Swin_V2_T_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + url="https://download.pytorch.org/models/swin_v2_t-b137f0e2.pth", + transforms=partial( + ImageClassification, crop_size=256, resize_size=260, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 28351570, + "min_size": (256, 256), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2", + "_metrics": { + "ImageNet-1K": { + "acc@1": 82.072, + "acc@5": 96.132, + } + }, + "_ops": 5.94, + "_file_size": 108.626, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +class Swin_V2_S_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + url="https://download.pytorch.org/models/swin_v2_s-637d8ceb.pth", + transforms=partial( + ImageClassification, crop_size=256, resize_size=260, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 49737442, + "min_size": (256, 256), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2", + "_metrics": { + "ImageNet-1K": { + "acc@1": 83.712, + "acc@5": 96.816, + } + }, + "_ops": 11.546, + "_file_size": 190.675, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +class Swin_V2_B_Weights(WeightsEnum): + IMAGENET1K_V1 = Weights( + 
url="https://download.pytorch.org/models/swin_v2_b-781e5279.pth", + transforms=partial( + ImageClassification, crop_size=256, resize_size=272, interpolation=InterpolationMode.BICUBIC + ), + meta={ + **_COMMON_META, + "num_params": 87930848, + "min_size": (256, 256), + "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#swintransformer-v2", + "_metrics": { + "ImageNet-1K": { + "acc@1": 84.112, + "acc@5": 96.864, + } + }, + "_ops": 20.325, + "_file_size": 336.372, + "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", + }, + ) + DEFAULT = IMAGENET1K_V1 + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_T_Weights.IMAGENET1K_V1)) +def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_tiny architecture from + `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows `_. + + Args: + weights (:class:`~torchvision.models.Swin_T_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.Swin_T_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.Swin_T_Weights + :members: + """ + weights = Swin_T_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=[7, 7], + stochastic_depth_prob=0.2, + weights=weights, + progress=progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_S_Weights.IMAGENET1K_V1)) +def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_small architecture from + `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows `_. + + Args: + weights (:class:`~torchvision.models.Swin_S_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.Swin_S_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.Swin_S_Weights + :members: + """ + weights = Swin_S_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=[7, 7], + stochastic_depth_prob=0.3, + weights=weights, + progress=progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_B_Weights.IMAGENET1K_V1)) +def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_base architecture from + `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows `_. + + Args: + weights (:class:`~torchvision.models.Swin_B_Weights`, optional): The + pretrained weights to use. 
See + :class:`~torchvision.models.Swin_B_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.Swin_B_Weights + :members: + """ + weights = Swin_B_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=[7, 7], + stochastic_depth_prob=0.5, + weights=weights, + progress=progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_V2_T_Weights.IMAGENET1K_V1)) +def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_v2_tiny architecture from + `Swin Transformer V2: Scaling Up Capacity and Resolution `_. + + Args: + weights (:class:`~torchvision.models.Swin_V2_T_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.Swin_V2_T_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.Swin_V2_T_Weights + :members: + """ + weights = Swin_V2_T_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=[8, 8], + stochastic_depth_prob=0.2, + weights=weights, + progress=progress, + block=SwinTransformerBlockV2, + downsample_layer=PatchMergingV2, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_V2_S_Weights.IMAGENET1K_V1)) +def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_v2_small architecture from + `Swin Transformer V2: Scaling Up Capacity and Resolution `_. + + Args: + weights (:class:`~torchvision.models.Swin_V2_S_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.Swin_V2_S_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.Swin_V2_S_Weights + :members: + """ + weights = Swin_V2_S_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=[8, 8], + stochastic_depth_prob=0.3, + weights=weights, + progress=progress, + block=SwinTransformerBlockV2, + downsample_layer=PatchMergingV2, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin_V2_B_Weights.IMAGENET1K_V1)) +def swin_v2_b(*, weights: Optional[Swin_V2_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer: + """ + Constructs a swin_v2_base architecture from + `Swin Transformer V2: Scaling Up Capacity and Resolution `_. + + Args: + weights (:class:`~torchvision.models.Swin_V2_B_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.Swin_V2_B_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.Swin_V2_B_Weights + :members: + """ + weights = Swin_V2_B_Weights.verify(weights) + + return _swin_transformer( + patch_size=[4, 4], + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=[8, 8], + stochastic_depth_prob=0.5, + weights=weights, + progress=progress, + block=SwinTransformerBlockV2, + downsample_layer=PatchMergingV2, + **kwargs, + ) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ba3e5c4fee55ed77b428093445f357d07b0b17 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/s3d.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/s3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..877ff85fa3670624edd3d45331940addcdbcc2dd Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/s3d.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/swin_transformer.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/swin_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dc7660f602018a60f038889ed33eb1c88994c24 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/models/video/__pycache__/swin_transformer.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/video/resnet.py b/wemm/lib/python3.10/site-packages/torchvision/models/video/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc46a8eda7cbbac7f47937567f3ae4596e1567c --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/video/resnet.py @@ -0,0 +1,503 @@ +from functools import partial +from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union + 
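+# Editorial note, not part of the upstream torchvision source: a minimal usage
+# sketch for the video classification models defined in this module, assuming
+# the weight enums declared below and an input clip laid out as (N, C, T, H, W):
+#
+#     from torchvision.models.video import r3d_18, R3D_18_Weights
+#     import torch
+#
+#     model = r3d_18(weights=R3D_18_Weights.KINETICS400_V1).eval()
+#     clip = torch.randn(1, 3, 16, 112, 112)  # hypothetical 16-frame clip
+#     with torch.no_grad():
+#         logits = model(clip)  # (1, 400) scores over the Kinetics-400 classes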
+import torch.nn as nn +from torch import Tensor + +from ...transforms._presets import VideoClassification +from ...utils import _log_api_usage_once +from .._api import register_model, Weights, WeightsEnum +from .._meta import _KINETICS400_CATEGORIES +from .._utils import _ovewrite_named_param, handle_legacy_interface + + +__all__ = [ + "VideoResNet", + "R3D_18_Weights", + "MC3_18_Weights", + "R2Plus1D_18_Weights", + "r3d_18", + "mc3_18", + "r2plus1d_18", +] + + +class Conv3DSimple(nn.Conv3d): + def __init__( + self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 + ) -> None: + + super().__init__( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=(3, 3, 3), + stride=stride, + padding=padding, + bias=False, + ) + + @staticmethod + def get_downsample_stride(stride: int) -> Tuple[int, int, int]: + return stride, stride, stride + + +class Conv2Plus1D(nn.Sequential): + def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None: + super().__init__( + nn.Conv3d( + in_planes, + midplanes, + kernel_size=(1, 3, 3), + stride=(1, stride, stride), + padding=(0, padding, padding), + bias=False, + ), + nn.BatchNorm3d(midplanes), + nn.ReLU(inplace=True), + nn.Conv3d( + midplanes, out_planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1), padding=(padding, 0, 0), bias=False + ), + ) + + @staticmethod + def get_downsample_stride(stride: int) -> Tuple[int, int, int]: + return stride, stride, stride + + +class Conv3DNoTemporal(nn.Conv3d): + def __init__( + self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1 + ) -> None: + + super().__init__( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=(1, 3, 3), + stride=(1, stride, stride), + padding=(0, padding, padding), + bias=False, + ) + + @staticmethod + def get_downsample_stride(stride: int) -> Tuple[int, int, int]: + return 1, stride, stride + + +class BasicBlock(nn.Module): + + expansion = 1 + + def __init__( + self, + inplanes: int, + planes: int, + conv_builder: Callable[..., nn.Module], + stride: int = 1, + downsample: Optional[nn.Module] = None, + ) -> None: + midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) + + super().__init__() + self.conv1 = nn.Sequential( + conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True) + ) + self.conv2 = nn.Sequential(conv_builder(planes, planes, midplanes), nn.BatchNorm3d(planes)) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + residual = x + + out = self.conv1(x) + out = self.conv2(out) + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + inplanes: int, + planes: int, + conv_builder: Callable[..., nn.Module], + stride: int = 1, + downsample: Optional[nn.Module] = None, + ) -> None: + + super().__init__() + midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) + + # 1x1x1 + self.conv1 = nn.Sequential( + nn.Conv3d(inplanes, planes, kernel_size=1, bias=False), nn.BatchNorm3d(planes), nn.ReLU(inplace=True) + ) + # Second kernel + self.conv2 = nn.Sequential( + conv_builder(planes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True) + ) + + # 1x1x1 + self.conv3 = nn.Sequential( + 
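+            # Descriptive note (added, not upstream): this final 1x1x1 conv expands the
+            # channel count by `self.expansion` and is followed only by BatchNorm; the
+            # ReLU is deliberately applied after the residual addition in forward().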
nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False), + nn.BatchNorm3d(planes * self.expansion), + ) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + residual = x + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicStem(nn.Sequential): + """The default conv-batchnorm-relu stem""" + + def __init__(self) -> None: + super().__init__( + nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False), + nn.BatchNorm3d(64), + nn.ReLU(inplace=True), + ) + + +class R2Plus1dStem(nn.Sequential): + """R(2+1)D stem is different than the default one as it uses separated 3D convolution""" + + def __init__(self) -> None: + super().__init__( + nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False), + nn.BatchNorm3d(45), + nn.ReLU(inplace=True), + nn.Conv3d(45, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False), + nn.BatchNorm3d(64), + nn.ReLU(inplace=True), + ) + + +class VideoResNet(nn.Module): + def __init__( + self, + block: Type[Union[BasicBlock, Bottleneck]], + conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]], + layers: List[int], + stem: Callable[..., nn.Module], + num_classes: int = 400, + zero_init_residual: bool = False, + ) -> None: + """Generic resnet video generator. + + Args: + block (Type[Union[BasicBlock, Bottleneck]]): resnet building block + conv_makers (List[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]]): generator + function for each layer + layers (List[int]): number of blocks per layer + stem (Callable[..., nn.Module]): module specifying the ResNet stem. + num_classes (int, optional): Dimension of the final FC layer. Defaults to 400. + zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False. 
+ """ + super().__init__() + _log_api_usage_once(self) + self.inplanes = 64 + + self.stem = stem() + + self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1) + self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2) + + self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + # init weights + for m in self.modules(): + if isinstance(m, nn.Conv3d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm3d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) # type: ignore[union-attr, arg-type] + + def forward(self, x: Tensor) -> Tensor: + x = self.stem(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + # Flatten the layer to fc + x = x.flatten(1) + x = self.fc(x) + + return x + + def _make_layer( + self, + block: Type[Union[BasicBlock, Bottleneck]], + conv_builder: Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]], + planes: int, + blocks: int, + stride: int = 1, + ) -> nn.Sequential: + downsample = None + + if stride != 1 or self.inplanes != planes * block.expansion: + ds_stride = conv_builder.get_downsample_stride(stride) + downsample = nn.Sequential( + nn.Conv3d(self.inplanes, planes * block.expansion, kernel_size=1, stride=ds_stride, bias=False), + nn.BatchNorm3d(planes * block.expansion), + ) + layers = [] + layers.append(block(self.inplanes, planes, conv_builder, stride, downsample)) + + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, conv_builder)) + + return nn.Sequential(*layers) + + +def _video_resnet( + block: Type[Union[BasicBlock, Bottleneck]], + conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]], + layers: List[int], + stem: Callable[..., nn.Module], + weights: Optional[WeightsEnum], + progress: bool, + **kwargs: Any, +) -> VideoResNet: + if weights is not None: + _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) + + model = VideoResNet(block, conv_makers, layers, stem, **kwargs) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +_COMMON_META = { + "min_size": (1, 1), + "categories": _KINETICS400_CATEGORIES, + "recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification", + "_docs": ( + "The weights reproduce closely the accuracy of the paper. The accuracies are estimated on video-level " + "with parameters `frame_rate=15`, `clips_per_video=5`, and `clip_len=16`." 
+ ), +} + + +class R3D_18_Weights(WeightsEnum): + KINETICS400_V1 = Weights( + url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth", + transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)), + meta={ + **_COMMON_META, + "num_params": 33371472, + "_metrics": { + "Kinetics-400": { + "acc@1": 63.200, + "acc@5": 83.479, + } + }, + "_ops": 40.697, + "_file_size": 127.359, + }, + ) + DEFAULT = KINETICS400_V1 + + +class MC3_18_Weights(WeightsEnum): + KINETICS400_V1 = Weights( + url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth", + transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)), + meta={ + **_COMMON_META, + "num_params": 11695440, + "_metrics": { + "Kinetics-400": { + "acc@1": 63.960, + "acc@5": 84.130, + } + }, + "_ops": 43.343, + "_file_size": 44.672, + }, + ) + DEFAULT = KINETICS400_V1 + + +class R2Plus1D_18_Weights(WeightsEnum): + KINETICS400_V1 = Weights( + url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth", + transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)), + meta={ + **_COMMON_META, + "num_params": 31505325, + "_metrics": { + "Kinetics-400": { + "acc@1": 67.463, + "acc@5": 86.175, + } + }, + "_ops": 40.519, + "_file_size": 120.318, + }, + ) + DEFAULT = KINETICS400_V1 + + +@register_model() +@handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1)) +def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: + """Construct 18 layer Resnet3D model. + + .. betastatus:: video module + + Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition `__. + + Args: + weights (:class:`~torchvision.models.video.R3D_18_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.R3D_18_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool): If True, displays a progress bar of the download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class. + Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.video.R3D_18_Weights + :members: + """ + weights = R3D_18_Weights.verify(weights) + + return _video_resnet( + BasicBlock, + [Conv3DSimple] * 4, + [2, 2, 2, 2], + BasicStem, + weights, + progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1)) +def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: + """Construct 18 layer Mixed Convolution network as in + + .. betastatus:: video module + + Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition `__. + + Args: + weights (:class:`~torchvision.models.video.MC3_18_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.MC3_18_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool): If True, displays a progress bar of the download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class. + Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.video.MC3_18_Weights + :members: + """ + weights = MC3_18_Weights.verify(weights) + + return _video_resnet( + BasicBlock, + [Conv3DSimple] + [Conv3DNoTemporal] * 3, # type: ignore[list-item] + [2, 2, 2, 2], + BasicStem, + weights, + progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1)) +def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: + """Construct 18 layer deep R(2+1)D network as in + + .. betastatus:: video module + + Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition `__. + + Args: + weights (:class:`~torchvision.models.video.R2Plus1D_18_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.R2Plus1D_18_Weights` + below for more details, and possible values. By default, no + pre-trained weights are used. + progress (bool): If True, displays a progress bar of the download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class. + Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.video.R2Plus1D_18_Weights + :members: + """ + weights = R2Plus1D_18_Weights.verify(weights) + + return _video_resnet( + BasicBlock, + [Conv2Plus1D] * 4, + [2, 2, 2, 2], + R2Plus1dStem, + weights, + progress, + **kwargs, + ) + + +# The dictionary below is internal implementation detail and will be removed in v0.15 +from .._utils import _ModelURLs + + +model_urls = _ModelURLs( + { + "r3d_18": R3D_18_Weights.KINETICS400_V1.url, + "mc3_18": MC3_18_Weights.KINETICS400_V1.url, + "r2plus1d_18": R2Plus1D_18_Weights.KINETICS400_V1.url, + } +) diff --git a/wemm/lib/python3.10/site-packages/torchvision/models/video/swin_transformer.py b/wemm/lib/python3.10/site-packages/torchvision/models/video/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..25cf3cf997e7be5ef2c94823f580d3400649287c --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchvision/models/video/swin_transformer.py @@ -0,0 +1,743 @@ +# Modified from 2d Swin Transformers in torchvision: +# https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py + +from functools import partial +from typing import Any, Callable, List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from ...transforms._presets import VideoClassification + +from ...utils import _log_api_usage_once + +from .._api import register_model, Weights, WeightsEnum + +from .._meta import _KINETICS400_CATEGORIES +from .._utils import _ovewrite_named_param, handle_legacy_interface +from ..swin_transformer import PatchMerging, SwinTransformerBlock + +__all__ = [ + "SwinTransformer3d", + "Swin3D_T_Weights", + "Swin3D_S_Weights", + "Swin3D_B_Weights", + "swin3d_t", + "swin3d_s", + "swin3d_b", +] + + +def _get_window_and_shift_size( + shift_size: List[int], size_dhw: List[int], window_size: List[int] +) -> Tuple[List[int], List[int]]: + for i in range(3): + if size_dhw[i] <= window_size[i]: + # In this case, window_size will adapt to the input size, and no need to shift + window_size[i] = size_dhw[i] + shift_size[i] = 0 + + return window_size, shift_size + + +torch.fx.wrap("_get_window_and_shift_size") + + +def _get_relative_position_bias( + relative_position_bias_table: torch.Tensor, relative_position_index: 
torch.Tensor, window_size: List[int] +) -> Tensor: + window_vol = window_size[0] * window_size[1] * window_size[2] + # In 3d case we flatten the relative_position_bias + relative_position_bias = relative_position_bias_table[ + relative_position_index[:window_vol, :window_vol].flatten() # type: ignore[index] + ] + relative_position_bias = relative_position_bias.view(window_vol, window_vol, -1) + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(0) + return relative_position_bias + + +torch.fx.wrap("_get_relative_position_bias") + + +def _compute_pad_size_3d(size_dhw: Tuple[int, int, int], patch_size: Tuple[int, int, int]) -> Tuple[int, int, int]: + pad_size = [(patch_size[i] - size_dhw[i] % patch_size[i]) % patch_size[i] for i in range(3)] + return pad_size[0], pad_size[1], pad_size[2] + + +torch.fx.wrap("_compute_pad_size_3d") + + +def _compute_attention_mask_3d( + x: Tensor, + size_dhw: Tuple[int, int, int], + window_size: Tuple[int, int, int], + shift_size: Tuple[int, int, int], +) -> Tensor: + # generate attention mask + attn_mask = x.new_zeros(*size_dhw) + num_windows = (size_dhw[0] // window_size[0]) * (size_dhw[1] // window_size[1]) * (size_dhw[2] // window_size[2]) + slices = [ + ( + (0, -window_size[i]), + (-window_size[i], -shift_size[i]), + (-shift_size[i], None), + ) + for i in range(3) + ] + count = 0 + for d in slices[0]: + for h in slices[1]: + for w in slices[2]: + attn_mask[d[0] : d[1], h[0] : h[1], w[0] : w[1]] = count + count += 1 + + # Partition window on attn_mask + attn_mask = attn_mask.view( + size_dhw[0] // window_size[0], + window_size[0], + size_dhw[1] // window_size[1], + window_size[1], + size_dhw[2] // window_size[2], + window_size[2], + ) + attn_mask = attn_mask.permute(0, 2, 4, 1, 3, 5).reshape( + num_windows, window_size[0] * window_size[1] * window_size[2] + ) + attn_mask = attn_mask.unsqueeze(1) - attn_mask.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + return attn_mask + + +torch.fx.wrap("_compute_attention_mask_3d") + + +def shifted_window_attention_3d( + input: Tensor, + qkv_weight: Tensor, + proj_weight: Tensor, + relative_position_bias: Tensor, + window_size: List[int], + num_heads: int, + shift_size: List[int], + attention_dropout: float = 0.0, + dropout: float = 0.0, + qkv_bias: Optional[Tensor] = None, + proj_bias: Optional[Tensor] = None, + training: bool = True, +) -> Tensor: + """ + Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + input (Tensor[B, T, H, W, C]): The input tensor, 5-dimensions. + qkv_weight (Tensor[in_dim, out_dim]): The weight tensor of query, key, value. + proj_weight (Tensor[out_dim, out_dim]): The weight tensor of projection. + relative_position_bias (Tensor): The learned relative position bias added to attention. + window_size (List[int]): 3-dimensions window size, T, H, W . + num_heads (int): Number of attention heads. + shift_size (List[int]): Shift size for shifted window attention (T, H, W). + attention_dropout (float): Dropout ratio of attention weight. Default: 0.0. + dropout (float): Dropout ratio of output. Default: 0.0. + qkv_bias (Tensor[out_dim], optional): The bias tensor of query, key, value. Default: None. + proj_bias (Tensor[out_dim], optional): The bias tensor of projection. Default: None. + training (bool, optional): Training flag used by the dropout parameters. Default: True. 
+ Returns: + Tensor[B, T, H, W, C]: The output tensor after shifted window attention. + """ + b, t, h, w, c = input.shape + # pad feature maps to multiples of window size + pad_size = _compute_pad_size_3d((t, h, w), (window_size[0], window_size[1], window_size[2])) + x = F.pad(input, (0, 0, 0, pad_size[2], 0, pad_size[1], 0, pad_size[0])) + _, tp, hp, wp, _ = x.shape + padded_size = (tp, hp, wp) + + # cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3)) + + # partition windows + num_windows = ( + (padded_size[0] // window_size[0]) * (padded_size[1] // window_size[1]) * (padded_size[2] // window_size[2]) + ) + x = x.view( + b, + padded_size[0] // window_size[0], + window_size[0], + padded_size[1] // window_size[1], + window_size[1], + padded_size[2] // window_size[2], + window_size[2], + c, + ) + x = x.permute(0, 1, 3, 5, 2, 4, 6, 7).reshape( + b * num_windows, window_size[0] * window_size[1] * window_size[2], c + ) # B*nW, Wd*Wh*Ww, C + + # multi-head attention + qkv = F.linear(x, qkv_weight, qkv_bias) + qkv = qkv.reshape(x.size(0), x.size(1), 3, num_heads, c // num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * (c // num_heads) ** -0.5 + attn = q.matmul(k.transpose(-2, -1)) + # add relative position bias + attn = attn + relative_position_bias + + if sum(shift_size) > 0: + # generate attention mask to handle shifted windows with varying size + attn_mask = _compute_attention_mask_3d( + x, + (padded_size[0], padded_size[1], padded_size[2]), + (window_size[0], window_size[1], window_size[2]), + (shift_size[0], shift_size[1], shift_size[2]), + ) + attn = attn.view(x.size(0) // num_windows, num_windows, num_heads, x.size(1), x.size(1)) + attn = attn + attn_mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, num_heads, x.size(1), x.size(1)) + + attn = F.softmax(attn, dim=-1) + attn = F.dropout(attn, p=attention_dropout, training=training) + + x = attn.matmul(v).transpose(1, 2).reshape(x.size(0), x.size(1), c) + x = F.linear(x, proj_weight, proj_bias) + x = F.dropout(x, p=dropout, training=training) + + # reverse windows + x = x.view( + b, + padded_size[0] // window_size[0], + padded_size[1] // window_size[1], + padded_size[2] // window_size[2], + window_size[0], + window_size[1], + window_size[2], + c, + ) + x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).reshape(b, tp, hp, wp, c) + + # reverse cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3)) + + # unpad features + x = x[:, :t, :h, :w, :].contiguous() + return x + + +torch.fx.wrap("shifted_window_attention_3d") + + +class ShiftedWindowAttention3d(nn.Module): + """ + See :func:`shifted_window_attention_3d`. 
+ """ + + def __init__( + self, + dim: int, + window_size: List[int], + shift_size: List[int], + num_heads: int, + qkv_bias: bool = True, + proj_bias: bool = True, + attention_dropout: float = 0.0, + dropout: float = 0.0, + ) -> None: + super().__init__() + if len(window_size) != 3 or len(shift_size) != 3: + raise ValueError("window_size and shift_size must be of length 2") + + self.window_size = window_size # Wd, Wh, Ww + self.shift_size = shift_size + self.num_heads = num_heads + self.attention_dropout = attention_dropout + self.dropout = dropout + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim, bias=proj_bias) + + self.define_relative_position_bias_table() + self.define_relative_position_index() + + def define_relative_position_bias_table(self) -> None: + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros( + (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1), + self.num_heads, + ) + ) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH + nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02) + + def define_relative_position_index(self) -> None: + # get pair-wise relative position index for each token inside the window + coords_dhw = [torch.arange(self.window_size[i]) for i in range(3)] + coords = torch.stack( + torch.meshgrid(coords_dhw[0], coords_dhw[1], coords_dhw[2], indexing="ij") + ) # 3, Wd, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 2] += self.window_size[2] - 1 + + relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1) + relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1 + # We don't flatten the relative_position_index here in 3d case. + relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + def get_relative_position_bias(self, window_size: List[int]) -> torch.Tensor: + return _get_relative_position_bias(self.relative_position_bias_table, self.relative_position_index, window_size) # type: ignore + + def forward(self, x: Tensor) -> Tensor: + _, t, h, w, _ = x.shape + size_dhw = [t, h, w] + window_size, shift_size = self.window_size.copy(), self.shift_size.copy() + # Handle case where window_size is larger than the input tensor + window_size, shift_size = _get_window_and_shift_size(shift_size, size_dhw, window_size) + + relative_position_bias = self.get_relative_position_bias(window_size) + + return shifted_window_attention_3d( + x, + self.qkv.weight, + self.proj.weight, + relative_position_bias, + window_size, + self.num_heads, + shift_size=shift_size, + attention_dropout=self.attention_dropout, + dropout=self.dropout, + qkv_bias=self.qkv.bias, + proj_bias=self.proj.bias, + training=self.training, + ) + + +# Modified from: +# https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/mmaction/models/backbones/swin_transformer.py +class PatchEmbed3d(nn.Module): + """Video to Patch Embedding. + + Args: + patch_size (List[int]): Patch token size. + in_channels (int): Number of input channels. 
Default: 3 + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__( + self, + patch_size: List[int], + in_channels: int = 3, + embed_dim: int = 96, + norm_layer: Optional[Callable[..., nn.Module]] = None, + ) -> None: + super().__init__() + _log_api_usage_once(self) + self.tuple_patch_size = (patch_size[0], patch_size[1], patch_size[2]) + + self.proj = nn.Conv3d( + in_channels, + embed_dim, + kernel_size=self.tuple_patch_size, + stride=self.tuple_patch_size, + ) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = nn.Identity() + + def forward(self, x: Tensor) -> Tensor: + """Forward function.""" + # padding + _, _, t, h, w = x.size() + pad_size = _compute_pad_size_3d((t, h, w), self.tuple_patch_size) + x = F.pad(x, (0, pad_size[2], 0, pad_size[1], 0, pad_size[0])) + x = self.proj(x) # B C T Wh Ww + x = x.permute(0, 2, 3, 4, 1) # B T Wh Ww C + if self.norm is not None: + x = self.norm(x) + return x + + +class SwinTransformer3d(nn.Module): + """ + Implements 3D Swin Transformer from the `"Video Swin Transformer" `_ paper. + Args: + patch_size (List[int]): Patch size. + embed_dim (int): Patch embedding dimension. + depths (List(int)): Depth of each Swin Transformer layer. + num_heads (List(int)): Number of attention heads in different layers. + window_size (List[int]): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0. + dropout (float): Dropout rate. Default: 0.0. + attention_dropout (float): Attention dropout rate. Default: 0.0. + stochastic_depth_prob (float): Stochastic depth rate. Default: 0.1. + num_classes (int): Number of classes for classification head. Default: 400. + norm_layer (nn.Module, optional): Normalization layer. Default: None. + block (nn.Module, optional): SwinTransformer Block. Default: None. + downsample_layer (nn.Module): Downsample layer (patch merging). Default: PatchMerging. + patch_embed (nn.Module, optional): Patch Embedding layer. Default: None. 
+ """ + + def __init__( + self, + patch_size: List[int], + embed_dim: int, + depths: List[int], + num_heads: List[int], + window_size: List[int], + mlp_ratio: float = 4.0, + dropout: float = 0.0, + attention_dropout: float = 0.0, + stochastic_depth_prob: float = 0.1, + num_classes: int = 400, + norm_layer: Optional[Callable[..., nn.Module]] = None, + block: Optional[Callable[..., nn.Module]] = None, + downsample_layer: Callable[..., nn.Module] = PatchMerging, + patch_embed: Optional[Callable[..., nn.Module]] = None, + ) -> None: + super().__init__() + _log_api_usage_once(self) + self.num_classes = num_classes + + if block is None: + block = partial(SwinTransformerBlock, attn_layer=ShiftedWindowAttention3d) + + if norm_layer is None: + norm_layer = partial(nn.LayerNorm, eps=1e-5) + + if patch_embed is None: + patch_embed = PatchEmbed3d + + # split image into non-overlapping patches + self.patch_embed = patch_embed(patch_size=patch_size, embed_dim=embed_dim, norm_layer=norm_layer) + self.pos_drop = nn.Dropout(p=dropout) + + layers: List[nn.Module] = [] + total_stage_blocks = sum(depths) + stage_block_id = 0 + # build SwinTransformer blocks + for i_stage in range(len(depths)): + stage: List[nn.Module] = [] + dim = embed_dim * 2**i_stage + for i_layer in range(depths[i_stage]): + # adjust stochastic depth probability based on the depth of the stage block + sd_prob = stochastic_depth_prob * float(stage_block_id) / (total_stage_blocks - 1) + stage.append( + block( + dim, + num_heads[i_stage], + window_size=window_size, + shift_size=[0 if i_layer % 2 == 0 else w // 2 for w in window_size], + mlp_ratio=mlp_ratio, + dropout=dropout, + attention_dropout=attention_dropout, + stochastic_depth_prob=sd_prob, + norm_layer=norm_layer, + attn_layer=ShiftedWindowAttention3d, + ) + ) + stage_block_id += 1 + layers.append(nn.Sequential(*stage)) + # add patch merging layer + if i_stage < (len(depths) - 1): + layers.append(downsample_layer(dim, norm_layer)) + self.features = nn.Sequential(*layers) + + self.num_features = embed_dim * 2 ** (len(depths) - 1) + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool3d(1) + self.head = nn.Linear(self.num_features, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.zeros_(m.bias) + + def forward(self, x: Tensor) -> Tensor: + # x: B C T H W + x = self.patch_embed(x) # B _T _H _W C + x = self.pos_drop(x) + x = self.features(x) # B _T _H _W C + x = self.norm(x) + x = x.permute(0, 4, 1, 2, 3) # B, C, _T, _H, _W + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.head(x) + return x + + +def _swin_transformer3d( + patch_size: List[int], + embed_dim: int, + depths: List[int], + num_heads: List[int], + window_size: List[int], + stochastic_depth_prob: float, + weights: Optional[WeightsEnum], + progress: bool, + **kwargs: Any, +) -> SwinTransformer3d: + if weights is not None: + _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) + + model = SwinTransformer3d( + patch_size=patch_size, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + window_size=window_size, + stochastic_depth_prob=stochastic_depth_prob, + **kwargs, + ) + + if weights is not None: + model.load_state_dict(weights.get_state_dict(progress=progress)) + + return model + + +_COMMON_META = { + "categories": _KINETICS400_CATEGORIES, + "min_size": (1, 1), + "min_temporal_size": 1, +} + + +class Swin3D_T_Weights(WeightsEnum): + KINETICS400_V1 = 
Weights( + url="https://download.pytorch.org/models/swin3d_t-7615ae03.pth", + transforms=partial( + VideoClassification, + crop_size=(224, 224), + resize_size=(256,), + mean=(0.4850, 0.4560, 0.4060), + std=(0.2290, 0.2240, 0.2250), + ), + meta={ + **_COMMON_META, + "recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400", + "_docs": ( + "The weights were ported from the paper. The accuracies are estimated on video-level " + "with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`" + ), + "num_params": 28158070, + "_metrics": { + "Kinetics-400": { + "acc@1": 77.715, + "acc@5": 93.519, + } + }, + "_ops": 43.882, + "_file_size": 121.543, + }, + ) + DEFAULT = KINETICS400_V1 + + +class Swin3D_S_Weights(WeightsEnum): + KINETICS400_V1 = Weights( + url="https://download.pytorch.org/models/swin3d_s-da41c237.pth", + transforms=partial( + VideoClassification, + crop_size=(224, 224), + resize_size=(256,), + mean=(0.4850, 0.4560, 0.4060), + std=(0.2290, 0.2240, 0.2250), + ), + meta={ + **_COMMON_META, + "recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400", + "_docs": ( + "The weights were ported from the paper. The accuracies are estimated on video-level " + "with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`" + ), + "num_params": 49816678, + "_metrics": { + "Kinetics-400": { + "acc@1": 79.521, + "acc@5": 94.158, + } + }, + "_ops": 82.841, + "_file_size": 218.288, + }, + ) + DEFAULT = KINETICS400_V1 + + +class Swin3D_B_Weights(WeightsEnum): + KINETICS400_V1 = Weights( + url="https://download.pytorch.org/models/swin3d_b_1k-24f7c7c6.pth", + transforms=partial( + VideoClassification, + crop_size=(224, 224), + resize_size=(256,), + mean=(0.4850, 0.4560, 0.4060), + std=(0.2290, 0.2240, 0.2250), + ), + meta={ + **_COMMON_META, + "recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400", + "_docs": ( + "The weights were ported from the paper. The accuracies are estimated on video-level " + "with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`" + ), + "num_params": 88048984, + "_metrics": { + "Kinetics-400": { + "acc@1": 79.427, + "acc@5": 94.386, + } + }, + "_ops": 140.667, + "_file_size": 364.134, + }, + ) + KINETICS400_IMAGENET22K_V1 = Weights( + url="https://download.pytorch.org/models/swin3d_b_22k-7c6ae6fa.pth", + transforms=partial( + VideoClassification, + crop_size=(224, 224), + resize_size=(256,), + mean=(0.4850, 0.4560, 0.4060), + std=(0.2290, 0.2240, 0.2250), + ), + meta={ + **_COMMON_META, + "recipe": "https://github.com/SwinTransformer/Video-Swin-Transformer#kinetics-400", + "_docs": ( + "The weights were ported from the paper. The accuracies are estimated on video-level " + "with parameters `frame_rate=15`, `clips_per_video=12`, and `clip_len=32`" + ), + "num_params": 88048984, + "_metrics": { + "Kinetics-400": { + "acc@1": 81.643, + "acc@5": 95.574, + } + }, + "_ops": 140.667, + "_file_size": 364.134, + }, + ) + DEFAULT = KINETICS400_V1 + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin3D_T_Weights.KINETICS400_V1)) +def swin3d_t(*, weights: Optional[Swin3D_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d: + """ + Constructs a swin_tiny architecture from + `Video Swin Transformer `_. + + Args: + weights (:class:`~torchvision.models.video.Swin3D_T_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.Swin3D_T_Weights` below for + more details, and possible values. 
By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.video.Swin3D_T_Weights + :members: + """ + weights = Swin3D_T_Weights.verify(weights) + + return _swin_transformer3d( + patch_size=[2, 4, 4], + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=[8, 7, 7], + stochastic_depth_prob=0.1, + weights=weights, + progress=progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin3D_S_Weights.KINETICS400_V1)) +def swin3d_s(*, weights: Optional[Swin3D_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d: + """ + Constructs a swin_small architecture from + `Video Swin Transformer `_. + + Args: + weights (:class:`~torchvision.models.video.Swin3D_S_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.Swin3D_S_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. autoclass:: torchvision.models.video.Swin3D_S_Weights + :members: + """ + weights = Swin3D_S_Weights.verify(weights) + + return _swin_transformer3d( + patch_size=[2, 4, 4], + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=[8, 7, 7], + stochastic_depth_prob=0.1, + weights=weights, + progress=progress, + **kwargs, + ) + + +@register_model() +@handle_legacy_interface(weights=("pretrained", Swin3D_B_Weights.KINETICS400_V1)) +def swin3d_b(*, weights: Optional[Swin3D_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer3d: + """ + Constructs a swin_base architecture from + `Video Swin Transformer `_. + + Args: + weights (:class:`~torchvision.models.video.Swin3D_B_Weights`, optional): The + pretrained weights to use. See + :class:`~torchvision.models.video.Swin3D_B_Weights` below for + more details, and possible values. By default, no pre-trained + weights are used. + progress (bool, optional): If True, displays a progress bar of the + download to stderr. Default is True. + **kwargs: parameters passed to the ``torchvision.models.video.swin_transformer.SwinTransformer`` + base class. Please refer to the `source code + `_ + for more details about this class. + + .. 
autoclass:: torchvision.models.video.Swin3D_B_Weights + :members: + """ + weights = Swin3D_B_Weights.verify(weights) + + return _swin_transformer3d( + patch_size=[2, 4, 4], + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=[8, 7, 7], + stochastic_depth_prob=0.1, + weights=weights, + progress=progress, + **kwargs, + ) diff --git a/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/giou_loss.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/giou_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71b3a0d5dc57b1deac41936869a0ef3683d0b51a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchvision/ops/__pycache__/giou_loss.cpython-310.pyc differ
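The two modules added above (torchvision/models/video/resnet.py and torchvision/models/video/swin_transformer.py) are vendored unchanged from torchvision, so the builders they define can be exercised directly. The snippet below is a minimal smoke-test sketch, not part of this diff; it assumes the bundled wemm virtualenv is active and that its torchvision build exposes these video models. Passing weights=None avoids any download; the Kinetics-400 checkpoints can be pulled instead via the weight enums defined above.

# Smoke-test sketch for the vendored video models (assumption: run inside the
# wemm virtualenv committed by this diff; torchvision must expose r3d_18 and swin3d_t).
# weights=None skips downloads; use the enums defined above, e.g.
# torchvision.models.video.R3D_18_Weights.KINETICS400_V1, for pretrained weights.
import torch
from torchvision.models.video import r3d_18, swin3d_t

resnet3d = r3d_18(weights=None).eval()
swin3d = swin3d_t(weights=None).eval()

# Video classifiers consume (batch, channels, frames, height, width) tensors.
clip_small = torch.randn(1, 3, 16, 112, 112)   # typical r3d_18 clip size
clip_large = torch.randn(1, 3, 16, 224, 224)   # typical swin3d_t crop size
with torch.no_grad():
    resnet_logits = resnet3d(clip_small)       # -> torch.Size([1, 400]) Kinetics-400 logits
    swin_logits = swin3d(clip_large)           # -> torch.Size([1, 400])
print(resnet_logits.shape, swin_logits.shape)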