Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz +3 -0
- wemm/lib/python3.10/site-packages/botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz +3 -0
- wemm/lib/python3.10/site-packages/botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz +3 -0
- wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/idna/py.typed +0 -0
- wemm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__init__.py +145 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/celeba.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flowers102.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/pcam.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbd.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sun397.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/widerface.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/celeba.py +189 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/country211.py +58 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py +58 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py +75 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py +114 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/folder.py +317 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/food101.py +93 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py +212 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py +241 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/lfw.py +255 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py +125 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/phototour.py +228 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/stl10.py +174 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/sun397.py +76 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py +130 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/usps.py +95 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py +419 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/voc.py +224 -0
- wemm/lib/python3.10/site-packages/torchvision/datasets/widerface.py +191 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/googlenet.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py +7 -0
wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e91ddb21316f7400642cbf0078ae107bdae9b6daf96f89c9e74ca89c2c63dedd
|
| 3 |
+
size 1839
|
wemm/lib/python3.10/site-packages/botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ab077c76d6b89219c42c47f5f8085f8fdd7bc8ad3de06eb9275fcd8f9730c7f7
|
| 3 |
+
size 1287
|
wemm/lib/python3.10/site-packages/botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d9f335150b42d830a5467ceeecd7d2e1fb89a6435c22006420913b80ff01f621
|
| 3 |
+
size 1289
|
wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc
ADDED
|
Binary file (30.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc
ADDED
|
Binary file (12.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (304 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/idna/py.typed
ADDED
|
File without changes
|
wemm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (1.44 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (18.8 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (351 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__init__.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
|
| 2 |
+
from ._stereo_matching import (
|
| 3 |
+
CarlaStereo,
|
| 4 |
+
CREStereo,
|
| 5 |
+
ETH3DStereo,
|
| 6 |
+
FallingThingsStereo,
|
| 7 |
+
InStereo2k,
|
| 8 |
+
Kitti2012Stereo,
|
| 9 |
+
Kitti2015Stereo,
|
| 10 |
+
Middlebury2014Stereo,
|
| 11 |
+
SceneFlowStereo,
|
| 12 |
+
SintelStereo,
|
| 13 |
+
)
|
| 14 |
+
from .caltech import Caltech101, Caltech256
|
| 15 |
+
from .celeba import CelebA
|
| 16 |
+
from .cifar import CIFAR10, CIFAR100
|
| 17 |
+
from .cityscapes import Cityscapes
|
| 18 |
+
from .clevr import CLEVRClassification
|
| 19 |
+
from .coco import CocoCaptions, CocoDetection
|
| 20 |
+
from .country211 import Country211
|
| 21 |
+
from .dtd import DTD
|
| 22 |
+
from .eurosat import EuroSAT
|
| 23 |
+
from .fakedata import FakeData
|
| 24 |
+
from .fer2013 import FER2013
|
| 25 |
+
from .fgvc_aircraft import FGVCAircraft
|
| 26 |
+
from .flickr import Flickr30k, Flickr8k
|
| 27 |
+
from .flowers102 import Flowers102
|
| 28 |
+
from .folder import DatasetFolder, ImageFolder
|
| 29 |
+
from .food101 import Food101
|
| 30 |
+
from .gtsrb import GTSRB
|
| 31 |
+
from .hmdb51 import HMDB51
|
| 32 |
+
from .imagenet import ImageNet
|
| 33 |
+
from .inaturalist import INaturalist
|
| 34 |
+
from .kinetics import Kinetics
|
| 35 |
+
from .kitti import Kitti
|
| 36 |
+
from .lfw import LFWPairs, LFWPeople
|
| 37 |
+
from .lsun import LSUN, LSUNClass
|
| 38 |
+
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
|
| 39 |
+
from .moving_mnist import MovingMNIST
|
| 40 |
+
from .omniglot import Omniglot
|
| 41 |
+
from .oxford_iiit_pet import OxfordIIITPet
|
| 42 |
+
from .pcam import PCAM
|
| 43 |
+
from .phototour import PhotoTour
|
| 44 |
+
from .places365 import Places365
|
| 45 |
+
from .rendered_sst2 import RenderedSST2
|
| 46 |
+
from .sbd import SBDataset
|
| 47 |
+
from .sbu import SBU
|
| 48 |
+
from .semeion import SEMEION
|
| 49 |
+
from .stanford_cars import StanfordCars
|
| 50 |
+
from .stl10 import STL10
|
| 51 |
+
from .sun397 import SUN397
|
| 52 |
+
from .svhn import SVHN
|
| 53 |
+
from .ucf101 import UCF101
|
| 54 |
+
from .usps import USPS
|
| 55 |
+
from .vision import VisionDataset
|
| 56 |
+
from .voc import VOCDetection, VOCSegmentation
|
| 57 |
+
from .widerface import WIDERFace
|
| 58 |
+
|
| 59 |
+
__all__ = (
|
| 60 |
+
"LSUN",
|
| 61 |
+
"LSUNClass",
|
| 62 |
+
"ImageFolder",
|
| 63 |
+
"DatasetFolder",
|
| 64 |
+
"FakeData",
|
| 65 |
+
"CocoCaptions",
|
| 66 |
+
"CocoDetection",
|
| 67 |
+
"CIFAR10",
|
| 68 |
+
"CIFAR100",
|
| 69 |
+
"EMNIST",
|
| 70 |
+
"FashionMNIST",
|
| 71 |
+
"QMNIST",
|
| 72 |
+
"MNIST",
|
| 73 |
+
"KMNIST",
|
| 74 |
+
"StanfordCars",
|
| 75 |
+
"STL10",
|
| 76 |
+
"SUN397",
|
| 77 |
+
"SVHN",
|
| 78 |
+
"PhotoTour",
|
| 79 |
+
"SEMEION",
|
| 80 |
+
"Omniglot",
|
| 81 |
+
"SBU",
|
| 82 |
+
"Flickr8k",
|
| 83 |
+
"Flickr30k",
|
| 84 |
+
"Flowers102",
|
| 85 |
+
"VOCSegmentation",
|
| 86 |
+
"VOCDetection",
|
| 87 |
+
"Cityscapes",
|
| 88 |
+
"ImageNet",
|
| 89 |
+
"Caltech101",
|
| 90 |
+
"Caltech256",
|
| 91 |
+
"CelebA",
|
| 92 |
+
"WIDERFace",
|
| 93 |
+
"SBDataset",
|
| 94 |
+
"VisionDataset",
|
| 95 |
+
"USPS",
|
| 96 |
+
"Kinetics",
|
| 97 |
+
"HMDB51",
|
| 98 |
+
"UCF101",
|
| 99 |
+
"Places365",
|
| 100 |
+
"Kitti",
|
| 101 |
+
"INaturalist",
|
| 102 |
+
"LFWPeople",
|
| 103 |
+
"LFWPairs",
|
| 104 |
+
"KittiFlow",
|
| 105 |
+
"Sintel",
|
| 106 |
+
"FlyingChairs",
|
| 107 |
+
"FlyingThings3D",
|
| 108 |
+
"HD1K",
|
| 109 |
+
"Food101",
|
| 110 |
+
"DTD",
|
| 111 |
+
"FER2013",
|
| 112 |
+
"GTSRB",
|
| 113 |
+
"CLEVRClassification",
|
| 114 |
+
"OxfordIIITPet",
|
| 115 |
+
"PCAM",
|
| 116 |
+
"Country211",
|
| 117 |
+
"FGVCAircraft",
|
| 118 |
+
"EuroSAT",
|
| 119 |
+
"RenderedSST2",
|
| 120 |
+
"Kitti2012Stereo",
|
| 121 |
+
"Kitti2015Stereo",
|
| 122 |
+
"CarlaStereo",
|
| 123 |
+
"Middlebury2014Stereo",
|
| 124 |
+
"CREStereo",
|
| 125 |
+
"FallingThingsStereo",
|
| 126 |
+
"SceneFlowStereo",
|
| 127 |
+
"SintelStereo",
|
| 128 |
+
"InStereo2k",
|
| 129 |
+
"ETH3DStereo",
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# We override current module's attributes to handle the import:
|
| 134 |
+
# from torchvision.datasets import wrap_dataset_for_transforms_v2
|
| 135 |
+
# with beta state v2 warning from torchvision.datapoints
|
| 136 |
+
# We also want to avoid raising the warning when importing other attributes
|
| 137 |
+
# from torchvision.datasets
|
| 138 |
+
# Ref: https://peps.python.org/pep-0562/
|
| 139 |
+
def __getattr__(name):
    """Module-level attribute hook (PEP 562) for lazy, warning-aware imports.

    ``wrap_dataset_for_transforms_v2`` lives in ``torchvision.datapoints``,
    which emits a beta-state warning on import.  Deferring the import until
    the attribute is actually requested keeps plain
    ``from torchvision.datasets import <Dataset>`` imports warning-free.
    """
    if name != "wrap_dataset_for_transforms_v2":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    from torchvision.datapoints._dataset_wrapper import wrap_dataset_for_transforms_v2

    return wrap_dataset_for_transforms_v2
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc
ADDED
|
Binary file (17.6 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/celeba.cpython-310.pyc
ADDED
|
Binary file (7.16 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flowers102.cpython-310.pyc
ADDED
|
Binary file (4.6 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc
ADDED
|
Binary file (8.56 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/pcam.cpython-310.pyc
ADDED
|
Binary file (4.82 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbd.cpython-310.pyc
ADDED
|
Binary file (5.98 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sun397.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/widerface.cpython-310.pyc
ADDED
|
Binary file (6.75 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/celeba.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv
|
| 2 |
+
import os
|
| 3 |
+
from collections import namedtuple
|
| 4 |
+
from typing import Any, Callable, List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import PIL
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from .utils import check_integrity, download_file_from_google_drive, extract_archive, verify_str_arg
|
| 10 |
+
from .vision import VisionDataset
|
| 11 |
+
|
| 12 |
+
# Lightweight record for a parsed CelebA annotation file: ``header`` (column
# names, empty when the file has none), ``index`` (first column — the image
# filenames) and ``data`` (remaining columns as an int tensor).
CSV = namedtuple("CSV", ["header", "index", "data"])
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class CelebA(VisionDataset):
    """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        split (string): One of {'train', 'valid', 'test', 'all'}.
            Accordingly dataset is selected.
        target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
            or ``landmarks``. Can also be a list to output a tuple with all specified target types.
            The targets represent:

                - ``attr`` (Tensor shape=(40,) dtype=int): binary (0, 1) labels for attributes
                - ``identity`` (int): label for each person (data points with the same identity are the same person)
                - ``bbox`` (Tensor shape=(4,) dtype=int): bounding box (x, y, width, height)
                - ``landmarks`` (Tensor shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
                  righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)

            Defaults to ``attr``. If empty, ``None`` will be returned as target.

        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    base_folder = "celeba"
    # There currently does not appear to be an easy way to extract 7z in python (without introducing additional
    # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
    # right now.
    file_list = [
        # File ID                                      MD5 Hash                            Filename
        ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
        # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc","b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
        # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
        ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
        ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
        ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
        ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
        # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
        ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
    ]

    def __init__(
        self,
        root: str,
        split: str = "train",
        target_type: Union[List[str], str] = "attr",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = split
        # Normalize target_type to a list so __getitem__ can always iterate it.
        if isinstance(target_type, list):
            self.target_type = target_type
        else:
            self.target_type = [target_type]

        if not self.target_type and self.target_transform is not None:
            raise RuntimeError("target_transform is specified but target_type is empty")

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # "all" maps to None, which later selects every row via slice(None).
        split_map = {
            "train": 0,
            "valid": 1,
            "test": 2,
            "all": None,
        }
        split_ = split_map[verify_str_arg(split.lower(), "split", ("train", "valid", "test", "all"))]
        splits = self._load_csv("list_eval_partition.txt")
        identity = self._load_csv("identity_CelebA.txt")
        bbox = self._load_csv("list_bbox_celeba.txt", header=1)
        landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1)
        attr = self._load_csv("list_attr_celeba.txt", header=1)

        # Either a keep-everything slice sentinel (split == "all") or a boolean
        # tensor selecting rows whose partition id matches the requested split.
        mask = slice(None) if split_ is None else (splits.data == split_).squeeze()

        if mask == slice(None):  # if split == "all"
            self.filename = splits.index
        else:
            self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]
        self.identity = identity.data[mask]
        self.bbox = bbox.data[mask]
        self.landmarks_align = landmarks_align.data[mask]
        self.attr = attr.data[mask]
        # map from {-1, 1} to {0, 1}
        self.attr = torch.div(self.attr + 1, 2, rounding_mode="floor")
        self.attr_names = attr.header

    def _load_csv(
        self,
        filename: str,
        header: Optional[int] = None,
    ) -> CSV:
        """Parse a space-delimited annotation file into a ``CSV`` record.

        ``header`` is the row index of the column-header line; that row and
        everything above it are removed from the data. ``None`` means the
        file has no header (e.g. the partition and identity files).
        """
        with open(os.path.join(self.root, self.base_folder, filename)) as csv_file:
            data = list(csv.reader(csv_file, delimiter=" ", skipinitialspace=True))

        if header is not None:
            headers = data[header]
            data = data[header + 1 :]
        else:
            headers = []

        # First column is the image filename; the rest are integer fields.
        indices = [row[0] for row in data]
        data = [row[1:] for row in data]
        data_int = [list(map(int, i)) for i in data]

        return CSV(headers, indices, torch.tensor(data_int))

    def _check_integrity(self) -> bool:
        """Return True when every annotation file passes its MD5 check and the
        extracted image directory is present."""
        for (_, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            # Allow original archive to be deleted (zip and 7z)
            # Only need the extracted images
            if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
                return False

        # Should check a hash of the images
        return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))

    def download(self) -> None:
        """Download all files from Google Drive and extract the image archive.

        No-op if the dataset already passes the integrity check.
        """
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        for (file_id, md5, filename) in self.file_list:
            download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)

        extract_archive(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(image, target)``; target is a tuple when multiple
        target_types were requested and ``None`` when target_type is empty."""
        X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))

        target: Any = []
        for t in self.target_type:
            if t == "attr":
                target.append(self.attr[index, :])
            elif t == "identity":
                target.append(self.identity[index, 0])
            elif t == "bbox":
                target.append(self.bbox[index, :])
            elif t == "landmarks":
                target.append(self.landmarks_align[index, :])
            else:
                # TODO: refactor with utils.verify_str_arg
                raise ValueError(f'Target type "{t}" is not recognized.')

        if self.transform is not None:
            X = self.transform(X)

        if target:
            # Single requested type is unwrapped from its 1-element list.
            target = tuple(target) if len(target) > 1 else target[0]

            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            target = None

        return X, target

    def __len__(self) -> int:
        return len(self.attr)

    def extra_repr(self) -> str:
        lines = ["Target type: {target_type}", "Split: {split}"]
        return "\n".join(lines).format(**self.__dict__)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/country211.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import Callable, Optional
|
| 3 |
+
|
| 4 |
+
from .folder import ImageFolder
|
| 5 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Country211(ImageFolder):
    """`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.

    Images from the YFCC100m dataset whose GPS coordinates fall inside an
    ISO-3166 country, balanced to 150 train / 50 validation / 100 test images
    per country.

    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into
            ``root/country211/``. If dataset is already downloaded, it is not downloaded again.
    """

    _URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
    _MD5 = "84988d7644798601126c29e9877aab6a"

    def __init__(
        self,
        root: str,
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "valid", "test"))

        expanded = Path(root).expanduser()
        self.root = str(expanded)
        self._base_folder = expanded / "country211"

        if download:
            self._download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        split_dir = self._base_folder / self._split
        super().__init__(str(split_dir), transform=transform, target_transform=target_transform)
        # ImageFolder.__init__ rebinds ``root`` to the split directory; restore
        # the user-facing dataset root.
        self.root = str(expanded)

    def _check_exists(self) -> bool:
        """True when ``root/country211`` exists and is a directory."""
        folder = self._base_folder
        return folder.exists() and folder.is_dir()

    def _download(self) -> None:
        """Fetch and extract the archive unless the data is already present."""
        if not self._check_exists():
            download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Callable, Optional
|
| 3 |
+
|
| 4 |
+
from .folder import ImageFolder
|
| 5 |
+
from .utils import download_and_extract_archive
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class EuroSAT(ImageFolder):
    """RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.

    Args:
        root (string): Root directory of dataset where ``root/eurosat`` exists.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        expanded_root = os.path.expanduser(root)
        self.root = expanded_root
        self._base_folder = os.path.join(expanded_root, "eurosat")
        # Images are extracted into a "2750" subdirectory of the archive.
        self._data_folder = os.path.join(self._base_folder, "2750")

        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
        # ImageFolder.__init__ rebinds ``root`` to the data folder; restore the
        # user-facing dataset root.
        self.root = expanded_root

    def __len__(self) -> int:
        return len(self.samples)

    def _check_exists(self) -> bool:
        """True when the extracted image directory is present."""
        return os.path.exists(self._data_folder)

    def download(self) -> None:
        """Fetch and extract the archive unless the data is already present."""
        if not self._check_exists():
            os.makedirs(self._base_folder, exist_ok=True)
            download_and_extract_archive(
                "https://madm.dfki.de/files/sentinel/EuroSAT.zip",
                download_root=self._base_folder,
                md5="c8fa014336c82ac7804f0398fcb19387",
            )
|
wemm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv
|
| 2 |
+
import pathlib
|
| 3 |
+
from typing import Any, Callable, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
from .utils import check_integrity, verify_str_arg
|
| 9 |
+
from .vision import VisionDataset
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class FER2013(VisionDataset):
|
| 13 |
+
"""`FER2013
|
| 14 |
+
<https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge>`_ Dataset.
|
| 15 |
+
|
| 16 |
+
Args:
|
| 17 |
+
root (string): Root directory of dataset where directory
|
| 18 |
+
``root/fer2013`` exists.
|
| 19 |
+
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
|
| 20 |
+
transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
|
| 21 |
+
version. E.g, ``transforms.RandomCrop``
|
| 22 |
+
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
_RESOURCES = {
|
| 26 |
+
"train": ("train.csv", "3f0dfb3d3fd99c811a1299cb947e3131"),
|
| 27 |
+
"test": ("test.csv", "b02c2298636a634e8c2faabbf3ea9a23"),
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
def __init__(
|
| 31 |
+
self,
|
| 32 |
+
root: str,
|
| 33 |
+
split: str = "train",
|
| 34 |
+
transform: Optional[Callable] = None,
|
| 35 |
+
target_transform: Optional[Callable] = None,
|
| 36 |
+
) -> None:
|
| 37 |
+
self._split = verify_str_arg(split, "split", self._RESOURCES.keys())
|
| 38 |
+
super().__init__(root, transform=transform, target_transform=target_transform)
|
| 39 |
+
|
| 40 |
+
base_folder = pathlib.Path(self.root) / "fer2013"
|
| 41 |
+
file_name, md5 = self._RESOURCES[self._split]
|
| 42 |
+
data_file = base_folder / file_name
|
| 43 |
+
if not check_integrity(str(data_file), md5=md5):
|
| 44 |
+
raise RuntimeError(
|
| 45 |
+
f"{file_name} not found in {base_folder} or corrupted. "
|
| 46 |
+
f"You can download it from "
|
| 47 |
+
f"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
with open(data_file, "r", newline="") as file:
|
| 51 |
+
self._samples = [
|
| 52 |
+
(
|
| 53 |
+
torch.tensor([int(idx) for idx in row["pixels"].split()], dtype=torch.uint8).reshape(48, 48),
|
| 54 |
+
int(row["emotion"]) if "emotion" in row else None,
|
| 55 |
+
)
|
| 56 |
+
for row in csv.DictReader(file)
|
| 57 |
+
]
|
| 58 |
+
|
| 59 |
+
def __len__(self) -> int:
|
| 60 |
+
return len(self._samples)
|
| 61 |
+
|
| 62 |
+
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
    """Return the ``(image, target)`` pair at position ``idx``.

    The stored uint8 tensor is converted to a PIL image, then the optional
    transforms are applied to the image and the target respectively.
    """
    pixels, target = self._samples[idx]
    image = Image.fromarray(pixels.numpy())

    if self.transform is not None:
        image = self.transform(image)
    if self.target_transform is not None:
        target = self.target_transform(target)

    return image, target
|
| 73 |
+
|
| 74 |
+
def extra_repr(self) -> str:
    """Extra information shown in ``repr``: the active dataset split."""
    return "split={}".format(self._split)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from typing import Any, Callable, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import PIL.Image
|
| 7 |
+
|
| 8 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 9 |
+
from .vision import VisionDataset
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class FGVCAircraft(VisionDataset):
    """`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.

    The dataset contains 10,000 images of aircraft, with 100 images for each of 100
    different aircraft model variants, most of which are airplanes.
    Aircraft models are organized in a three-levels hierarchy. The three levels, from
    finer to coarser, are:

    - ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
      indistinguishable into one class. The dataset comprises 100 different variants.
    - ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
    - ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.

    Args:
        root (string): Root directory of the FGVC Aircraft dataset.
        split (string, optional): The dataset split, supports ``train``, ``val``,
            ``trainval`` and ``test``.
        annotation_level (str, optional): The annotation level, supports ``variant``,
            ``family`` and ``manufacturer``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    # Single archive containing images, annotations and split files.
    _URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"

    def __init__(
        self,
        root: str,
        split: str = "trainval",
        annotation_level: str = "variant",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
        self._annotation_level = verify_str_arg(
            annotation_level, "annotation_level", ("variant", "family", "manufacturer")
        )

        self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # The class names for the chosen hierarchy level live in a per-level
        # text file, one class per line.
        annotation_file = os.path.join(
            self._data_path,
            "data",
            {
                "variant": "variants.txt",
                "family": "families.txt",
                "manufacturer": "manufacturers.txt",
            }[self._annotation_level],
        )
        with open(annotation_file, "r") as f:
            self.classes = [line.strip() for line in f]

        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        image_data_folder = os.path.join(self._data_path, "data", "images")
        labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")

        self._image_files = []
        self._labels = []

        # Each line is "<image id> <class name>"; the class name may itself
        # contain spaces, hence maxsplit=1.
        with open(labels_file, "r") as f:
            for line in f:
                image_name, label_name = line.strip().split(" ", 1)
                self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
                self._labels.append(self.class_to_idx[label_name])

    def __len__(self) -> int:
        """Return the number of images in the selected split."""
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the ``(image, label)`` pair at ``idx``, with transforms applied."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def _download(self) -> None:
        """
        Download the FGVC Aircraft dataset archive and extract it under root.
        """
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, self.root)

    def _check_exists(self) -> bool:
        # Only checks that the extracted folder is present; file contents are
        # not verified here.
        return os.path.exists(self._data_path) and os.path.isdir(self._data_path)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/folder.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import os.path
|
| 3 |
+
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
from .vision import VisionDataset
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def has_file_allowed_extension(filename: str, extensions: Union[str, Tuple[str, ...]]) -> bool:
    """Check whether a file name ends with one of the allowed extensions.

    The comparison is case-insensitive on the file name side.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    if isinstance(extensions, str):
        suffixes: Union[str, Tuple[str, ...]] = extensions
    else:
        suffixes = tuple(extensions)
    return filename.lower().endswith(suffixes)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def is_image_file(filename: str) -> bool:
    """Checks if a file is an allowed image extension.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    # Delegate to the generic extension check with the module-wide whitelist.
    return has_file_allowed_extension(filename, extensions=IMG_EXTENSIONS)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]:
    """Finds the class folders in a dataset.

    See :class:`DatasetFolder` for details.
    """
    # Each immediate subdirectory of ``directory`` is one class.
    subdirs = [entry.name for entry in os.scandir(directory) if entry.is_dir()]
    subdirs.sort()
    if not subdirs:
        raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")

    index_of = dict(zip(subdirs, range(len(subdirs))))
    return subdirs, index_of
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def make_dataset(
    directory: str,
    class_to_idx: Optional[Dict[str, int]] = None,
    extensions: Optional[Union[str, Tuple[str, ...]]] = None,
    is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
    """Generates a list of samples of a form (path_to_sample, class).

    See :class:`DatasetFolder` for details.

    Note: The class_to_idx parameter is here optional and will use the logic of the ``find_classes`` function
    by default.

    Raises:
        ValueError: if ``class_to_idx`` is given but empty, or if both or neither
            of ``extensions`` and ``is_valid_file`` are given.
        FileNotFoundError: if no valid file was found for one of the classes.
    """
    directory = os.path.expanduser(directory)

    if class_to_idx is None:
        _, class_to_idx = find_classes(directory)
    elif not class_to_idx:
        raise ValueError("'class_to_index' must have at least one entry to collect any samples.")

    # Exactly one of ``extensions`` / ``is_valid_file`` must be supplied.
    both_none = extensions is None and is_valid_file is None
    both_something = extensions is not None and is_valid_file is not None
    if both_none or both_something:
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")

    if extensions is not None:

        # Derive the file predicate from the extension whitelist.
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, extensions)  # type: ignore[arg-type]

    is_valid_file = cast(Callable[[str], bool], is_valid_file)

    instances = []
    available_classes = set()
    for target_class in sorted(class_to_idx.keys()):
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        # Walk the class folder recursively; sorting directories and file
        # names keeps the sample order deterministic across runs/platforms.
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = path, class_index
                    instances.append(item)

                    if target_class not in available_classes:
                        available_classes.add(target_class)

    # Classes that yielded no samples are an error, so that misconfigured
    # extensions/predicates don't silently drop whole classes.
    empty_classes = set(class_to_idx.keys()) - available_classes
    if empty_classes:
        msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. "
        if extensions is not None:
            msg += f"Supported extensions are: {extensions if isinstance(extensions, str) else ', '.join(extensions)}"
        raise FileNotFoundError(msg)

    return instances
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class DatasetFolder(VisionDataset):
    """A generic data loader.

    This default directory structure can be customized by overriding the
    :meth:`find_classes` method.

    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (tuple[string]): A list of allowed extensions.
            both extensions and is_valid_file should not be passed.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        is_valid_file (callable, optional): A function that takes path of a file
            and check if the file is a valid file (used to check of corrupt files)
            both extensions and is_valid_file should not be passed.

    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(
        self,
        root: str,
        loader: Callable[[str], Any],
        extensions: Optional[Tuple[str, ...]] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        # Use the (possibly overridden) instance methods so subclasses can
        # customize class discovery and sample collection.
        classes, class_to_idx = self.find_classes(self.root)
        samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file)

        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # Targets are kept separately for cheap label-only access.
        self.targets = [s[1] for s in samples]

    @staticmethod
    def make_dataset(
        directory: str,
        class_to_idx: Dict[str, int],
        extensions: Optional[Tuple[str, ...]] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ) -> List[Tuple[str, int]]:
        """Generates a list of samples of a form (path_to_sample, class).

        This can be overridden to e.g. read files from a compressed zip file instead of from the disk.

        Args:
            directory (str): root dataset directory, corresponding to ``self.root``.
            class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
            extensions (optional): A list of allowed extensions.
                Either extensions or is_valid_file should be passed. Defaults to None.
            is_valid_file (optional): A function that takes path of a file
                and checks if the file is a valid file
                (used to check of corrupt files) both extensions and
                is_valid_file should not be passed. Defaults to None.

        Raises:
            ValueError: In case ``class_to_idx`` is empty.
            ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
            FileNotFoundError: In case no valid file was found for any class.

        Returns:
            List[Tuple[str, int]]: samples of a form (path_to_sample, class)
        """
        if class_to_idx is None:
            # prevent potential bug since make_dataset() would use the class_to_idx logic of the
            # find_classes() function, instead of using that of the find_classes() method, which
            # is potentially overridden and thus could have a different logic.
            raise ValueError("The class_to_idx parameter cannot be None.")
        return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file)

    def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
        """Find the class folders in a dataset structured as follows::

            directory/
            ├── class_x
            │   ├── xxx.ext
            │   ├── xxy.ext
            │   └── ...
            │       └── xxz.ext
            └── class_y
                ├── 123.ext
                ├── nsdf3.ext
                └── ...
                └── asd932_.ext

        This method can be overridden to only consider
        a subset of classes, or to adapt to a different dataset directory structure.

        Args:
            directory(str): Root directory path, corresponding to ``self.root``

        Raises:
            FileNotFoundError: If ``dir`` has no class folders.

        Returns:
            (Tuple[List[str], Dict[str, int]]): List of all classes and dictionary mapping each class to an index.
        """
        return find_classes(directory)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target

    def __len__(self) -> int:
        return len(self.samples)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp")
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def pil_loader(path: str) -> Image.Image:
    """Load the image at ``path`` with PIL and return it converted to RGB."""
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, "rb") as fh:
        return Image.open(fh).convert("RGB")
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
    """Load the image at ``path`` with accimage, falling back to PIL on decode errors."""
    import accimage

    try:
        return accimage.Image(path)
    except OSError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def default_loader(path: str) -> Any:
    """Load an image using whichever backend torchvision is configured with."""
    from torchvision import get_image_backend

    backend = get_image_backend()
    if backend == "accimage":
        return accimage_loader(path)
    return pil_loader(path)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class ImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way by default: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/[...]/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/[...]/asd932_.png

    This class inherits from :class:`~torchvision.datasets.DatasetFolder` so
    the same methods can be overridden to customize the dataset.

    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes path of an Image file
            and check if the file is a valid file (used to check of corrupt files)

    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        loader: Callable[[str], Any] = default_loader,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ):
        # Pass the image-extension whitelist only when no custom predicate is
        # given: DatasetFolder rejects receiving both at once.
        super().__init__(
            root,
            loader,
            IMG_EXTENSIONS if is_valid_file is None else None,
            transform=transform,
            target_transform=target_transform,
            is_valid_file=is_valid_file,
        )
        # Legacy alias kept for backward compatibility with older code.
        self.imgs = self.samples
|
wemm/lib/python3.10/site-packages/torchvision/datasets/food101.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Any, Callable, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import PIL.Image
|
| 6 |
+
|
| 7 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 8 |
+
from .vision import VisionDataset
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Food101(VisionDataset):
    """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.

    The Food-101 is a challenging data set of 101 food categories with 101,000 images.
    For each class, 250 manually reviewed test images are provided as well as 750 training images.
    On purpose, the training images were not cleaned, and thus still contain some amount of noise.
    This comes mostly in the form of intense colors and sometimes wrong labels. All images were
    rescaled to have a maximum side length of 512 pixels.


    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
    _MD5 = "85eeb15f3717b99a5da872d97d918f87"

    def __init__(
        self,
        root: str,
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = Path(self.root) / "food-101"
        self._meta_folder = self._base_folder / "meta"
        self._images_folder = self._base_folder / "images"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        self._labels = []
        self._image_files = []
        # meta/<split>.json maps each class name to a list of relative image
        # paths (e.g. "apple_pie/12345").
        with open(self._meta_folder / f"{split}.json") as f:
            metadata = json.loads(f.read())

        self.classes = sorted(metadata.keys())
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        for class_label, im_rel_paths in metadata.items():
            self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
            # Split on "/" and rejoin via joinpath so the paths work on any OS.
            self._image_files += [
                self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
            ]

    def __len__(self) -> int:
        """Return the number of images in the selected split."""
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the ``(image, label)`` pair at ``idx``, with transforms applied."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        """Extra information shown in ``repr``: the active dataset split."""
        return f"split={self._split}"

    def _check_exists(self) -> bool:
        # Both the meta and images folders must exist as directories.
        return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))

    def _download(self) -> None:
        """Download and extract the archive into root, unless already present."""
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
import tempfile
|
| 4 |
+
from contextlib import contextmanager
|
| 5 |
+
from typing import Any, Dict, Iterator, List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from .folder import ImageFolder
|
| 10 |
+
from .utils import check_integrity, extract_archive, verify_str_arg
|
| 11 |
+
|
| 12 |
+
# Archive file name and expected md5 for each ImageNet component; the user
# must place these archives in ``root`` themselves (they are not downloaded).
ARCHIVE_META = {
    "train": ("ILSVRC2012_img_train.tar", "1d675b47d978889d74fa0da5fadfb00e"),
    "val": ("ILSVRC2012_img_val.tar", "29b22e2961454d5413ddabcf34fc5622"),
    "devkit": ("ILSVRC2012_devkit_t12.tar.gz", "fa75699e90414af021442c21a62c3abf"),
}

# Name of the cached devkit-metadata file written under ``root``, loaded via
# load_meta_file().
META_FILE = "meta.bin"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.

    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.

    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, root: str, split: str = "train", **kwargs: Any) -> None:
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))

        # Make sure the split folder and the cached meta file exist before
        # ImageFolder scans the directory.
        self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]

        # ImageFolder is initialized on the split subfolder, so restore the
        # original root afterwards.
        super().__init__(self.split_folder, **kwargs)
        self.root = root

        # ImageFolder discovered wnid-named folders; remap so that ``classes``
        # holds human-readable name tuples and ``wnids`` the raw ids.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        # Every synonym in a class-name tuple maps to the same index.
        self.class_to_idx = {cls: idx for idx, clss in enumerate(self.classes) for cls in clss}

    def parse_archives(self) -> None:
        """Extract the devkit and the selected split archive if not done yet."""
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)

        if not os.path.isdir(self.split_folder):
            if self.split == "train":
                parse_train_archive(self.root)
            elif self.split == "val":
                parse_val_archive(self.root)

    @property
    def split_folder(self) -> str:
        # e.g. ``<root>/train`` or ``<root>/val``.
        return os.path.join(self.root, self.split)

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def load_meta_file(root: str, file: Optional[str] = None) -> Tuple[Dict[str, str], List[str]]:
    """Load the cached devkit metadata saved under ``root``.

    Args:
        root (str): Directory containing the meta file.
        file (str, optional): Meta file name; defaults to ``META_FILE``.

    Returns:
        The tuple persisted with ``torch.save``: a mapping from WordNet id to
        class names and the list of validation wnids.

    Raises:
        RuntimeError: if the meta file is absent or corrupted.
    """
    if file is None:
        file = META_FILE
    file = os.path.join(root, file)

    if check_integrity(file):
        return torch.load(file)
    else:
        msg = (
            "The meta file {} is not present in the root directory or is corrupted. "
            "This file is automatically created by the ImageNet dataset."
        )
        # NOTE(review): the template has a single placeholder, so the ``root``
        # argument to format() is silently ignored — confirm intent.
        raise RuntimeError(msg.format(file, root))
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _verify_archive(root: str, file: str, md5: str) -> None:
    """Raise ``RuntimeError`` unless ``root/file`` exists and matches ``md5``."""
    archive = os.path.join(root, file)
    if check_integrity(archive, md5):
        return
    raise RuntimeError(
        f"The archive {file} is not present in the root directory or is corrupted. "
        f"You need to download it externally and place it in {root}."
    )
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def parse_devkit_archive(root: str, file: Optional[str] = None) -> None:
    """Parse the devkit archive of the ImageNet2012 classification dataset and save
    the meta information in a binary file.

    Args:
        root (str): Root directory containing the devkit archive
        file (str, optional): Name of devkit archive. Defaults to
            'ILSVRC2012_devkit_t12.tar.gz'
    """
    # Local import: scipy is only needed for this one-off parsing step.
    import scipy.io as sio

    def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, Tuple[str, ...]]]:
        # Read the synset table and keep only leaf entries (num_children == 0),
        # i.e. the actual classification classes.
        metafile = os.path.join(devkit_root, "data", "meta.mat")
        meta = sio.loadmat(metafile, squeeze_me=True)["synsets"]
        nums_children = list(zip(*meta))[4]
        meta = [meta[idx] for idx, num_children in enumerate(nums_children) if num_children == 0]
        idcs, wnids, classes = list(zip(*meta))[:3]
        # Each class description is a comma-separated list of synonyms.
        classes = [tuple(clss.split(", ")) for clss in classes]
        idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
        wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
        return idx_to_wnid, wnid_to_classes

    def parse_val_groundtruth_txt(devkit_root: str) -> List[int]:
        # One integer class index per line, in validation-image order.
        file = os.path.join(devkit_root, "data", "ILSVRC2012_validation_ground_truth.txt")
        with open(file) as txtfh:
            val_idcs = txtfh.readlines()
        return [int(val_idx) for val_idx in val_idcs]

    @contextmanager
    def get_tmp_dir() -> Iterator[str]:
        # Scratch directory that is removed even if parsing raises.
        tmp_dir = tempfile.mkdtemp()
        try:
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir)

    archive_meta = ARCHIVE_META["devkit"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]

    _verify_archive(root, file, md5)

    with get_tmp_dir() as tmp_dir:
        extract_archive(os.path.join(root, file), tmp_dir)

        devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12")
        idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root)
        val_idcs = parse_val_groundtruth_txt(devkit_root)
        val_wnids = [idx_to_wnid[idx] for idx in val_idcs]

        # Cache the parsed info so later runs can skip the devkit entirely.
        torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE))
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def parse_train_archive(root: str, file: Optional[str] = None, folder: str = "train") -> None:
    """Parse the train images archive of the ImageNet2012 classification dataset and
    prepare it for usage with the ImageNet dataset.

    Args:
        root (str): Root directory containing the train images archive
        file (str, optional): Name of train images archive. Defaults to
            'ILSVRC2012_img_train.tar'
        folder (str, optional): Optional name for train images folder. Defaults to
            'train'
    """
    archive_meta = ARCHIVE_META["train"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]

    _verify_archive(root, file, md5)

    train_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), train_root)

    # The outer tar contains one inner tar per class; unpack each into a
    # directory named after the archive (its wnid) and delete the inner tar.
    archives = [os.path.join(train_root, archive) for archive in os.listdir(train_root)]
    for archive in archives:
        extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def parse_val_archive(
    root: str, file: Optional[str] = None, wnids: Optional[List[str]] = None, folder: str = "val"
) -> None:
    """Parse the validation images archive of the ImageNet2012 classification dataset
    and prepare it for usage with the ImageNet dataset.

    Args:
        root (str): Root directory containing the validation images archive
        file (str, optional): Name of validation images archive. Defaults to
            'ILSVRC2012_img_val.tar'
        wnids (list, optional): List of WordNet IDs of the validation images. If None
            is given, the IDs are loaded from the meta file in the root directory
        folder (str, optional): Optional name for validation images folder. Defaults to
            'val'
    """
    archive_meta = ARCHIVE_META["val"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]
    if wnids is None:
        wnids = load_meta_file(root)[1]

    _verify_archive(root, file, md5)

    val_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), val_root)

    # The archive extracts flat; sorting the file names aligns them with the
    # order of ``wnids`` (one wnid per validation image).
    images = sorted(os.path.join(val_root, image) for image in os.listdir(val_root))

    # Create one folder per distinct wnid ...
    for wnid in set(wnids):
        os.mkdir(os.path.join(val_root, wnid))

    # ... and move each image into the folder of its ground-truth wnid.
    for wnid, img_file in zip(wnids, images):
        shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file)))
|
wemm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import os.path
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 8 |
+
from .vision import VisionDataset
|
| 9 |
+
|
| 10 |
+
# Taxonomic ranks available as targets for the 2021 dataset versions,
# ordered from broadest to most specific (species itself is "full").
CATEGORIES_2021 = ["kingdom", "phylum", "class", "order", "family", "genus"]

# Download URL of the image archive for each supported dataset version.
DATASET_URLS = {
    "2017": "https://ml-inat-competition-datasets.s3.amazonaws.com/2017/train_val_images.tar.gz",
    "2018": "https://ml-inat-competition-datasets.s3.amazonaws.com/2018/train_val2018.tar.gz",
    "2019": "https://ml-inat-competition-datasets.s3.amazonaws.com/2019/train_val2019.tar.gz",
    "2021_train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.tar.gz",
    "2021_train_mini": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train_mini.tar.gz",
    "2021_valid": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.tar.gz",
}

# MD5 checksums of the archives above, keyed by version.
DATASET_MD5 = {
    "2017": "7c784ea5e424efaec655bd392f87301f",
    "2018": "b1c6952ce38f31868cc50ea72d066cc3",
    "2019": "c60a6e2962c9b8ccbd458d12c8582644",
    "2021_train": "e0526d53c7f7b2e3167b2b43bb2690ed",
    "2021_train_mini": "db6ed8330e634445efc8fec83ae81442",
    "2021_valid": "f6f6e0e242e3d4c9569ba56400938afc",
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class INaturalist(VisionDataset):
    """`iNaturalist <https://github.com/visipedia/inat_comp>`_ Dataset.

    Args:
        root (string): Root directory of dataset where the image files are stored.
            This class does not require/use annotation files.
        version (string, optional): Which version of the dataset to download/use. One of
            '2017', '2018', '2019', '2021_train', '2021_train_mini', '2021_valid'.
            Default: `2021_train`.
        target_type (string or list, optional): Type of target to use, for 2021 versions, one of:

            - ``full``: the full category (species)
            - ``kingdom``: e.g. "Animalia"
            - ``phylum``: e.g. "Arthropoda"
            - ``class``: e.g. "Insecta"
            - ``order``: e.g. "Coleoptera"
            - ``family``: e.g. "Cleridae"
            - ``genus``: e.g. "Trichodes"

        for 2017-2019 versions, one of:

            - ``full``: the full (numeric) category
            - ``super``: the super category, e.g. "Amphibians"

            Can also be a list to output a tuple with all specified target types.
            Defaults to ``full``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        version: str = "2021_train",
        target_type: Union[List[str], str] = "full",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self.version = verify_str_arg(version, "version", DATASET_URLS.keys())

        # The images live in root/<version>; VisionDataset stores that path as
        # self.root.
        super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform)

        os.makedirs(root, exist_ok=True)
        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # list indexed by full category id, containing the directory name
        self.all_categories: List[str] = []

        # map: category type -> name of category -> index
        self.categories_index: Dict[str, Dict[str, int]] = {}

        # list indexed by category id, containing mapping from category type -> index
        self.categories_map: List[Dict[str, int]] = []

        if not isinstance(target_type, list):
            target_type = [target_type]
        if self.version[:4] == "2021":
            self.target_type = [verify_str_arg(t, "target_type", ("full", *CATEGORIES_2021)) for t in target_type]
            self._init_2021()
        else:
            self.target_type = [verify_str_arg(t, "target_type", ("full", "super")) for t in target_type]
            self._init_pre2021()

        # index of all files: (full category id, filename)
        self.index: List[Tuple[int, str]] = []

        for dir_index, dir_name in enumerate(self.all_categories):
            files = os.listdir(os.path.join(self.root, dir_name))
            for fname in files:
                self.index.append((dir_index, fname))

    def _init_2021(self) -> None:
        """Initialize based on 2021 layout"""

        self.all_categories = sorted(os.listdir(self.root))

        # map: category type -> name of category -> index
        self.categories_index = {k: {} for k in CATEGORIES_2021}

        for dir_index, dir_name in enumerate(self.all_categories):
            # Directory names are "<00000>_<kingdom>_..._<genus>_<species>":
            # a 5-digit id plus 7 underscore-separated taxonomy pieces.
            pieces = dir_name.split("_")
            if len(pieces) != 8:
                raise RuntimeError(f"Unexpected category name {dir_name}, wrong number of pieces")
            if pieces[0] != f"{dir_index:05d}":
                raise RuntimeError(f"Unexpected category id {pieces[0]}, expecting {dir_index:05d}")
            cat_map = {}
            for cat, name in zip(CATEGORIES_2021, pieces[1:7]):
                if name in self.categories_index[cat]:
                    cat_id = self.categories_index[cat][name]
                else:
                    # First occurrence of this name: assign the next free index.
                    cat_id = len(self.categories_index[cat])
                    self.categories_index[cat][name] = cat_id
                cat_map[cat] = cat_id
            self.categories_map.append(cat_map)

    def _init_pre2021(self) -> None:
        """Initialize based on 2017-2019 layout"""

        # map: category type -> name of category -> index
        self.categories_index = {"super": {}}

        cat_index = 0
        super_categories = sorted(os.listdir(self.root))
        for sindex, scat in enumerate(super_categories):
            self.categories_index["super"][scat] = sindex
            subcategories = sorted(os.listdir(os.path.join(self.root, scat)))
            for subcat in subcategories:
                if self.version == "2017":
                    # this version does not use ids as directory names
                    subcat_i = cat_index
                    cat_index += 1
                else:
                    try:
                        subcat_i = int(subcat)
                    except ValueError:
                        raise RuntimeError(f"Unexpected non-numeric dir name: {subcat}")
                if subcat_i >= len(self.categories_map):
                    # Grow both parallel lists so subcat_i is a valid index.
                    # Use a comprehension (not [{}] * n, which would alias one
                    # shared dict across all new slots).
                    old_len = len(self.categories_map)
                    self.categories_map.extend({} for _ in range(subcat_i - old_len + 1))
                    self.all_categories.extend([""] * (subcat_i - old_len + 1))
                if self.categories_map[subcat_i]:
                    raise RuntimeError(f"Duplicate category {subcat}")
                self.categories_map[subcat_i] = {"super": sindex}
                self.all_categories[subcat_i] = os.path.join(scat, subcat)

        # validate the dictionary: every category id must have been filled in
        for cindex, c in enumerate(self.categories_map):
            if not c:
                raise RuntimeError(f"Missing category {cindex}")

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """

        cat_id, fname = self.index[index]
        img = Image.open(os.path.join(self.root, self.all_categories[cat_id], fname))

        target: Any = []
        for t in self.target_type:
            if t == "full":
                target.append(cat_id)
            else:
                target.append(self.categories_map[cat_id][t])
        # A single target type is returned bare, multiple ones as a tuple.
        target = tuple(target) if len(target) > 1 else target[0]

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.index)

    def category_name(self, category_type: str, category_id: int) -> str:
        """
        Args:
            category_type(str): one of "full", "kingdom", "phylum", "class", "order", "family", "genus" or "super"
            category_id(int): an index (class id) from this category

        Returns:
            the name of the category

        Raises:
            ValueError: if the category type or id is invalid.
        """
        if category_type == "full":
            return self.all_categories[category_id]
        if category_type not in self.categories_index:
            raise ValueError(f"Invalid category type '{category_type}'")
        # Reverse lookup in the name -> id mapping (avoid shadowing builtin `id`).
        for name, cat_id in self.categories_index[category_type].items():
            if cat_id == category_id:
                return name
        raise ValueError(f"Invalid category id {category_id} for {category_type}")

    def _check_integrity(self) -> bool:
        # "Integrity" here only means: the version directory exists and is
        # non-empty; individual files are not checksummed.
        return os.path.exists(self.root) and len(os.listdir(self.root)) > 0

    def download(self) -> None:
        if self._check_integrity():
            raise RuntimeError(
                f"The directory {self.root} already exists. "
                f"If you want to re-download or re-extract the images, delete the directory."
            )

        base_root = os.path.dirname(self.root)

        download_and_extract_archive(
            DATASET_URLS[self.version], base_root, filename=f"{self.version}.tgz", md5=DATASET_MD5[self.version]
        )

        # Determine the directory the archive extracted to. BUGFIX: the
        # previous code used str.rstrip(".tar.gz"), which strips *characters*
        # (any of '.', 't', 'a', 'r', 'g', 'z') from the end rather than the
        # suffix, and can mangle names; strip the exact suffix instead.
        archive_name = os.path.basename(DATASET_URLS[self.version])
        if archive_name.endswith(".tar.gz"):
            archive_name = archive_name[: -len(".tar.gz")]
        orig_dir_name = os.path.join(base_root, archive_name)
        if not os.path.exists(orig_dir_name):
            raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}")
        # Rename to root/<version> so __init__ can find the images.
        os.rename(orig_dir_name, self.root)
        print(f"Dataset version '{self.version}' has been downloaded and prepared for use")
|
wemm/lib/python3.10/site-packages/torchvision/datasets/lfw.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
|
| 7 |
+
from .vision import VisionDataset
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class _LFW(VisionDataset):
    """Shared machinery for the LFW datasets: download, integrity checking and
    image-path construction. Subclasses populate ``self.data``/targets."""

    base_folder = "lfw-py"  # subdirectory of ``root`` that holds all LFW files
    download_url_prefix = "http://vis-www.cs.umass.edu/lfw/"

    # image_set -> (extracted images dir, archive filename, archive md5)
    file_dict = {
        "original": ("lfw", "lfw.tgz", "a17d05bd522c52d84eca14327a23d494"),
        "funneled": ("lfw_funneled", "lfw-funneled.tgz", "1b42dfed7d15c9b2dd63d5e5840c86ad"),
        "deepfunneled": ("lfw-deepfunneled", "lfw-deepfunneled.tgz", "68331da3eb755a505a502b5aacb3c201"),
    }
    # md5 checksums of the annotation/label files, keyed by file name
    checksums = {
        "pairs.txt": "9f1ba174e4e1c508ff7cdf10ac338a7d",
        "pairsDevTest.txt": "5132f7440eb68cf58910c8a45a2ac10b",
        "pairsDevTrain.txt": "4f27cbf15b2da4a85c1907eb4181ad21",
        "people.txt": "450f0863dd89e85e73936a6d71a3474b",
        "peopleDevTest.txt": "e4bf5be0a43b5dcd9dc5ccfcb8fb19c5",
        "peopleDevTrain.txt": "54eaac34beb6d042ed3a7d883e247a21",
        "lfw-names.txt": "a6d0a479bd074669f656265a6e693f6d",
    }
    # split -> infix of the annotation file name ("" selects the 10-fold file)
    annot_file = {"10fold": "", "train": "DevTrain", "test": "DevTest"}
    names = "lfw-names.txt"  # file listing all identities (people view only)

    def __init__(
        self,
        root: str,
        split: str,
        image_set: str,
        view: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform)

        self.image_set = verify_str_arg(image_set.lower(), "image_set", self.file_dict.keys())
        images_dir, self.filename, self.md5 = self.file_dict[self.image_set]

        self.view = verify_str_arg(view.lower(), "view", ["people", "pairs"])
        self.split = verify_str_arg(split.lower(), "split", ["10fold", "train", "test"])
        # e.g. "people.txt", "pairsDevTrain.txt"
        self.labels_file = f"{self.view}{self.annot_file[self.split]}.txt"
        self.data: List[Any] = []  # filled in by the subclass

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.images_dir = os.path.join(self.root, images_dir)

    def _loader(self, path: str) -> Image.Image:
        # Open within a context manager so the file handle is closed promptly;
        # convert() materializes the pixel data before the file is closed.
        with open(path, "rb") as f:
            img = Image.open(f)
            return img.convert("RGB")

    def _check_integrity(self) -> bool:
        st1 = check_integrity(os.path.join(self.root, self.filename), self.md5)
        st2 = check_integrity(os.path.join(self.root, self.labels_file), self.checksums[self.labels_file])
        if not st1 or not st2:
            return False
        if self.view == "people":
            # The people view additionally requires the identity-names file.
            return check_integrity(os.path.join(self.root, self.names), self.checksums[self.names])
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        url = f"{self.download_url_prefix}{self.filename}"
        download_and_extract_archive(url, self.root, filename=self.filename, md5=self.md5)
        download_url(f"{self.download_url_prefix}{self.labels_file}", self.root)
        if self.view == "people":
            download_url(f"{self.download_url_prefix}{self.names}", self.root)

    def _get_path(self, identity: str, no: Union[int, str]) -> str:
        # Images are stored as <identity>/<identity>_<zero-padded index>.jpg
        return os.path.join(self.images_dir, identity, f"{identity}_{int(no):04d}.jpg")

    def extra_repr(self) -> str:
        return f"Alignment: {self.image_set}\nSplit: {self.split}"

    def __len__(self) -> int:
        return len(self.data)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class LFWPeople(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.
        split (string, optional): The image split to use. Can be one of ``train``, ``test``,
            ``10fold`` (default).
        image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or
            ``deepfunneled``. Defaults to ``funneled``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomRotation``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    def __init__(
        self,
        root: str,
        split: str = "10fold",
        image_set: str = "funneled",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, split, image_set, "people", transform, target_transform, download)

        self.class_to_idx = self._get_classes()
        self.data, self.targets = self._get_people()

    def _get_people(self) -> Tuple[List[str], List[int]]:
        """Parse the labels file into parallel lists of image paths and class indices."""
        data, targets = [], []
        with open(os.path.join(self.root, self.labels_file)) as f:
            lines = f.readlines()
            # The 10fold file starts with the fold count; the per-split files
            # start directly with the number of entries of their single fold.
            n_folds, s = (int(lines[0]), 1) if self.split == "10fold" else (1, 0)

            for _ in range(n_folds):
                n_lines = int(lines[s])
                # Each entry line is "<identity>\t<number of images>".
                people = [line.strip().split("\t") for line in lines[s + 1 : s + n_lines + 1]]
                s += n_lines + 1
                for identity, num_imgs in people:
                    # Image numbering within an identity is 1-based.
                    for num in range(1, int(num_imgs) + 1):
                        img = self._get_path(identity, num)
                        data.append(img)
                        targets.append(self.class_to_idx[identity])

        return data, targets

    def _get_classes(self) -> Dict[str, int]:
        """Read the names file and map each identity to its line index."""
        with open(os.path.join(self.root, self.names)) as f:
            lines = f.readlines()
            names = [line.strip().split()[0] for line in lines]
        class_to_idx = {name: i for i, name in enumerate(names)}
        return class_to_idx

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target) where target is the identity of the person.
        """
        img = self._loader(self.data[index])
        target = self.targets[index]

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def extra_repr(self) -> str:
        return super().extra_repr() + f"\nClasses (identities): {len(self.class_to_idx)}"
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class LFWPairs(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.
        split (string, optional): The image split to use. Can be one of ``train``, ``test``,
            ``10fold``. Defaults to ``10fold``.
        image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or
            ``deepfunneled``. Defaults to ``funneled``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomRotation``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    def __init__(
        self,
        root: str,
        split: str = "10fold",
        image_set: str = "funneled",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, split, image_set, "pairs", transform, target_transform, download)

        self.pair_names, self.data, self.targets = self._get_pairs(self.images_dir)

    def _get_pairs(self, images_dir: str) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[int]]:
        # Parse the pairs annotation file. Each fold lists n_pairs matched
        # lines ("name\tn1\tn2") followed by n_pairs unmatched lines
        # ("name1\tn1\tname2\tn2").
        pair_names, data, targets = [], [], []
        with open(os.path.join(self.root, self.labels_file)) as f:
            lines = f.readlines()
            if self.split == "10fold":
                # Header: "<n_folds>\t<n_pairs per half-fold>"
                n_folds, n_pairs = lines[0].split("\t")
                n_folds, n_pairs = int(n_folds), int(n_pairs)
            else:
                # Header: just the pair count; a single fold.
                n_folds, n_pairs = 1, int(lines[0])
            s = 1  # cursor into ``lines``, just past the header

            for fold in range(n_folds):
                matched_pairs = [line.strip().split("\t") for line in lines[s : s + n_pairs]]
                unmatched_pairs = [line.strip().split("\t") for line in lines[s + n_pairs : s + (2 * n_pairs)]]
                s += 2 * n_pairs
                for pair in matched_pairs:
                    # (identity, img_no_1, img_no_2) -> same person, target 1
                    img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[0], pair[2]), 1
                    pair_names.append((pair[0], pair[0]))
                    data.append((img1, img2))
                    targets.append(same)
                for pair in unmatched_pairs:
                    # (identity1, img_no_1, identity2, img_no_2) -> different, target 0
                    img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[2], pair[3]), 0
                    pair_names.append((pair[0], pair[2]))
                    data.append((img1, img2))
                    targets.append(same)

        return pair_names, data, targets

    def __getitem__(self, index: int) -> Tuple[Any, Any, int]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image1, image2, target) where target is `0` for different indentities and `1` for same identities.
        """
        img1, img2 = self.data[index]
        img1, img2 = self._loader(img1), self._loader(img2)
        target = self.targets[index]

        if self.transform is not None:
            img1, img2 = self.transform(img1), self.transform(img2)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img1, img2, target
|
wemm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import os.path
|
| 3 |
+
import pathlib
|
| 4 |
+
from typing import Any, Callable, Optional, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 9 |
+
from .vision import VisionDataset
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class OxfordIIITPet(VisionDataset):
    """`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.

    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
        target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
            ``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:

                - ``category`` (int): Label for one of the 37 pet categories.
                - ``segmentation`` (PIL image): Segmentation trimap of the image.

            If empty, ``None`` will be returned as target.

        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into
            ``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again.
    """

    # (url, md5) pairs: the images archive and the annotations archive.
    _RESOURCES = (
        ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
        ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
    )
    _VALID_TARGET_TYPES = ("category", "segmentation")

    def __init__(
        self,
        root: str,
        split: str = "trainval",
        target_types: Union[Sequence[str], str] = "category",
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ):
        # Validate user arguments before any filesystem work happens.
        self._split = verify_str_arg(split, "split", ("trainval", "test"))
        if isinstance(target_types, str):
            target_types = [target_types]
        self._target_types = [
            verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
        ]

        super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
        self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
        self._images_folder = self._base_folder / "images"
        self._anns_folder = self._base_folder / "annotations"
        self._segs_folder = self._anns_folder / "trimaps"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # The split file lists one sample per line; the first two fields are the
        # image id and its 1-based category label (extra fields are ignored).
        image_ids = []
        self._labels = []
        with open(self._anns_folder / f"{self._split}.txt") as file:
            for line in file:
                image_id, label, *_ = line.strip().split()
                image_ids.append(image_id)
                self._labels.append(int(label) - 1)  # convert to 0-based label

        # Derive human-readable class names from the image ids: an id looks like
        # "<raw_class_name>_<number>", so stripping the trailing "_<number>" and
        # title-casing the underscore-separated parts yields the class name.
        # The set comprehension de-duplicates (name, label) pairs; sorting by
        # label aligns list position with the numeric label.
        self.classes = [
            " ".join(part.title() for part in raw_cls.split("_"))
            for raw_cls, _ in sorted(
                {(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
                key=lambda image_id_and_label: image_id_and_label[1],
            )
        ]
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        # Materialize per-sample file paths for images and segmentation trimaps.
        self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
        self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]

    def __len__(self) -> int:
        return len(self._images)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, target)`` where target follows ``target_types``:
        a single value for one type, a tuple for several, ``None`` for none."""
        image = Image.open(self._images[idx]).convert("RGB")

        target: Any = []
        for target_type in self._target_types:
            if target_type == "category":
                target.append(self._labels[idx])
            else:  # target_type == "segmentation"
                target.append(Image.open(self._segs[idx]))

        # Collapse the collected targets: none -> None, one -> scalar, many -> tuple.
        if not target:
            target = None
        elif len(target) == 1:
            target = target[0]
        else:
            target = tuple(target)

        if self.transforms:
            image, target = self.transforms(image, target)

        return image, target

    def _check_exists(self) -> bool:
        # Both the images and annotations directories must be present.
        # Note the for/else: the else branch runs only when the loop was not
        # exited early, i.e. when every folder exists.
        for folder in (self._images_folder, self._anns_folder):
            if not (os.path.exists(folder) and os.path.isdir(folder)):
                return False
        else:
            return True

    def _download(self) -> None:
        if self._check_exists():
            return

        for url, md5 in self._RESOURCES:
            download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/phototour.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Any, Callable, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
from .utils import download_url
|
| 9 |
+
from .vision import VisionDataset
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PhotoTour(VisionDataset):
    """`Multi-view Stereo Correspondence <http://matthewalunbrown.com/patchdata/patchdata.html>`_ Dataset.

    .. note::

        We only provide the newer version of the dataset, since the authors state that it

        is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the
        patches are centred on real interest point detections, rather than being projections of 3D points as is the
        case in the old dataset.

        The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm.


    Args:
        root (string): Root directory where images are.
        name (string): Name of the dataset to load.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    # (url, archive filename, md5 checksum) for every supported dataset name.
    urls = {
        "notredame_harris": [
            "http://matthewalunbrown.com/patchdata/notredame_harris.zip",
            "notredame_harris.zip",
            "69f8c90f78e171349abdf0307afefe4d",
        ],
        "yosemite_harris": [
            "http://matthewalunbrown.com/patchdata/yosemite_harris.zip",
            "yosemite_harris.zip",
            "a73253d1c6fbd3ba2613c45065c00d46",
        ],
        "liberty_harris": [
            "http://matthewalunbrown.com/patchdata/liberty_harris.zip",
            "liberty_harris.zip",
            "c731fcfb3abb4091110d0ae8c7ba182c",
        ],
        "notredame": [
            "http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip",
            "notredame.zip",
            "509eda8535847b8c0a90bbb210c83484",
        ],
        "yosemite": ["http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip", "yosemite.zip", "533b2e8eb7ede31be40abc317b2fd4f0"],
        "liberty": ["http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip", "liberty.zip", "fdd9152f138ea5ef2091746689176414"],
    }
    # Per-dataset patch intensity statistics, exposed as self.mean / self.std.
    means = {
        "notredame": 0.4854,
        "yosemite": 0.4844,
        "liberty": 0.4437,
        "notredame_harris": 0.4854,
        "yosemite_harris": 0.4844,
        "liberty_harris": 0.4437,
    }
    stds = {
        "notredame": 0.1864,
        "yosemite": 0.1818,
        "liberty": 0.2019,
        "notredame_harris": 0.1864,
        "yosemite_harris": 0.1818,
        "liberty_harris": 0.2019,
    }
    # Number of patches in each dataset; caps how many patches are read when caching.
    lens = {
        "notredame": 468159,
        "yosemite": 633587,
        "liberty": 450092,
        "liberty_harris": 379587,
        "yosemite_harris": 450912,
        "notredame_harris": 325295,
    }
    image_ext = "bmp"  # patch sheets are stored as .bmp files
    info_file = "info.txt"  # per-patch 3D point IDs
    matches_files = "m50_100000_100000_0.txt"  # ground-truth match/non-match pairs

    def __init__(
        self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False
    ) -> None:
        """Set up paths, optionally download, build the cache, then load it."""
        super().__init__(root, transform=transform)
        self.name = name
        self.data_dir = os.path.join(self.root, name)  # extracted archive
        self.data_down = os.path.join(self.root, f"{name}.zip")  # downloaded archive
        self.data_file = os.path.join(self.root, f"{name}.pt")  # serialized cache

        self.train = train
        self.mean = self.means[name]
        self.std = self.stds[name]

        if download:
            self.download()

        # Build the serialized .pt cache on first use.
        if not self._check_datafile_exists():
            self.cache()

        # load the serialized data
        self.data, self.labels, self.matches = torch.load(self.data_file)

    def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (data1, data2, matches)

        In training mode a single (optionally transformed) patch is returned
        instead of a pair.
        """
        if self.train:
            data = self.data[index]
            if self.transform is not None:
                data = self.transform(data)
            return data
        # Test mode: index into the ground-truth matches; m = (idx1, idx2, is_match).
        m = self.matches[index]
        data1, data2 = self.data[m[0]], self.data[m[1]]
        if self.transform is not None:
            data1 = self.transform(data1)
            data2 = self.transform(data2)
        return data1, data2, m[2]

    def __len__(self) -> int:
        # Training iterates over patches, testing over match pairs.
        return len(self.data if self.train else self.matches)

    def _check_datafile_exists(self) -> bool:
        return os.path.exists(self.data_file)

    def _check_downloaded(self) -> bool:
        return os.path.exists(self.data_dir)

    def download(self) -> None:
        """Download and extract the archive unless the cache or data already exists."""
        if self._check_datafile_exists():
            print(f"# Found cached data {self.data_file}")
            return

        if not self._check_downloaded():
            # download files
            url = self.urls[self.name][0]
            filename = self.urls[self.name][1]
            md5 = self.urls[self.name][2]
            fpath = os.path.join(self.root, filename)

            download_url(url, self.root, filename, md5)

            print(f"# Extracting data {self.data_down}\n")

            import zipfile

            with zipfile.ZipFile(fpath, "r") as z:
                z.extractall(self.data_dir)

            # Remove the archive once extracted to save disk space.
            os.unlink(fpath)

    def cache(self) -> None:
        # process and save as torch files
        print(f"# Caching data {self.data_file}")

        # The cache is a (patches, labels, matches) triple.
        dataset = (
            read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
            read_info_file(self.data_dir, self.info_file),
            read_matches_files(self.data_dir, self.matches_files),
        )

        with open(self.data_file, "wb") as f:
            torch.save(dataset, f)

    def extra_repr(self) -> str:
        split = "Train" if self.train is True else "Test"
        return f"Split: {split}"
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor:
    """Return a ByteTensor with the first ``n`` 64x64 patches found in ``data_dir``."""

    def _to_array(img: Image.Image) -> np.ndarray:
        """Convert a PIL image into a 64x64 uint8 numpy array."""
        return np.array(img.getdata(), dtype=np.uint8).reshape(64, 64)

    # Collect the image files carrying the requested extension, sorted so the
    # patch ordering stays deterministic across runs.
    image_paths = sorted(
        os.path.join(data_dir, entry) for entry in os.listdir(data_dir) if entry.endswith(image_ext)
    )

    patches = []
    for path in image_paths:
        sheet = Image.open(path)
        # Each file is a grid of 64x64 patches; walk it row by row.
        for top in range(0, sheet.height, 64):
            for left in range(0, sheet.width, 64):
                patches.append(_to_array(sheet.crop((left, top, left + 64, top + 64))))
    return torch.ByteTensor(np.array(patches[:n]))
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
    """Return a LongTensor of labels read from ``info_file``.

    Only the first whitespace-separated field of each line (the ID of the
    3D point) is kept.
    """
    labels = []
    with open(os.path.join(data_dir, info_file)) as fh:
        for row in fh:
            labels.append(int(row.split()[0]))
    return torch.LongTensor(labels)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor:
    """Return a LongTensor of ground-truth matches.

    Each row is ``[patch_id1, patch_id2, is_match]`` where ``is_match`` is 1
    when the two patches share the same 3D point ID and 0 otherwise.
    """
    rows = []
    with open(os.path.join(data_dir, matches_file)) as fh:
        for raw in fh:
            fields = raw.split()
            rows.append([int(fields[0]), int(fields[3]), int(fields[1] == fields[4])])
    return torch.LongTensor(rows)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc
ADDED
|
Binary file (6.22 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/datasets/stl10.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path
|
| 2 |
+
from typing import Any, Callable, cast, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
from .utils import check_integrity, download_and_extract_archive, verify_str_arg
|
| 8 |
+
from .vision import VisionDataset
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class STL10(VisionDataset):
    """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``stl10_binary`` exists.
        split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
            Accordingly, dataset is selected.
        folds (int, optional): One of {0-9} or None.
            For training, loads one of the 10 pre-defined folds of 1k samples for the
            standard evaluation procedure. If no value is passed, loads the 5k samples.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    base_folder = "stl10_binary"
    url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
    filename = "stl10_binary.tar.gz"
    tgz_md5 = "91f7769df0f17e558f3565bffb0c7dfb"
    class_names_file = "class_names.txt"
    folds_list_file = "fold_indices.txt"
    # (file name, md5) pairs; the third train entry is the unlabeled data blob.
    train_list = [
        ["train_X.bin", "918c2871b30a85fa023e0c44e0bee87f"],
        ["train_y.bin", "5a34089d4802c674881badbb80307741"],
        ["unlabeled_X.bin", "5242ba1fed5e4be9e1e742405eb56ca4"],
    ]

    test_list = [["test_X.bin", "7f263ba9f9e0b06b93213547f721ac82"], ["test_y.bin", "36f9794fa4beb8a2c72628de14fa638e"]]
    splits = ("train", "train+unlabeled", "unlabeled", "test")

    def __init__(
        self,
        root: str,
        split: str = "train",
        folds: Optional[int] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = verify_str_arg(split, "split", self.splits)
        self.folds = self._verify_folds(folds)

        if download:
            self.download()
        elif not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # now load the picked numpy arrays
        self.labels: Optional[np.ndarray]
        if self.split == "train":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)

        elif self.split == "train+unlabeled":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)
            # Unlabeled samples are appended after the (possibly fold-restricted)
            # train samples and carry the sentinel label -1.
            unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
            self.data = np.concatenate((self.data, unlabeled_data))
            self.labels = np.concatenate((self.labels, np.asarray([-1] * unlabeled_data.shape[0])))

        elif self.split == "unlabeled":
            self.data, _ = self.__loadfile(self.train_list[2][0])
            self.labels = np.asarray([-1] * self.data.shape[0])
        else:  # self.split == 'test':
            self.data, self.labels = self.__loadfile(self.test_list[0][0], self.test_list[1][0])

        # Class names are optional; only load them when the file is present.
        class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
        if os.path.isfile(class_file):
            with open(class_file) as f:
                self.classes = f.read().splitlines()

    def _verify_folds(self, folds: Optional[int]) -> Optional[int]:
        """Validate ``folds``: must be ``None`` or an int in [0, 10)."""
        if folds is None:
            return folds
        elif isinstance(folds, int):
            if folds in range(10):
                return folds
            msg = "Value for argument folds should be in the range [0, 10), but got {}."
            raise ValueError(msg.format(folds))
        else:
            msg = "Expected type None or int for argument folds, but got type {}."
            raise ValueError(msg.format(type(folds)))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target: Optional[int]
        if self.labels is not None:
            img, target = self.data[index], int(self.labels[index])
        else:
            img, target = self.data[index], None

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return self.data.shape[0]

    def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """Read a binary STL10 data file (and optional labels file) into arrays."""
        labels = None
        if labels_file:
            path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
            with open(path_to_labels, "rb") as f:
                labels = np.fromfile(f, dtype=np.uint8) - 1  # 0-based

        path_to_data = os.path.join(self.root, self.base_folder, data_file)
        with open(path_to_data, "rb") as f:
            # read whole file in uint8 chunks
            everything = np.fromfile(f, dtype=np.uint8)
            images = np.reshape(everything, (-1, 3, 96, 96))
            # Stored column-major; swap the last two axes to get (N, C, H, W).
            images = np.transpose(images, (0, 1, 3, 2))

        return images, labels

    def _check_integrity(self) -> bool:
        """Return True when every data file is present with a matching md5."""
        for filename, md5 in self.train_list + self.test_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
        self._check_integrity()

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)

    def __load_folds(self, folds: Optional[int]) -> None:
        # loads one of the folds if specified
        if folds is None:
            return
        path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file)
        with open(path_to_folds) as f:
            str_idx = f.read().splitlines()[folds]
            # np.fromstring is deprecated (its text-parsing mode is slated for
            # removal); splitting the whitespace-separated indices explicitly
            # is the documented equivalent.
            list_idx = np.asarray(str_idx.split(), dtype=np.int64)
        self.data = self.data[list_idx, :, :, :]
        if self.labels is not None:
            self.labels = self.labels[list_idx]
|
wemm/lib/python3.10/site-packages/torchvision/datasets/sun397.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import Any, Callable, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import PIL.Image
|
| 5 |
+
|
| 6 |
+
from .utils import download_and_extract_archive
|
| 7 |
+
from .vision import VisionDataset
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class SUN397(VisionDataset):
    """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.

    The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
    397 categories with 108'754 images.

    Args:
        root (string): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
    _DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._data_dir = Path(self.root) / "SUN397"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # Each line of ClassName.txt carries a three-character prefix before the
        # category name; drop it and any trailing whitespace.
        with open(self._data_dir / "ClassName.txt") as fh:
            self.classes = [entry[3:].strip() for entry in fh]

        self.class_to_idx = {cls: idx for idx, cls in enumerate(self.classes)}
        self._image_files = list(self._data_dir.rglob("sun_*.jpg"))

        # Derive each image's label from its directory path relative to the data
        # root, skipping the first path component and the file name itself.
        self._labels = []
        for image_file in self._image_files:
            key = "/".join(image_file.relative_to(self._data_dir).parts[1:-1])
            self._labels.append(self.class_to_idx[key])

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the ``(image, label)`` pair at ``idx`` with transforms applied."""
        image_file = self._image_files[idx]
        label = self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def _check_exists(self) -> bool:
        return self._data_dir.is_dir()

    def _download(self) -> None:
        # Skip the download when the extracted directory is already in place.
        if self._check_exists():
            return
        download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
|
| 6 |
+
from .folder import find_classes, make_dataset
|
| 7 |
+
from .video_utils import VideoClips
|
| 8 |
+
from .vision import VisionDataset
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class UCF101(VisionDataset):
|
| 12 |
+
"""
|
| 13 |
+
`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
|
| 14 |
+
|
| 15 |
+
UCF101 is an action recognition video dataset.
|
| 16 |
+
This dataset consider every video as a collection of video clips of fixed size, specified
|
| 17 |
+
by ``frames_per_clip``, where the step in frames between each clip is given by
|
| 18 |
+
``step_between_clips``. The dataset itself can be downloaded from the dataset website;
|
| 19 |
+
annotations that ``annotation_path`` should be pointing to can be downloaded from `here
|
| 20 |
+
<https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`_.
|
| 21 |
+
|
| 22 |
+
To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
|
| 23 |
+
and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
|
| 24 |
+
elements will come from video 1, and the next three elements from video 2.
|
| 25 |
+
Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
|
| 26 |
+
frames in a video might be present.
|
| 27 |
+
|
| 28 |
+
Internally, it uses a VideoClips object to handle clip creation.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
root (string): Root directory of the UCF101 Dataset.
|
| 32 |
+
annotation_path (str): path to the folder containing the split files;
|
| 33 |
+
see docstring above for download instructions of these files
|
| 34 |
+
frames_per_clip (int): number of frames in a clip.
|
| 35 |
+
step_between_clips (int, optional): number of frames between each clip.
|
| 36 |
+
fold (int, optional): which fold to use. Should be between 1 and 3.
|
| 37 |
+
train (bool, optional): if ``True``, creates a dataset from the train split,
|
| 38 |
+
otherwise from the ``test`` split.
|
| 39 |
+
transform (callable, optional): A function/transform that takes in a TxHxWxC video
|
| 40 |
+
and returns a transformed version.
|
| 41 |
+
output_format (str, optional): The format of the output video tensors (before transforms).
|
| 42 |
+
Can be either "THWC" (default) or "TCHW".
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
tuple: A 3-tuple with the following entries:
|
| 46 |
+
|
| 47 |
+
- video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
|
| 48 |
+
- audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
|
| 49 |
+
and `L` is the number of points
|
| 50 |
+
- label (int): class of the video clip
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
    def __init__(
        self,
        root: str,
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        # See the class docstring for the meaning of the public parameters.
        # Underscore-prefixed parameters are forwarded verbatim to VideoClips
        # and are only honored by the non-pyav video backend.
        super().__init__(root)
        # UCF101 ships exactly three official train/test splits.
        if not 1 <= fold <= 3:
            raise ValueError(f"fold should be between 1 and 3, got {fold}")

        extensions = ("avi",)
        self.fold = fold
        self.train = train

        # Class names come from the directory structure under ``root``;
        # samples are (video_path, class_index) pairs.
        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
        video_list = [x[0] for x in self.samples]
        # Index every video into fixed-length clips; this decodes metadata for
        # all videos and can be slow unless ``_precomputed_metadata`` is given.
        video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # we bookkeep the full version of video clips because we want to be able
        # to return the metadata of full version rather than the subset version of
        # video clips
        self.full_video_clips = video_clips
        # Restrict the clip index to the videos belonging to the chosen
        # fold/split; ``self.indices`` maps subset video ids -> sample ids.
        self.indices = self._select_fold(video_list, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform
|
| 102 |
+
|
| 103 |
+
    @property
    def metadata(self) -> Dict[str, Any]:
        # Expose the metadata of the *full* (un-subsetted) clip index, so it
        # can be passed back as ``_precomputed_metadata`` to rebuild the
        # dataset (for any fold/split) without re-decoding every video.
        return self.full_video_clips.metadata
|
| 106 |
+
|
| 107 |
+
def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:
|
| 108 |
+
name = "train" if train else "test"
|
| 109 |
+
name = f"{name}list{fold:02d}.txt"
|
| 110 |
+
f = os.path.join(annotation_path, name)
|
| 111 |
+
selected_files = set()
|
| 112 |
+
with open(f) as fid:
|
| 113 |
+
data = fid.readlines()
|
| 114 |
+
data = [x.strip().split(" ")[0] for x in data]
|
| 115 |
+
data = [os.path.join(self.root, *x.split("/")) for x in data]
|
| 116 |
+
selected_files.update(data)
|
| 117 |
+
indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
|
| 118 |
+
return indices
|
| 119 |
+
|
| 120 |
+
    def __len__(self) -> int:
        # The dataset length is the number of *clips* in the selected split,
        # not the number of videos.
        return self.video_clips.num_clips()
|
| 122 |
+
|
| 123 |
+
def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
|
| 124 |
+
video, audio, info, video_idx = self.video_clips.get_clip(idx)
|
| 125 |
+
label = self.samples[self.indices[video_idx]][1]
|
| 126 |
+
|
| 127 |
+
if self.transform is not None:
|
| 128 |
+
video = self.transform(video)
|
| 129 |
+
|
| 130 |
+
return video, audio, label
|
wemm/lib/python3.10/site-packages/torchvision/datasets/usps.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Any, Callable, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
from .utils import download_url
|
| 8 |
+
from .vision import VisionDataset
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class USPS(VisionDataset):
    """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.

    The on-disk format is libsvm-style: ``[label [index:value ]*256 \\n] * num_lines``,
    where ``label`` lies in ``[1, 10]`` and each pixel value lies in ``[-1, 1]``.
    This class shifts the labels into ``[0, 9]`` and rescales pixels into ``[0, 255]``.

    Args:
        root (string): Root directory of dataset to store ``USPS`` data files.
        train (bool, optional): If True, creates dataset from ``usps.bz2``,
            otherwise from ``usps.t.bz2``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    # split name -> (download url, local filename, md5 checksum)
    split_list = {
        "train": [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
            "usps.bz2",
            "ec16c51db3855ca6c91edd34d0e9b197",
        ],
        "test": [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
            "usps.t.bz2",
            "8ea070ee2aca1ac39742fdd1ef5ed118",
        ],
    }

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        url, filename, checksum = self.split_list["train" if train else "test"]
        full_path = os.path.join(self.root, filename)

        # Fetch the archive only when asked to and when it is not already there.
        if download and not os.path.exists(full_path):
            download_url(url, self.root, filename, md5=checksum)

        import bz2

        with bz2.open(full_path) as fp:
            # Each line: "<label> <idx>:<val> ... <idx>:<val>" with 256 pixels.
            lines = [raw.decode().split() for raw in fp.readlines()]

        pixel_rows = [[entry.split(":")[-1] for entry in line[1:]] for line in lines]
        images = np.asarray(pixel_rows, dtype=np.float32).reshape((-1, 16, 16))
        # Rescale pixel values from [-1, 1] to [0, 255].
        images = ((images + 1) / 2 * 255).astype(dtype=np.uint8)

        self.data = images
        # Labels are stored as 1..10 on disk; shift them into 0..9.
        self.targets = [int(line[0]) - 1 for line in lines]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target = int(self.targets[index])
        # Convert to a PIL Image so the interface is consistent with all
        # other vision datasets.
        img = Image.fromarray(self.data[index], mode="L")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py
ADDED
|
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import bisect
|
| 2 |
+
import math
|
| 3 |
+
import warnings
|
| 4 |
+
from fractions import Fraction
|
| 5 |
+
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, TypeVar, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torchvision.io import _probe_video_from_file, _read_video_from_file, read_video, read_video_timestamps
|
| 9 |
+
|
| 10 |
+
from .utils import tqdm
|
| 11 |
+
|
| 12 |
+
T = TypeVar("T")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int:
    """Convert a presentation timestamp between two time bases.

    Args:
        pts: presentation timestamp expressed in ``timebase_from`` units.
        timebase_from: original time base (a :class:`~fractions.Fraction`).
        timebase_to: target time base (a :class:`~fractions.Fraction`).
        round_func: rounding function applied to the exact rational result
            (defaults to :func:`math.floor`).
    """
    # Work in exact rational arithmetic and only round at the end.
    exact = Fraction(pts) * timebase_from / timebase_to
    return round_func(exact)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor:
    """Return all consecutive windows of ``size`` elements of a 1-d tensor.

    Like ``Tensor.unfold`` but with a ``dilation`` parameter: consecutive
    windows start ``step`` elements apart, and the elements inside one window
    are ``dilation`` apart. The result is a zero-copy strided view of
    ``tensor`` with shape ``(num_windows, size)``.
    """
    if tensor.dim() != 1:
        raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}")
    base_stride = tensor.stride(0)
    # Number of input elements spanned by a single window.
    span = dilation * (size - 1) + 1
    n_windows = (tensor.numel() - span) // step + 1
    if n_windows < 1:
        # Not enough elements for even one window: return an empty view.
        n_windows = 0
    return torch.as_strided(tensor, (n_windows, size), (step * base_stride, dilation * base_stride))
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class _VideoTimestampsDataset:
    """
    Dataset used to parallelize the reading of the timestamps
    of a list of videos, given their paths in the filesystem.

    Used in VideoClips and defined at top level, so it can be
    pickled when forking.
    """

    def __init__(self, video_paths: List[str]) -> None:
        # Paths are stored as-is; decoding happens lazily in __getitem__.
        self.video_paths = video_paths

    def __len__(self) -> int:
        return len(self.video_paths)

    def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]:
        # Returns (pts_list, video_fps) for the idx-th video, as produced by
        # torchvision.io.read_video_timestamps; fps is None when the file has
        # no video stream.
        return read_video_timestamps(self.video_paths[idx])
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _collate_fn(x: T) -> T:
    """
    Dummy collate function to be used with _VideoTimestampsDataset

    Returns the batch unchanged so the DataLoader does not try to stack the
    per-video (pts_list, fps) pairs into tensors.
    """
    return x
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class VideoClips:
    """
    Given a list of video files, computes all consecutive subvideos of size
    `clip_length_in_frames`, where the distance between each subvideo in the
    same video is defined by `frames_between_clips`.
    If `frame_rate` is specified, it will also resample all the videos to have
    the same frame rate, and the clips will refer to this frame rate.

    Creating this instance the first time is time-consuming, as it needs to
    decode all the videos in `video_paths`. It is recommended that you
    cache the results after instantiation of the class.

    Recreating the clips for different clip lengths is fast, and can be done
    with the `compute_clips` method.

    Args:
        video_paths (List[str]): paths to the video files
        clip_length_in_frames (int): size of a clip in number of frames
        frames_between_clips (int): step (in frames) between each clip
        frame_rate (int, optional): if specified, it will resample the video
            so that it has `frame_rate`, and then the clips will be defined
            on the resampled video
        num_workers (int): how many subprocesses to use for data loading.
            0 means that the data will be loaded in the main process. (default: 0)
        output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
    """

    def __init__(
        self,
        video_paths: List[str],
        clip_length_in_frames: int = 16,
        frames_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 0,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _video_max_dimension: int = 0,
        _audio_samples: int = 0,
        _audio_channels: int = 0,
        output_format: str = "THWC",
    ) -> None:

        self.video_paths = video_paths
        self.num_workers = num_workers

        # these options are not valid for pyav backend
        self._video_width = _video_width
        self._video_height = _video_height
        self._video_min_dimension = _video_min_dimension
        self._video_max_dimension = _video_max_dimension
        self._audio_samples = _audio_samples
        self._audio_channels = _audio_channels
        self.output_format = output_format.upper()
        if self.output_format not in ("THWC", "TCHW"):
            raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")

        # Decoding timestamps for every video is expensive; skip it when the
        # caller supplies previously computed metadata.
        if _precomputed_metadata is None:
            self._compute_frame_pts()
        else:
            self._init_from_metadata(_precomputed_metadata)
        self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate)

    def _compute_frame_pts(self) -> None:
        # Populate self.video_pts (per-video frame timestamps) and
        # self.video_fps by decoding every video's timestamps.
        self.video_pts = []
        self.video_fps = []

        # strategy: use a DataLoader to parallelize read_video_timestamps
        # so need to create a dummy dataset first
        import torch.utils.data

        dl: torch.utils.data.DataLoader = torch.utils.data.DataLoader(
            _VideoTimestampsDataset(self.video_paths),  # type: ignore[arg-type]
            batch_size=16,
            num_workers=self.num_workers,
            collate_fn=_collate_fn,
        )

        with tqdm(total=len(dl)) as pbar:
            for batch in dl:
                pbar.update(1)
                clips, fps = list(zip(*batch))
                # we need to specify dtype=torch.long because for empty list,
                # torch.as_tensor will use torch.float as default dtype. This
                # happens when decoding fails and no pts is returned in the list.
                clips = [torch.as_tensor(c, dtype=torch.long) for c in clips]
                self.video_pts.extend(clips)
                self.video_fps.extend(fps)

    def _init_from_metadata(self, metadata: Dict[str, Any]) -> None:
        # Restore the decoded state from a metadata dict produced by the
        # ``metadata`` property (or ``subset``) of another instance.
        self.video_paths = metadata["video_paths"]
        assert len(self.video_paths) == len(metadata["video_pts"])
        self.video_pts = metadata["video_pts"]
        assert len(self.video_paths) == len(metadata["video_fps"])
        self.video_fps = metadata["video_fps"]

    @property
    def metadata(self) -> Dict[str, Any]:
        # The returned dict is accepted by ``_precomputed_metadata`` to avoid
        # re-decoding the videos when rebuilding a VideoClips instance.
        _metadata = {
            "video_paths": self.video_paths,
            "video_pts": self.video_pts,
            "video_fps": self.video_fps,
        }
        return _metadata

    def subset(self, indices: List[int]) -> "VideoClips":
        # Build a new VideoClips restricted to the selected videos, reusing
        # the already-decoded timestamps via _precomputed_metadata.
        video_paths = [self.video_paths[i] for i in indices]
        video_pts = [self.video_pts[i] for i in indices]
        video_fps = [self.video_fps[i] for i in indices]
        metadata = {
            "video_paths": video_paths,
            "video_pts": video_pts,
            "video_fps": video_fps,
        }
        return type(self)(
            video_paths,
            self.num_frames,
            self.step,
            self.frame_rate,
            _precomputed_metadata=metadata,
            num_workers=self.num_workers,
            _video_width=self._video_width,
            _video_height=self._video_height,
            _video_min_dimension=self._video_min_dimension,
            _video_max_dimension=self._video_max_dimension,
            _audio_samples=self._audio_samples,
            _audio_channels=self._audio_channels,
            output_format=self.output_format,
        )

    @staticmethod
    def compute_clips_for_video(
        video_pts: torch.Tensor, num_frames: int, step: int, fps: int, frame_rate: Optional[int] = None
    ) -> Tuple[torch.Tensor, Union[List[slice], torch.Tensor]]:
        # Returns (clips, idxs): ``clips`` holds the frame timestamps of each
        # clip; ``idxs`` holds the resampling indices (a slice when resampling
        # is a uniform stride, a tensor otherwise).
        if fps is None:
            # if for some reason the video doesn't have fps (because doesn't have a video stream)
            # set the fps to 1. The value doesn't matter, because video_pts is empty anyway
            fps = 1
        if frame_rate is None:
            frame_rate = fps
        total_frames = len(video_pts) * (float(frame_rate) / fps)
        _idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
        video_pts = video_pts[_idxs]
        clips = unfold(video_pts, num_frames, step)
        if not clips.numel():
            warnings.warn(
                "There aren't enough frames in the current video to get a clip for the given clip length and "
                "frames between clips. The video (and potentially others) will be skipped."
            )
        idxs: Union[List[slice], torch.Tensor]
        if isinstance(_idxs, slice):
            idxs = [_idxs] * len(clips)
        else:
            idxs = unfold(_idxs, num_frames, step)
        return clips, idxs

    def compute_clips(self, num_frames: int, step: int, frame_rate: Optional[int] = None) -> None:
        """
        Compute all consecutive sequences of clips from video_pts.
        Always returns clips of size `num_frames`, meaning that the
        last few frames in a video can potentially be dropped.

        Args:
            num_frames (int): number of frames for the clip
            step (int): distance between two clips
            frame_rate (int, optional): The frame rate
        """
        self.num_frames = num_frames
        self.step = step
        self.frame_rate = frame_rate
        self.clips = []
        self.resampling_idxs = []
        for video_pts, fps in zip(self.video_pts, self.video_fps):
            clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
            self.clips.append(clips)
            self.resampling_idxs.append(idxs)
        # Cumulative clip counts allow O(log n) flat-index -> (video, clip)
        # lookup via bisect in get_clip_location.
        clip_lengths = torch.as_tensor([len(v) for v in self.clips])
        self.cumulative_sizes = clip_lengths.cumsum(0).tolist()

    def __len__(self) -> int:
        return self.num_clips()

    def num_videos(self) -> int:
        return len(self.video_paths)

    def num_clips(self) -> int:
        """
        Number of subclips that are available in the video list.
        """
        return self.cumulative_sizes[-1]

    def get_clip_location(self, idx: int) -> Tuple[int, int]:
        """
        Converts a flattened representation of the indices into a video_idx, clip_idx
        representation.
        """
        video_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if video_idx == 0:
            clip_idx = idx
        else:
            clip_idx = idx - self.cumulative_sizes[video_idx - 1]
        return video_idx, clip_idx

    @staticmethod
    def _resample_video_idx(num_frames: int, original_fps: int, new_fps: int) -> Union[slice, torch.Tensor]:
        # Map frame indices at ``new_fps`` back to indices at ``original_fps``.
        step = float(original_fps) / new_fps
        if step.is_integer():
            # optimization: if step is integer, don't need to perform
            # advanced indexing
            step = int(step)
            return slice(None, None, step)
        idxs = torch.arange(num_frames, dtype=torch.float32) * step
        idxs = idxs.floor().to(torch.int64)
        return idxs

    def get_clip(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any], int]:
        """
        Gets a subclip from a list of videos.

        Args:
            idx (int): index of the subclip. Must be between 0 and num_clips().

        Returns:
            video (Tensor)
            audio (Tensor)
            info (Dict)
            video_idx (int): index of the video in `video_paths`
        """
        if idx >= self.num_clips():
            raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)")
        video_idx, clip_idx = self.get_clip_location(idx)
        video_path = self.video_paths[video_idx]
        clip_pts = self.clips[video_idx][clip_idx]

        # Imported lazily to avoid a circular import at module load time.
        from torchvision import get_video_backend

        backend = get_video_backend()

        if backend == "pyav":
            # check for invalid options
            if self._video_width != 0:
                raise ValueError("pyav backend doesn't support _video_width != 0")
            if self._video_height != 0:
                raise ValueError("pyav backend doesn't support _video_height != 0")
            if self._video_min_dimension != 0:
                raise ValueError("pyav backend doesn't support _video_min_dimension != 0")
            if self._video_max_dimension != 0:
                raise ValueError("pyav backend doesn't support _video_max_dimension != 0")
            if self._audio_samples != 0:
                raise ValueError("pyav backend doesn't support _audio_samples != 0")

        if backend == "pyav":
            # pyav decodes directly from a [start_pts, end_pts] range.
            start_pts = clip_pts[0].item()
            end_pts = clip_pts[-1].item()
            video, audio, info = read_video(video_path, start_pts, end_pts)
        else:
            # video_reader backend: probe the container first, then convert
            # the video pts range into the audio time base before decoding.
            _info = _probe_video_from_file(video_path)
            video_fps = _info.video_fps
            audio_fps = None

            video_start_pts = cast(int, clip_pts[0].item())
            video_end_pts = cast(int, clip_pts[-1].item())

            audio_start_pts, audio_end_pts = 0, -1
            audio_timebase = Fraction(0, 1)
            video_timebase = Fraction(_info.video_timebase.numerator, _info.video_timebase.denominator)
            if _info.has_audio:
                audio_timebase = Fraction(_info.audio_timebase.numerator, _info.audio_timebase.denominator)
                audio_start_pts = pts_convert(video_start_pts, video_timebase, audio_timebase, math.floor)
                audio_end_pts = pts_convert(video_end_pts, video_timebase, audio_timebase, math.ceil)
                audio_fps = _info.audio_sample_rate
            video, audio, _ = _read_video_from_file(
                video_path,
                video_width=self._video_width,
                video_height=self._video_height,
                video_min_dimension=self._video_min_dimension,
                video_max_dimension=self._video_max_dimension,
                video_pts_range=(video_start_pts, video_end_pts),
                video_timebase=video_timebase,
                audio_samples=self._audio_samples,
                audio_channels=self._audio_channels,
                audio_pts_range=(audio_start_pts, audio_end_pts),
                audio_timebase=audio_timebase,
            )

            info = {"video_fps": video_fps}
            if audio_fps is not None:
                info["audio_fps"] = audio_fps

        if self.frame_rate is not None:
            # Apply the per-clip resampling indices (shifted to start at 0)
            # so the clip matches the requested frame rate.
            resampling_idx = self.resampling_idxs[video_idx][clip_idx]
            if isinstance(resampling_idx, torch.Tensor):
                resampling_idx = resampling_idx - resampling_idx[0]
            video = video[resampling_idx]
            info["video_fps"] = self.frame_rate
        assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}"

        if self.output_format == "TCHW":
            # [T,H,W,C] --> [T,C,H,W]
            video = video.permute(0, 3, 1, 2)

        return video, audio, info, video_idx

    def __getstate__(self) -> Dict[str, Any]:
        # Pickle a compact representation: concatenate all per-video pts into
        # one array and drop the derived attributes (recomputed on unpickle).
        video_pts_sizes = [len(v) for v in self.video_pts]
        # To be back-compatible, we convert data to dtype torch.long as needed
        # because for empty list, in legacy implementation, torch.as_tensor will
        # use torch.float as default dtype. This happens when decoding fails and
        # no pts is returned in the list.
        video_pts = [x.to(torch.int64) for x in self.video_pts]
        # video_pts can be an empty list if no frames have been decoded
        if video_pts:
            video_pts = torch.cat(video_pts)  # type: ignore[assignment]
            # avoid bug in https://github.com/pytorch/pytorch/issues/32351
            # TODO: Revert it once the bug is fixed.
            video_pts = video_pts.numpy()  # type: ignore[attr-defined]

        # make a copy of the fields of self
        d = self.__dict__.copy()
        d["video_pts_sizes"] = video_pts_sizes
        d["video_pts"] = video_pts
        # delete the following attributes to reduce the size of dictionary. They
        # will be re-computed in "__setstate__()"
        del d["clips"]
        del d["resampling_idxs"]
        del d["cumulative_sizes"]

        # for backwards-compatibility
        d["_version"] = 2
        return d

    def __setstate__(self, d: Dict[str, Any]) -> None:
        # for backwards-compatibility
        if "_version" not in d:
            self.__dict__ = d
            return

        # Split the flat pts array back into per-video tensors using the
        # stored sizes, then rebuild the derived clip index.
        video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64)
        video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0)
        # don't need this info anymore
        del d["video_pts_sizes"]

        d["video_pts"] = video_pts
        self.__dict__ = d
        # recompute attributes "clips", "resampling_idxs" and other derivative ones
        self.compute_clips(self.num_frames, self.step, self.frame_rate)
|
wemm/lib/python3.10/site-packages/torchvision/datasets/voc.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import os
|
| 3 |
+
from xml.etree.ElementTree import Element as ET_Element
|
| 4 |
+
|
| 5 |
+
from .vision import VisionDataset
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from defusedxml.ElementTree import parse as ET_parse
|
| 9 |
+
except ImportError:
|
| 10 |
+
from xml.etree.ElementTree import parse as ET_parse
|
| 11 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
| 12 |
+
|
| 13 |
+
from PIL import Image
|
| 14 |
+
|
| 15 |
+
from .utils import download_and_extract_archive, verify_str_arg
|
| 16 |
+
|
| 17 |
+
# Download metadata per PASCAL VOC challenge year: archive URL, the local
# filename to save it under, its MD5 checksum, and the directory the archive
# extracts into. "2007-test" is a separate entry because test annotations
# were only publicly released for the 2007 challenge.
DATASET_YEAR_DICT = {
    "2012": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar",
        "filename": "VOCtrainval_11-May-2012.tar",
        "md5": "6cd6e144f989b92b3379bac3b3de84fd",
        "base_dir": os.path.join("VOCdevkit", "VOC2012"),
    },
    "2011": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar",
        "filename": "VOCtrainval_25-May-2011.tar",
        "md5": "6c3384ef61512963050cb5d687e5bf1e",
        # The 2011 archive nests the devkit one level deeper than other years.
        "base_dir": os.path.join("TrainVal", "VOCdevkit", "VOC2011"),
    },
    "2010": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar",
        "filename": "VOCtrainval_03-May-2010.tar",
        "md5": "da459979d0c395079b5c75ee67908abb",
        "base_dir": os.path.join("VOCdevkit", "VOC2010"),
    },
    "2009": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar",
        "filename": "VOCtrainval_11-May-2009.tar",
        "md5": "a3e00b113cfcfebf17e343f59da3caa1",
        "base_dir": os.path.join("VOCdevkit", "VOC2009"),
    },
    "2008": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar",
        # Fixed: this entry previously listed the 2012 archive name
        # ("VOCtrainval_11-May-2012.tar"), which made the downloaded 2008
        # archive be saved under — and collide with — the 2012 filename.
        "filename": "VOCtrainval_14-Jul-2008.tar",
        "md5": "2629fa636546599198acfcfbfcf1904a",
        "base_dir": os.path.join("VOCdevkit", "VOC2008"),
    },
    "2007": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar",
        "filename": "VOCtrainval_06-Nov-2007.tar",
        "md5": "c52e279531787c972589f7e41ab4ae64",
        "base_dir": os.path.join("VOCdevkit", "VOC2007"),
    },
    "2007-test": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar",
        "filename": "VOCtest_06-Nov-2007.tar",
        "md5": "b6e924de25625d8de591ea690078ad9f",
        "base_dir": os.path.join("VOCdevkit", "VOC2007"),
    },
}
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class _VOCBase(VisionDataset):
    """Common machinery for the Pascal VOC datasets.

    Subclasses set ``_SPLITS_DIR``, ``_TARGET_DIR`` and ``_TARGET_FILE_EXT``;
    this base validates the arguments, optionally downloads the archive, and
    builds the parallel ``images``/``targets`` path lists from the split file.
    """

    _SPLITS_DIR: str
    _TARGET_DIR: str
    _TARGET_FILE_EXT: str

    def __init__(
        self,
        root: str,
        year: str = "2012",
        image_set: str = "train",
        download: bool = False,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
    ):
        super().__init__(root, transforms, transform, target_transform)

        self.year = verify_str_arg(year, "year", valid_values=[str(yr) for yr in range(2007, 2013)])

        # The "test" split was only published for the 2007 challenge.
        allowed_sets = ["train", "trainval", "val"]
        if year == "2007":
            allowed_sets.append("test")
        self.image_set = verify_str_arg(image_set, "image_set", allowed_sets)

        meta_key = "2007-test" if (year, image_set) == ("2007", "test") else year
        meta = DATASET_YEAR_DICT[meta_key]

        self.url = meta["url"]
        self.filename = meta["filename"]
        self.md5 = meta["md5"]

        voc_root = os.path.join(self.root, meta["base_dir"])

        if download:
            download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5)

        if not os.path.isdir(voc_root):
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # Image IDs for this split, one per line in the split file.
        split_file = os.path.join(voc_root, "ImageSets", self._SPLITS_DIR, image_set.rstrip("\n") + ".txt")
        with open(split_file) as fh:
            ids = [line.strip() for line in fh.readlines()]

        image_dir = os.path.join(voc_root, "JPEGImages")
        target_dir = os.path.join(voc_root, self._TARGET_DIR)
        self.images = [os.path.join(image_dir, name + ".jpg") for name in ids]
        self.targets = [os.path.join(target_dir, name + self._TARGET_FILE_EXT) for name in ids]

        assert len(self.images) == len(self.targets)

    def __len__(self) -> int:
        return len(self.images)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class VOCSegmentation(_VOCBase):
|
| 121 |
+
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
|
| 122 |
+
|
| 123 |
+
Args:
|
| 124 |
+
root (string): Root directory of the VOC Dataset.
|
| 125 |
+
year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
|
| 126 |
+
image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
|
| 127 |
+
``year=="2007"``, can also be ``"test"``.
|
| 128 |
+
download (bool, optional): If true, downloads the dataset from the internet and
|
| 129 |
+
puts it in root directory. If dataset is already downloaded, it is not
|
| 130 |
+
downloaded again.
|
| 131 |
+
transform (callable, optional): A function/transform that takes in an PIL image
|
| 132 |
+
and returns a transformed version. E.g, ``transforms.RandomCrop``
|
| 133 |
+
target_transform (callable, optional): A function/transform that takes in the
|
| 134 |
+
target and transforms it.
|
| 135 |
+
transforms (callable, optional): A function/transform that takes input sample and its target as entry
|
| 136 |
+
and returns a transformed version.
|
| 137 |
+
"""
|
| 138 |
+
|
| 139 |
+
_SPLITS_DIR = "Segmentation"
|
| 140 |
+
_TARGET_DIR = "SegmentationClass"
|
| 141 |
+
_TARGET_FILE_EXT = ".png"
|
| 142 |
+
|
| 143 |
+
@property
|
| 144 |
+
def masks(self) -> List[str]:
|
| 145 |
+
return self.targets
|
| 146 |
+
|
| 147 |
+
def __getitem__(self, index: int) -> Tuple[Any, Any]:
|
| 148 |
+
"""
|
| 149 |
+
Args:
|
| 150 |
+
index (int): Index
|
| 151 |
+
|
| 152 |
+
Returns:
|
| 153 |
+
tuple: (image, target) where target is the image segmentation.
|
| 154 |
+
"""
|
| 155 |
+
img = Image.open(self.images[index]).convert("RGB")
|
| 156 |
+
target = Image.open(self.masks[index])
|
| 157 |
+
|
| 158 |
+
if self.transforms is not None:
|
| 159 |
+
img, target = self.transforms(img, target)
|
| 160 |
+
|
| 161 |
+
return img, target
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class VOCDetection(_VOCBase):
|
| 165 |
+
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
root (string): Root directory of the VOC Dataset.
|
| 169 |
+
year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
|
| 170 |
+
image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
|
| 171 |
+
``year=="2007"``, can also be ``"test"``.
|
| 172 |
+
download (bool, optional): If true, downloads the dataset from the internet and
|
| 173 |
+
puts it in root directory. If dataset is already downloaded, it is not
|
| 174 |
+
downloaded again.
|
| 175 |
+
(default: alphabetic indexing of VOC's 20 classes).
|
| 176 |
+
transform (callable, optional): A function/transform that takes in an PIL image
|
| 177 |
+
and returns a transformed version. E.g, ``transforms.RandomCrop``
|
| 178 |
+
target_transform (callable, required): A function/transform that takes in the
|
| 179 |
+
target and transforms it.
|
| 180 |
+
transforms (callable, optional): A function/transform that takes input sample and its target as entry
|
| 181 |
+
and returns a transformed version.
|
| 182 |
+
"""
|
| 183 |
+
|
| 184 |
+
_SPLITS_DIR = "Main"
|
| 185 |
+
_TARGET_DIR = "Annotations"
|
| 186 |
+
_TARGET_FILE_EXT = ".xml"
|
| 187 |
+
|
| 188 |
+
@property
|
| 189 |
+
def annotations(self) -> List[str]:
|
| 190 |
+
return self.targets
|
| 191 |
+
|
| 192 |
+
def __getitem__(self, index: int) -> Tuple[Any, Any]:
|
| 193 |
+
"""
|
| 194 |
+
Args:
|
| 195 |
+
index (int): Index
|
| 196 |
+
|
| 197 |
+
Returns:
|
| 198 |
+
tuple: (image, target) where target is a dictionary of the XML tree.
|
| 199 |
+
"""
|
| 200 |
+
img = Image.open(self.images[index]).convert("RGB")
|
| 201 |
+
target = self.parse_voc_xml(ET_parse(self.annotations[index]).getroot())
|
| 202 |
+
|
| 203 |
+
if self.transforms is not None:
|
| 204 |
+
img, target = self.transforms(img, target)
|
| 205 |
+
|
| 206 |
+
return img, target
|
| 207 |
+
|
| 208 |
+
@staticmethod
|
| 209 |
+
def parse_voc_xml(node: ET_Element) -> Dict[str, Any]:
|
| 210 |
+
voc_dict: Dict[str, Any] = {}
|
| 211 |
+
children = list(node)
|
| 212 |
+
if children:
|
| 213 |
+
def_dic: Dict[str, Any] = collections.defaultdict(list)
|
| 214 |
+
for dc in map(VOCDetection.parse_voc_xml, children):
|
| 215 |
+
for ind, v in dc.items():
|
| 216 |
+
def_dic[ind].append(v)
|
| 217 |
+
if node.tag == "annotation":
|
| 218 |
+
def_dic["object"] = [def_dic["object"]]
|
| 219 |
+
voc_dict = {node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}}
|
| 220 |
+
if node.text:
|
| 221 |
+
text = node.text.strip()
|
| 222 |
+
if not children:
|
| 223 |
+
voc_dict[node.tag] = text
|
| 224 |
+
return voc_dict
|
wemm/lib/python3.10/site-packages/torchvision/datasets/widerface.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from os.path import abspath, expanduser
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
from .utils import download_and_extract_archive, download_file_from_google_drive, extract_archive, verify_str_arg
|
| 9 |
+
from .vision import VisionDataset
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class WIDERFace(VisionDataset):
|
| 13 |
+
"""`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
root (string): Root directory where images and annotations are downloaded to.
|
| 17 |
+
Expects the following folder structure if download=False:
|
| 18 |
+
|
| 19 |
+
.. code::
|
| 20 |
+
|
| 21 |
+
<root>
|
| 22 |
+
└── widerface
|
| 23 |
+
├── wider_face_split ('wider_face_split.zip' if compressed)
|
| 24 |
+
├── WIDER_train ('WIDER_train.zip' if compressed)
|
| 25 |
+
├── WIDER_val ('WIDER_val.zip' if compressed)
|
| 26 |
+
└── WIDER_test ('WIDER_test.zip' if compressed)
|
| 27 |
+
split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.
|
| 28 |
+
Defaults to ``train``.
|
| 29 |
+
transform (callable, optional): A function/transform that takes in a PIL image
|
| 30 |
+
and returns a transformed version. E.g, ``transforms.RandomCrop``
|
| 31 |
+
target_transform (callable, optional): A function/transform that takes in the
|
| 32 |
+
target and transforms it.
|
| 33 |
+
download (bool, optional): If true, downloads the dataset from the internet and
|
| 34 |
+
puts it in root directory. If dataset is already downloaded, it is not
|
| 35 |
+
downloaded again.
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
BASE_FOLDER = "widerface"
|
| 40 |
+
FILE_LIST = [
|
| 41 |
+
# File ID MD5 Hash Filename
|
| 42 |
+
("15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"),
|
| 43 |
+
("1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"),
|
| 44 |
+
("1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"),
|
| 45 |
+
]
|
| 46 |
+
ANNOTATIONS_FILE = (
|
| 47 |
+
"http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip",
|
| 48 |
+
"0e3767bcf0e326556d407bf5bff5d27c",
|
| 49 |
+
"wider_face_split.zip",
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
root: str,
|
| 55 |
+
split: str = "train",
|
| 56 |
+
transform: Optional[Callable] = None,
|
| 57 |
+
target_transform: Optional[Callable] = None,
|
| 58 |
+
download: bool = False,
|
| 59 |
+
) -> None:
|
| 60 |
+
super().__init__(
|
| 61 |
+
root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform
|
| 62 |
+
)
|
| 63 |
+
# check arguments
|
| 64 |
+
self.split = verify_str_arg(split, "split", ("train", "val", "test"))
|
| 65 |
+
|
| 66 |
+
if download:
|
| 67 |
+
self.download()
|
| 68 |
+
|
| 69 |
+
if not self._check_integrity():
|
| 70 |
+
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download and prepare it")
|
| 71 |
+
|
| 72 |
+
self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = []
|
| 73 |
+
if self.split in ("train", "val"):
|
| 74 |
+
self.parse_train_val_annotations_file()
|
| 75 |
+
else:
|
| 76 |
+
self.parse_test_annotations_file()
|
| 77 |
+
|
| 78 |
+
def __getitem__(self, index: int) -> Tuple[Any, Any]:
|
| 79 |
+
"""
|
| 80 |
+
Args:
|
| 81 |
+
index (int): Index
|
| 82 |
+
|
| 83 |
+
Returns:
|
| 84 |
+
tuple: (image, target) where target is a dict of annotations for all faces in the image.
|
| 85 |
+
target=None for the test split.
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
# stay consistent with other datasets and return a PIL Image
|
| 89 |
+
img = Image.open(self.img_info[index]["img_path"])
|
| 90 |
+
|
| 91 |
+
if self.transform is not None:
|
| 92 |
+
img = self.transform(img)
|
| 93 |
+
|
| 94 |
+
target = None if self.split == "test" else self.img_info[index]["annotations"]
|
| 95 |
+
if self.target_transform is not None:
|
| 96 |
+
target = self.target_transform(target)
|
| 97 |
+
|
| 98 |
+
return img, target
|
| 99 |
+
|
| 100 |
+
def __len__(self) -> int:
|
| 101 |
+
return len(self.img_info)
|
| 102 |
+
|
| 103 |
+
def extra_repr(self) -> str:
|
| 104 |
+
lines = ["Split: {split}"]
|
| 105 |
+
return "\n".join(lines).format(**self.__dict__)
|
| 106 |
+
|
| 107 |
+
def parse_train_val_annotations_file(self) -> None:
|
| 108 |
+
filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt"
|
| 109 |
+
filepath = os.path.join(self.root, "wider_face_split", filename)
|
| 110 |
+
|
| 111 |
+
with open(filepath) as f:
|
| 112 |
+
lines = f.readlines()
|
| 113 |
+
file_name_line, num_boxes_line, box_annotation_line = True, False, False
|
| 114 |
+
num_boxes, box_counter = 0, 0
|
| 115 |
+
labels = []
|
| 116 |
+
for line in lines:
|
| 117 |
+
line = line.rstrip()
|
| 118 |
+
if file_name_line:
|
| 119 |
+
img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line)
|
| 120 |
+
img_path = abspath(expanduser(img_path))
|
| 121 |
+
file_name_line = False
|
| 122 |
+
num_boxes_line = True
|
| 123 |
+
elif num_boxes_line:
|
| 124 |
+
num_boxes = int(line)
|
| 125 |
+
num_boxes_line = False
|
| 126 |
+
box_annotation_line = True
|
| 127 |
+
elif box_annotation_line:
|
| 128 |
+
box_counter += 1
|
| 129 |
+
line_split = line.split(" ")
|
| 130 |
+
line_values = [int(x) for x in line_split]
|
| 131 |
+
labels.append(line_values)
|
| 132 |
+
if box_counter >= num_boxes:
|
| 133 |
+
box_annotation_line = False
|
| 134 |
+
file_name_line = True
|
| 135 |
+
labels_tensor = torch.tensor(labels)
|
| 136 |
+
self.img_info.append(
|
| 137 |
+
{
|
| 138 |
+
"img_path": img_path,
|
| 139 |
+
"annotations": {
|
| 140 |
+
"bbox": labels_tensor[:, 0:4], # x, y, width, height
|
| 141 |
+
"blur": labels_tensor[:, 4],
|
| 142 |
+
"expression": labels_tensor[:, 5],
|
| 143 |
+
"illumination": labels_tensor[:, 6],
|
| 144 |
+
"occlusion": labels_tensor[:, 7],
|
| 145 |
+
"pose": labels_tensor[:, 8],
|
| 146 |
+
"invalid": labels_tensor[:, 9],
|
| 147 |
+
},
|
| 148 |
+
}
|
| 149 |
+
)
|
| 150 |
+
box_counter = 0
|
| 151 |
+
labels.clear()
|
| 152 |
+
else:
|
| 153 |
+
raise RuntimeError(f"Error parsing annotation file {filepath}")
|
| 154 |
+
|
| 155 |
+
def parse_test_annotations_file(self) -> None:
|
| 156 |
+
filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt")
|
| 157 |
+
filepath = abspath(expanduser(filepath))
|
| 158 |
+
with open(filepath) as f:
|
| 159 |
+
lines = f.readlines()
|
| 160 |
+
for line in lines:
|
| 161 |
+
line = line.rstrip()
|
| 162 |
+
img_path = os.path.join(self.root, "WIDER_test", "images", line)
|
| 163 |
+
img_path = abspath(expanduser(img_path))
|
| 164 |
+
self.img_info.append({"img_path": img_path})
|
| 165 |
+
|
| 166 |
+
def _check_integrity(self) -> bool:
|
| 167 |
+
# Allow original archive to be deleted (zip). Only need the extracted images
|
| 168 |
+
all_files = self.FILE_LIST.copy()
|
| 169 |
+
all_files.append(self.ANNOTATIONS_FILE)
|
| 170 |
+
for (_, md5, filename) in all_files:
|
| 171 |
+
file, ext = os.path.splitext(filename)
|
| 172 |
+
extracted_dir = os.path.join(self.root, file)
|
| 173 |
+
if not os.path.exists(extracted_dir):
|
| 174 |
+
return False
|
| 175 |
+
return True
|
| 176 |
+
|
| 177 |
+
def download(self) -> None:
|
| 178 |
+
if self._check_integrity():
|
| 179 |
+
print("Files already downloaded and verified")
|
| 180 |
+
return
|
| 181 |
+
|
| 182 |
+
# download and extract image data
|
| 183 |
+
for (file_id, md5, filename) in self.FILE_LIST:
|
| 184 |
+
download_file_from_google_drive(file_id, self.root, filename, md5)
|
| 185 |
+
filepath = os.path.join(self.root, filename)
|
| 186 |
+
extract_archive(filepath)
|
| 187 |
+
|
| 188 |
+
# download and extract annotation files
|
| 189 |
+
download_and_extract_archive(
|
| 190 |
+
url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1]
|
| 191 |
+
)
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (807 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (9.66 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc
ADDED
|
Binary file (14.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc
ADDED
|
Binary file (29.2 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/googlenet.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc
ADDED
|
Binary file (37.3 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/squeezenet.cpython-310.pyc
ADDED
|
Binary file (7.35 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/__pycache__/vgg.cpython-310.pyc
ADDED
|
Binary file (15.4 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/torchvision/models/detection/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .faster_rcnn import *
|
| 2 |
+
from .fcos import *
|
| 3 |
+
from .keypoint_rcnn import *
|
| 4 |
+
from .mask_rcnn import *
|
| 5 |
+
from .retinanet import *
|
| 6 |
+
from .ssd import *
|
| 7 |
+
from .ssdlite import *
|