ZTWHHH committed on
Commit
5981ec1
·
verified ·
1 Parent(s): 48a4b5d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. vllm/lib/python3.10/site-packages/_yaml/__init__.py +33 -0
  2. vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/WHEEL +4 -0
  3. vllm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc +0 -0
  4. vllm/lib/python3.10/site-packages/torchvision/datasets/__init__.py +146 -0
  5. vllm/lib/python3.10/site-packages/torchvision/datasets/_optical_flow.py +490 -0
  6. vllm/lib/python3.10/site-packages/torchvision/datasets/_stereo_matching.py +1224 -0
  7. vllm/lib/python3.10/site-packages/torchvision/datasets/caltech.py +242 -0
  8. vllm/lib/python3.10/site-packages/torchvision/datasets/celeba.py +194 -0
  9. vllm/lib/python3.10/site-packages/torchvision/datasets/cifar.py +168 -0
  10. vllm/lib/python3.10/site-packages/torchvision/datasets/cityscapes.py +222 -0
  11. vllm/lib/python3.10/site-packages/torchvision/datasets/clevr.py +88 -0
  12. vllm/lib/python3.10/site-packages/torchvision/datasets/coco.py +109 -0
  13. vllm/lib/python3.10/site-packages/torchvision/datasets/country211.py +58 -0
  14. vllm/lib/python3.10/site-packages/torchvision/datasets/dtd.py +100 -0
  15. vllm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py +62 -0
  16. vllm/lib/python3.10/site-packages/torchvision/datasets/fakedata.py +67 -0
  17. vllm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py +120 -0
  18. vllm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py +115 -0
  19. vllm/lib/python3.10/site-packages/torchvision/datasets/flickr.py +167 -0
  20. vllm/lib/python3.10/site-packages/torchvision/datasets/flowers102.py +114 -0
  21. vllm/lib/python3.10/site-packages/torchvision/datasets/folder.py +337 -0
  22. vllm/lib/python3.10/site-packages/torchvision/datasets/food101.py +93 -0
  23. vllm/lib/python3.10/site-packages/torchvision/datasets/gtsrb.py +103 -0
  24. vllm/lib/python3.10/site-packages/torchvision/datasets/hmdb51.py +152 -0
  25. vllm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py +219 -0
  26. vllm/lib/python3.10/site-packages/torchvision/datasets/imagenette.py +104 -0
  27. vllm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py +242 -0
  28. vllm/lib/python3.10/site-packages/torchvision/datasets/kinetics.py +248 -0
  29. vllm/lib/python3.10/site-packages/torchvision/datasets/kitti.py +158 -0
  30. vllm/lib/python3.10/site-packages/torchvision/datasets/lfw.py +256 -0
  31. vllm/lib/python3.10/site-packages/torchvision/datasets/lsun.py +168 -0
  32. vllm/lib/python3.10/site-packages/torchvision/datasets/mnist.py +559 -0
  33. vllm/lib/python3.10/site-packages/torchvision/datasets/moving_mnist.py +94 -0
  34. vllm/lib/python3.10/site-packages/torchvision/datasets/omniglot.py +103 -0
  35. vllm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py +132 -0
  36. vllm/lib/python3.10/site-packages/torchvision/datasets/pcam.py +134 -0
  37. vllm/lib/python3.10/site-packages/torchvision/datasets/phototour.py +234 -0
  38. vllm/lib/python3.10/site-packages/torchvision/datasets/places365.py +171 -0
  39. vllm/lib/python3.10/site-packages/torchvision/datasets/rendered_sst2.py +86 -0
  40. vllm/lib/python3.10/site-packages/torchvision/datasets/sbd.py +126 -0
  41. vllm/lib/python3.10/site-packages/torchvision/datasets/sbu.py +110 -0
  42. vllm/lib/python3.10/site-packages/torchvision/datasets/semeion.py +92 -0
  43. vllm/lib/python3.10/site-packages/torchvision/datasets/stanford_cars.py +109 -0
  44. vllm/lib/python3.10/site-packages/torchvision/datasets/stl10.py +175 -0
  45. vllm/lib/python3.10/site-packages/torchvision/datasets/sun397.py +76 -0
  46. vllm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py +131 -0
  47. vllm/lib/python3.10/site-packages/torchvision/datasets/usps.py +96 -0
  48. vllm/lib/python3.10/site-packages/torchvision/datasets/utils.py +476 -0
  49. vllm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py +419 -0
  50. vllm/lib/python3.10/site-packages/torchvision/datasets/voc.py +224 -0
vllm/lib/python3.10/site-packages/_yaml/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This is a stub package designed to roughly emulate the _yaml
# extension module, which previously existed as a standalone module
# and has been moved into the `yaml` package namespace.
# It does not perfectly mimic its old counterpart, but should get
# close enough for anyone who's relying on it even when they shouldn't.
import yaml

# in some circumstances, the yaml module we imported may be from a different version, so we need
# to tread carefully when poking at it here (it may not have the attributes we expect)
if not getattr(yaml, '__with_libyaml__', False):
    from sys import version_info

    # ModuleNotFoundError was only introduced in Python 3.6; fall back to its
    # parent class ImportError on older interpreters.
    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
    raise exc("No module named '_yaml'")
else:
    # Re-export everything from the real extension module so this stub is a
    # drop-in replacement for the old top-level `_yaml`.
    from yaml._yaml import *
    import warnings
    warnings.warn(
        'The _yaml extension module is now located at yaml._yaml'
        ' and its location is subject to change. To use the'
        ' LibYAML-based parser and emitter, import from `yaml`:'
        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
        DeprecationWarning
    )
    del warnings
    # Don't `del yaml` here because yaml is actually an existing
    # namespace member of _yaml.

__name__ = '_yaml'
# If the module is top-level (i.e. not a part of any specific package)
# then the attribute should be set to ''.
# https://docs.python.org/3.8/library/types.html
__package__ = ''
vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
vllm/lib/python3.10/site-packages/torchvision/__pycache__/utils.cpython-310.pyc ADDED
Binary file (22.1 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/datasets/__init__.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
2
+ from ._stereo_matching import (
3
+ CarlaStereo,
4
+ CREStereo,
5
+ ETH3DStereo,
6
+ FallingThingsStereo,
7
+ InStereo2k,
8
+ Kitti2012Stereo,
9
+ Kitti2015Stereo,
10
+ Middlebury2014Stereo,
11
+ SceneFlowStereo,
12
+ SintelStereo,
13
+ )
14
+ from .caltech import Caltech101, Caltech256
15
+ from .celeba import CelebA
16
+ from .cifar import CIFAR10, CIFAR100
17
+ from .cityscapes import Cityscapes
18
+ from .clevr import CLEVRClassification
19
+ from .coco import CocoCaptions, CocoDetection
20
+ from .country211 import Country211
21
+ from .dtd import DTD
22
+ from .eurosat import EuroSAT
23
+ from .fakedata import FakeData
24
+ from .fer2013 import FER2013
25
+ from .fgvc_aircraft import FGVCAircraft
26
+ from .flickr import Flickr30k, Flickr8k
27
+ from .flowers102 import Flowers102
28
+ from .folder import DatasetFolder, ImageFolder
29
+ from .food101 import Food101
30
+ from .gtsrb import GTSRB
31
+ from .hmdb51 import HMDB51
32
+ from .imagenet import ImageNet
33
+ from .imagenette import Imagenette
34
+ from .inaturalist import INaturalist
35
+ from .kinetics import Kinetics
36
+ from .kitti import Kitti
37
+ from .lfw import LFWPairs, LFWPeople
38
+ from .lsun import LSUN, LSUNClass
39
+ from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
40
+ from .moving_mnist import MovingMNIST
41
+ from .omniglot import Omniglot
42
+ from .oxford_iiit_pet import OxfordIIITPet
43
+ from .pcam import PCAM
44
+ from .phototour import PhotoTour
45
+ from .places365 import Places365
46
+ from .rendered_sst2 import RenderedSST2
47
+ from .sbd import SBDataset
48
+ from .sbu import SBU
49
+ from .semeion import SEMEION
50
+ from .stanford_cars import StanfordCars
51
+ from .stl10 import STL10
52
+ from .sun397 import SUN397
53
+ from .svhn import SVHN
54
+ from .ucf101 import UCF101
55
+ from .usps import USPS
56
+ from .vision import VisionDataset
57
+ from .voc import VOCDetection, VOCSegmentation
58
+ from .widerface import WIDERFace
59
+
60
+ __all__ = (
61
+ "LSUN",
62
+ "LSUNClass",
63
+ "ImageFolder",
64
+ "DatasetFolder",
65
+ "FakeData",
66
+ "CocoCaptions",
67
+ "CocoDetection",
68
+ "CIFAR10",
69
+ "CIFAR100",
70
+ "EMNIST",
71
+ "FashionMNIST",
72
+ "QMNIST",
73
+ "MNIST",
74
+ "KMNIST",
75
+ "StanfordCars",
76
+ "STL10",
77
+ "SUN397",
78
+ "SVHN",
79
+ "PhotoTour",
80
+ "SEMEION",
81
+ "Omniglot",
82
+ "SBU",
83
+ "Flickr8k",
84
+ "Flickr30k",
85
+ "Flowers102",
86
+ "VOCSegmentation",
87
+ "VOCDetection",
88
+ "Cityscapes",
89
+ "ImageNet",
90
+ "Caltech101",
91
+ "Caltech256",
92
+ "CelebA",
93
+ "WIDERFace",
94
+ "SBDataset",
95
+ "VisionDataset",
96
+ "USPS",
97
+ "Kinetics",
98
+ "HMDB51",
99
+ "UCF101",
100
+ "Places365",
101
+ "Kitti",
102
+ "INaturalist",
103
+ "LFWPeople",
104
+ "LFWPairs",
105
+ "KittiFlow",
106
+ "Sintel",
107
+ "FlyingChairs",
108
+ "FlyingThings3D",
109
+ "HD1K",
110
+ "Food101",
111
+ "DTD",
112
+ "FER2013",
113
+ "GTSRB",
114
+ "CLEVRClassification",
115
+ "OxfordIIITPet",
116
+ "PCAM",
117
+ "Country211",
118
+ "FGVCAircraft",
119
+ "EuroSAT",
120
+ "RenderedSST2",
121
+ "Kitti2012Stereo",
122
+ "Kitti2015Stereo",
123
+ "CarlaStereo",
124
+ "Middlebury2014Stereo",
125
+ "CREStereo",
126
+ "FallingThingsStereo",
127
+ "SceneFlowStereo",
128
+ "SintelStereo",
129
+ "InStereo2k",
130
+ "ETH3DStereo",
131
+ "wrap_dataset_for_transforms_v2",
132
+ "Imagenette",
133
+ )
134
+
135
+
136
# PEP 562 module-level __getattr__: resolves
#     from torchvision.datasets import wrap_dataset_for_transforms_v2
# lazily, so the import does not trigger a cyclic-import error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
    if name != "wrap_dataset_for_transforms_v2":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2

    return wrap_dataset_for_transforms_v2
vllm/lib/python3.10/site-packages/torchvision/datasets/_optical_flow.py ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import os
3
+ from abc import ABC, abstractmethod
4
+ from glob import glob
5
+ from pathlib import Path
6
+ from typing import Callable, List, Optional, Tuple, Union
7
+
8
+ import numpy as np
9
+ import torch
10
+ from PIL import Image
11
+
12
+ from ..io.image import decode_png, read_file
13
+ from .utils import _read_pfm, verify_str_arg
14
+ from .vision import VisionDataset
15
+
16
+ T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], Optional[np.ndarray]]
17
+ T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]
18
+
19
+
20
+ __all__ = (
21
+ "KittiFlow",
22
+ "Sintel",
23
+ "FlyingThings3D",
24
+ "FlyingChairs",
25
+ "HD1K",
26
+ )
27
+
28
+
29
class FlowDataset(ABC, VisionDataset):
    """Abstract base class for the optical-flow datasets in this module.

    Subclasses populate ``_image_list`` (pairs of image file paths) and, for
    splits that have ground truth, ``_flow_list`` (one flow file per pair),
    and implement :meth:`_read_flow`.
    """

    # Some datasets like Kitti have a built-in valid_flow_mask, indicating which flow values are valid
    # For those we return (img1, img2, flow, valid_flow_mask), and for the rest we return (img1, img2, flow),
    # and it's up to whatever consumes the dataset to decide what valid_flow_mask should be.
    _has_builtin_flow_mask = False

    def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None) -> None:

        super().__init__(root=root)
        self.transforms = transforms

        # Filled in by subclass __init__ methods.
        self._flow_list: List[str] = []
        self._image_list: List[List[str]] = []

    def _read_img(self, file_name: str) -> Image.Image:
        # Force 3-channel RGB so every dataset yields images of the same mode.
        img = Image.open(file_name)
        if img.mode != "RGB":
            img = img.convert("RGB")  # type: ignore[assignment]
        return img

    @abstractmethod
    def _read_flow(self, file_name: str):
        # Return the flow or a tuple with the flow and the valid_flow_mask if _has_builtin_flow_mask is True
        pass

    def __getitem__(self, index: int) -> Union[T1, T2]:

        img1 = self._read_img(self._image_list[index][0])
        img2 = self._read_img(self._image_list[index][1])

        if self._flow_list:  # it will be empty for some dataset when split="test"
            flow = self._read_flow(self._flow_list[index])
            if self._has_builtin_flow_mask:
                flow, valid_flow_mask = flow
            else:
                valid_flow_mask = None
        else:
            flow = valid_flow_mask = None

        # Transforms always receive all four values, even when flow/mask are None.
        if self.transforms is not None:
            img1, img2, flow, valid_flow_mask = self.transforms(img1, img2, flow, valid_flow_mask)

        if self._has_builtin_flow_mask or valid_flow_mask is not None:
            # The `or valid_flow_mask is not None` part is here because the mask can be generated within a transform
            return img1, img2, flow, valid_flow_mask
        else:
            return img1, img2, flow

    def __len__(self) -> int:
        return len(self._image_list)

    def __rmul__(self, v: int) -> torch.utils.data.ConcatDataset:
        # `v * dataset` repeats the dataset v times via ConcatDataset
        # (handy for balancing datasets of different sizes during training).
        return torch.utils.data.ConcatDataset([self] * v)
82
+
83
+
84
class Sintel(FlowDataset):
    """`Sintel <http://sintel.is.tue.mpg.de/>`_ Dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            Sintel
                testing
                    clean
                        scene_1
                        scene_2
                        ...
                    final
                        scene_1
                        scene_2
                        ...
                training
                    clean
                        scene_1
                        scene_2
                        ...
                    final
                        scene_1
                        scene_2
                        ...
                    flow
                        scene_1
                        scene_2
                        ...

    Args:
        root (str or ``pathlib.Path``): Root directory of the Sintel Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        pass_name (string, optional): The pass to use, either "clean" (default), "final", or "both". See link above for
            details on the different passes.
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        pass_name: str = "clean",
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))
        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
        passes = ["clean", "final"] if pass_name == "both" else [pass_name]

        root = Path(root) / "Sintel"
        # Flow ground truth only exists under training/flow.
        flow_root = root / "training" / "flow"

        for pass_name in passes:
            split_dir = "training" if split == "train" else split
            image_root = root / split_dir / pass_name
            for scene in os.listdir(image_root):
                image_list = sorted(glob(str(image_root / scene / "*.png")))
                # Consecutive frames within a scene form the (img1, img2) pairs.
                for i in range(len(image_list) - 1):
                    self._image_list += [[image_list[i], image_list[i + 1]]]

                if split == "train":
                    self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="test"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name: str) -> np.ndarray:
        return _read_flo(file_name)
169
+
170
+
171
class KittiFlow(FlowDataset):
    """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow>`__ dataset for optical flow (2015).

    The dataset is expected to have the following structure: ::

        root
            KittiFlow
                testing
                    image_2
                training
                    image_2
                    flow_occ

    Args:
        root (str or ``pathlib.Path``): Root directory of the KittiFlow Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
    """

    # KITTI flow PNGs carry a validity channel, so samples are 4-tuples.
    _has_builtin_flow_mask = True

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        # Maps "train" -> "training" and "test" -> "testing".
        root = Path(root) / "KittiFlow" / (split + "ing")
        # *_10.png is the first frame of each pair, *_11.png the second.
        images1 = sorted(glob(str(root / "image_2" / "*_10.png")))
        images2 = sorted(glob(str(root / "image_2" / "*_11.png")))

        if not images1 or not images2:
            raise FileNotFoundError(
                "Could not find the Kitti flow images. Please make sure the directory structure is correct."
            )

        for img1, img2 in zip(images1, images2):
            self._image_list += [[img1, img2]]

        if split == "train":
            self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)``
            where ``valid_flow_mask`` is a numpy boolean mask of shape (H, W)
            indicating which flow values are valid. The flow is a numpy array of
            shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
            ``split="test"``.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
        return _read_16bits_png_with_flow_and_valid_mask(file_name)
230
+
231
+
232
class FlyingChairs(FlowDataset):
    """`FlyingChairs <https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs>`_ Dataset for optical flow.

    You will also need to download the FlyingChairs_train_val.txt file from the dataset page.

    The dataset is expected to have the following structure: ::

        root
            FlyingChairs
                data
                    00001_flow.flo
                    00001_img1.ppm
                    00001_img2.ppm
                    ...
                FlyingChairs_train_val.txt


    Args:
        root (str or ``pathlib.Path``): Root directory of the FlyingChairs Dataset.
        split (string, optional): The dataset split, either "train" (default) or "val"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "val"))

        root = Path(root) / "FlyingChairs"
        # Sorted globs keep images and flows aligned: flow i goes with
        # images 2*i and 2*i + 1 (the *_img1.ppm / *_img2.ppm pair).
        images = sorted(glob(str(root / "data" / "*.ppm")))
        flows = sorted(glob(str(root / "data" / "*.flo")))

        split_file_name = "FlyingChairs_train_val.txt"

        if not os.path.exists(root / split_file_name):
            raise FileNotFoundError(
                "The FlyingChairs_train_val.txt file was not found - please download it from the dataset page (see docstring)."
            )

        # One integer per sample: 1 selects "train", 2 selects "val".
        split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32)
        for i in range(len(flows)):
            split_id = split_list[i]
            if (split == "train" and split_id == 1) or (split == "val" and split_id == 2):
                self._flow_list += [flows[i]]
                self._image_list += [[images[2 * i], images[2 * i + 1]]]

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="val"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name: str) -> np.ndarray:
        return _read_flo(file_name)
298
+
299
+
300
class FlyingThings3D(FlowDataset):
    """`FlyingThings3D <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            FlyingThings3D
                frames_cleanpass
                    TEST
                    TRAIN
                frames_finalpass
                    TEST
                    TRAIN
                optical_flow
                    TEST
                    TRAIN

    Args:
        root (str or ``pathlib.Path``): Root directory of the intel FlyingThings3D Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        pass_name (string, optional): The pass to use, either "clean" (default) or "final" or "both". See link above for
            details on the different passes.
        camera (string, optional): Which camera to return images from. Can be either "left" (default) or "right" or "both".
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        pass_name: str = "clean",
        camera: str = "left",
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))
        # On-disk directories are TRAIN / TEST.
        split = split.upper()

        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
        passes = {
            "clean": ["frames_cleanpass"],
            "final": ["frames_finalpass"],
            "both": ["frames_cleanpass", "frames_finalpass"],
        }[pass_name]

        verify_str_arg(camera, "camera", valid_values=("left", "right", "both"))
        cameras = ["left", "right"] if camera == "both" else [camera]

        root = Path(root) / "FlyingThings3D"

        # Flow is stored for both temporal directions; pairs are emitted
        # forward for "into_future" and reversed for "into_past".
        directions = ("into_future", "into_past")
        for pass_name, camera, direction in itertools.product(passes, cameras, directions):
            image_dirs = sorted(glob(str(root / pass_name / split / "*/*")))
            image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs)

            flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*")))
            flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs)

            if not image_dirs or not flow_dirs:
                raise FileNotFoundError(
                    "Could not find the FlyingThings3D flow images. "
                    "Please make sure the directory structure is correct."
                )

            for image_dir, flow_dir in zip(image_dirs, flow_dirs):
                images = sorted(glob(str(image_dir / "*.png")))
                flows = sorted(glob(str(flow_dir / "*.pfm")))
                # NOTE(review): iterating to len(flows) - 1 skips the last flow
                # file of each sequence — presumably because a frame pair needs
                # i and i + 1; confirm against upstream before changing.
                for i in range(len(flows) - 1):
                    if direction == "into_future":
                        self._image_list += [[images[i], images[i + 1]]]
                        self._flow_list += [flows[i]]
                    elif direction == "into_past":
                        self._image_list += [[images[i + 1], images[i]]]
                        self._flow_list += [flows[i + 1]]

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="test"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name: str) -> np.ndarray:
        return _read_pfm(file_name)
396
+
397
+
398
class HD1K(FlowDataset):
    """`HD1K <http://hci-benchmark.iwr.uni-heidelberg.de/>`__ dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            hd1k
                hd1k_challenge
                    image_2
                hd1k_flow_gt
                    flow_occ
                hd1k_input
                    image_2

    Args:
        root (str or ``pathlib.Path``): Root directory of the HD1K Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
    """

    # HD1K flow PNGs carry a validity channel, so samples are 4-tuples.
    _has_builtin_flow_mask = True

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "hd1k"
        if split == "train":
            # There are 36 "sequences" and we don't want seq i to overlap with seq i + 1, so we need this for loop
            for seq_idx in range(36):
                flows = sorted(glob(str(root / "hd1k_flow_gt" / "flow_occ" / f"{seq_idx:06d}_*.png")))
                images = sorted(glob(str(root / "hd1k_input" / "image_2" / f"{seq_idx:06d}_*.png")))
                for i in range(len(flows) - 1):
                    self._flow_list += [flows[i]]
                    self._image_list += [[images[i], images[i + 1]]]
        else:
            # Test split: only the challenge image pairs, no ground truth.
            images1 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*10.png")))
            images2 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*11.png")))
            for image1, image2 in zip(images1, images2):
                self._image_list += [[image1, image2]]

        if not self._image_list:
            raise FileNotFoundError(
                "Could not find the HD1K images. Please make sure the directory structure is correct."
            )

    def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
        return _read_16bits_png_with_flow_and_valid_mask(file_name)

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where ``valid_flow_mask``
            is a numpy boolean mask of shape (H, W)
            indicating which flow values are valid. The flow is a numpy array of
            shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
            ``split="test"``.
        """
        return super().__getitem__(index)
463
+
464
+
465
+ def _read_flo(file_name: str) -> np.ndarray:
466
+ """Read .flo file in Middlebury format"""
467
+ # Code adapted from:
468
+ # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
469
+ # Everything needs to be in little Endian according to
470
+ # https://vision.middlebury.edu/flow/code/flow-code/README.txt
471
+ with open(file_name, "rb") as f:
472
+ magic = np.fromfile(f, "c", count=4).tobytes()
473
+ if magic != b"PIEH":
474
+ raise ValueError("Magic number incorrect. Invalid .flo file")
475
+
476
+ w = int(np.fromfile(f, "<i4", count=1))
477
+ h = int(np.fromfile(f, "<i4", count=1))
478
+ data = np.fromfile(f, "<f4", count=2 * w * h)
479
+ return data.reshape(h, w, 2).transpose(2, 0, 1)
480
+
481
+
482
def _read_16bits_png_with_flow_and_valid_mask(file_name: str) -> Tuple[np.ndarray, np.ndarray]:
    """Decode a KITTI-style 16-bit flow PNG into ``(flow, valid_flow_mask)``.

    Channels 0-1 hold the encoded flow; channel 2 is the validity mask.
    """
    raw = decode_png(read_file(file_name)).to(torch.float32)
    flow = raw[:2, :, :]
    valid_flow_mask = raw[2, :, :].bool()
    # Undo the uint16 encoding (offset 2**15, scale 64) — this conversion is
    # explained somewhere on the kitti archive.
    flow = (flow - 2**15) / 64

    # For consistency with other datasets, we convert to numpy
    return flow.numpy(), valid_flow_mask.numpy()
vllm/lib/python3.10/site-packages/torchvision/datasets/_stereo_matching.py ADDED
@@ -0,0 +1,1224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import json
3
+ import os
4
+ import random
5
+ import shutil
6
+ from abc import ABC, abstractmethod
7
+ from glob import glob
8
+ from pathlib import Path
9
+ from typing import Callable, cast, List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ from PIL import Image
13
+
14
+ from .utils import _read_pfm, download_and_extract_archive, verify_str_arg
15
+ from .vision import VisionDataset
16
+
17
# Return types of ``StereoMatchingDataset.__getitem__``:
# T1 — (left image, right image, left disparity, left valid mask)
# T2 — (left image, right image, left disparity) without a mask
T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], np.ndarray]
T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]

# Nothing is exported via ``from ... import *``; datasets are accessed by name.
__all__ = ()

# PFM reader specialised to keep a single channel, as disparity maps are 1-channel.
_read_pfm_file = functools.partial(_read_pfm, slice_channels=1)
23
+
24
+
25
class StereoMatchingDataset(ABC, VisionDataset):
    """Base interface for Stereo matching datasets"""

    # Subclasses whose __getitem__ always returns a (possibly None) mask slot set this to True.
    _has_built_in_disparity_mask = False

    def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None) -> None:
        """
        Args:
            root(str): Root directory of the dataset.
            transforms(callable, optional): A function/transform that takes in Tuples of
                (images, disparities, valid_masks) and returns a transformed version of each of them.
                images is a Tuple of (``PIL.Image``, ``PIL.Image``)
                disparities is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (1, H, W)
                valid_masks is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (H, W)
                In some cases, when a dataset does not provide disparities, the ``disparities`` and
                ``valid_masks`` can be Tuples containing None values.
                For training splits generally the datasets provide a minimal guarantee of
                images: (``PIL.Image``, ``PIL.Image``)
                disparities: (``np.ndarray``, ``None``) with shape (1, H, W)
                Optionally, based on the dataset, it can return a ``mask`` as well:
                valid_masks: (``np.ndarray | None``, ``None``) with shape (H, W)
                For some test splits, the datasets provides outputs that look like:
                images: (``PIL.Image``, ``PIL.Image``)
                disparities: (``None``, ``None``)
                Optionally, based on the dataset, it can return a ``mask`` as well:
                valid_masks: (``None``, ``None``)
        """
        super().__init__(root=root)
        self.transforms = transforms

        # Filled in by subclasses with (left, right) path pairs.
        self._images = []  # type: ignore
        self._disparities = []  # type: ignore

    def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
        """Open ``file_path`` and return it as an RGB PIL image."""
        img = Image.open(file_path)
        if img.mode != "RGB":
            img = img.convert("RGB")  # type: ignore[assignment]
        return img

    def _scan_pairs(
        self,
        paths_left_pattern: str,
        paths_right_pattern: Optional[str] = None,
    ) -> List[Tuple[str, Optional[str]]]:
        """Glob matching (left, right) file pairs, sorted so indices line up.

        When ``paths_right_pattern`` is not given, the right entry of each pair is None
        (used for datasets that only provide left-view annotations).

        Raises:
            FileNotFoundError: If either pattern matches no files.
            ValueError: If the two patterns match a different number of files.
        """
        left_paths = sorted(glob(paths_left_pattern))

        right_paths: List[Union[None, str]]
        if paths_right_pattern:
            right_paths = sorted(glob(paths_right_pattern))
        else:
            right_paths = [None] * len(left_paths)

        if not left_paths:
            raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_left_pattern}")

        if not right_paths:
            raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_right_pattern}")

        if len(left_paths) != len(right_paths):
            raise ValueError(
                f"Found {len(left_paths)} left files but {len(right_paths)} right files using:\n "
                f"left pattern: {paths_left_pattern}\n"
                f"right pattern: {paths_right_pattern}\n"
            )

        return list(zip(left_paths, right_paths))

    @abstractmethod
    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
        # function that returns a disparity map and an occlusion map
        pass

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3 or 4-tuple with ``(img_left, img_right, disparity, Optional[valid_mask])`` where ``valid_mask``
                can be a numpy boolean mask of shape (H, W) if the dataset provides a file
                indicating which disparity pixels are valid. The disparity is a numpy array of
                shape (1, H, W) and the images are PIL images. ``disparity`` is None for
                datasets on which for ``split="test"`` the authors did not provide annotations.
        """
        img_left = self._read_img(self._images[index][0])
        img_right = self._read_img(self._images[index][1])

        dsp_map_left, valid_mask_left = self._read_disparity(self._disparities[index][0])
        dsp_map_right, valid_mask_right = self._read_disparity(self._disparities[index][1])

        imgs = (img_left, img_right)
        dsp_maps = (dsp_map_left, dsp_map_right)
        valid_masks = (valid_mask_left, valid_mask_right)

        if self.transforms is not None:
            (
                imgs,
                dsp_maps,
                valid_masks,
            ) = self.transforms(imgs, dsp_maps, valid_masks)

        # Datasets with a built-in mask always expose the 4-tuple interface,
        # even when the mask itself is None (e.g. test splits).
        if self._has_built_in_disparity_mask or valid_masks[0] is not None:
            return imgs[0], imgs[1], dsp_maps[0], cast(np.ndarray, valid_masks[0])
        else:
            return imgs[0], imgs[1], dsp_maps[0]

    def __len__(self) -> int:
        return len(self._images)
136
+
137
+
138
class CarlaStereo(StereoMatchingDataset):
    """
    Carla simulator data linked in the `CREStereo github repo <https://github.com/megvii-research/CREStereo>`_.

    The dataset is expected to have the following structure: ::

        root
            carla-highres
                trainingF
                    scene1
                        im0.png
                        im1.png
                        disp0GT.pfm
                        disp1GT.pfm
                        calib.txt
                    scene2
                        im0.png
                        im1.png
                        disp0GT.pfm
                        disp1GT.pfm
                        calib.txt
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory where `carla-highres` is located.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "carla-highres"

        left_image_pattern = str(root / "trainingF" / "*" / "im0.png")
        right_image_pattern = str(root / "trainingF" / "*" / "im1.png")
        imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
        self._images = imgs

        left_disparity_pattern = str(root / "trainingF" / "*" / "disp0GT.pfm")
        right_disparity_pattern = str(root / "trainingF" / "*" / "disp1GT.pfm")
        disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
        self._disparities = disparities

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
200
+
201
+
202
class Kitti2012Stereo(StereoMatchingDataset):
    """
    KITTI dataset from the `2012 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php>`_.
    Uses the RGB images for consistency with KITTI 2015.

    The dataset is expected to have the following structure: ::

        root
            Kitti2012
                testing
                    colored_0
                        1_10.png
                        2_10.png
                        ...
                    colored_1
                        1_10.png
                        2_10.png
                        ...
                training
                    colored_0
                        1_10.png
                        2_10.png
                        ...
                    colored_1
                        1_10.png
                        2_10.png
                        ...
                    disp_noc
                        1.png
                        2.png
                        ...
                    calib

    Args:
        root (str or ``pathlib.Path``): Root directory where `Kitti2012` is located.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    # KITTI samples always expose the 4-tuple interface, even without a mask file.
    _has_built_in_disparity_mask = True

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        # "train" -> "training", "test" -> "testing"
        split_dir = Path(root) / "Kitti2012" / (split + "ing")

        self._images = self._scan_pairs(
            str(split_dir / "colored_0" / "*_10.png"),
            str(split_dir / "colored_1" / "*_10.png"),
        )

        if split == "train":
            self._disparities = self._scan_pairs(str(split_dir / "disp_noc" / "*.png"), None)
        else:
            # No ground truth is distributed for the test split.
            self._disparities = [(None, None) for _ in self._images]

    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        # KITTI encodes disparities as PNGs scaled by 256.
        disparity_map = np.asarray(Image.open(file_path)) / 256.0
        # unsqueeze the disparity map into (C, H, W) format
        return disparity_map[None, :, :], None

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
                Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T1, super().__getitem__(index))
285
+
286
+
287
class Kitti2015Stereo(StereoMatchingDataset):
    """
    KITTI dataset from the `2015 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php>`_.

    The dataset is expected to have the following structure: ::

        root
            Kitti2015
                testing
                    image_2
                        img1.png
                        img2.png
                        ...
                    image_3
                        img1.png
                        img2.png
                        ...
                training
                    image_2
                        img1.png
                        img2.png
                        ...
                    image_3
                        img1.png
                        img2.png
                        ...
                    disp_occ_0
                        img1.png
                        img2.png
                        ...
                    disp_occ_1
                        img1.png
                        img2.png
                        ...
                    calib

    Args:
        root (str or ``pathlib.Path``): Root directory where `Kitti2015` is located.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    # KITTI samples always expose the 4-tuple interface, even without a mask file.
    _has_built_in_disparity_mask = True

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        # "train" -> "training", "test" -> "testing"
        split_dir = Path(root) / "Kitti2015" / (split + "ing")
        self._images = self._scan_pairs(
            str(split_dir / "image_2" / "*.png"),
            str(split_dir / "image_3" / "*.png"),
        )

        if split == "train":
            self._disparities = self._scan_pairs(
                str(split_dir / "disp_occ_0" / "*.png"),
                str(split_dir / "disp_occ_1" / "*.png"),
            )
        else:
            # No ground truth is distributed for the test split.
            self._disparities = [(None, None) for _ in self._images]

    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        # KITTI encodes disparities as PNGs scaled by 256.
        disparity_map = np.asarray(Image.open(file_path)) / 256.0
        # unsqueeze the disparity map into (C, H, W) format
        return disparity_map[None, :, :], None

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
                Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T1, super().__getitem__(index))
373
+
374
+
375
class Middlebury2014Stereo(StereoMatchingDataset):
    """Publicly available scenes from the Middlebury dataset `2014 version <https://vision.middlebury.edu/stereo/data/scenes2014/>`.

    The dataset mostly follows the original format, without containing the ambient subdirectories. : ::

        root
            Middlebury2014
                train
                    scene1-{perfect,imperfect}
                        calib.txt
                        im{0,1}.png
                        im1E.png
                        im1L.png
                        disp{0,1}.pfm
                        disp{0,1}-n.png
                        disp{0,1}-sd.pfm
                        disp{0,1}y.pfm
                    scene2-{perfect,imperfect}
                        ...
                additional
                    scene1-{perfect,imperfect}
                        ...
                test
                    scene1
                        calib.txt
                        im{0,1}.png
                    scene2
                        calib.txt
                        im{0,1}.png
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory of the Middlebury 2014 Dataset.
        split (string, optional): The dataset split of scenes, either "train" (default), "test", or "additional"
        use_ambient_views (boolean, optional): Whether to use different expose or lightning views when possible.
            The dataset samples with equal probability between ``[im1.png, im1E.png, im1L.png]``.
        calibration (string, optional): Whether or not to use the calibrated (default) or uncalibrated scenes.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
        download (boolean, optional): Whether or not to download the dataset in the ``root`` directory.
    """

    splits = {
        "train": [
            "Adirondack",
            "Jadeplant",
            "Motorcycle",
            "Piano",
            "Pipes",
            "Playroom",
            "Playtable",
            "Recycle",
            "Shelves",
            "Vintage",
        ],
        "additional": [
            "Backpack",
            "Bicycle1",
            "Cable",
            "Classroom1",
            "Couch",
            "Flowers",
            "Mask",
            "Shopvac",
            "Sticks",
            "Storage",
            "Sword1",
            "Sword2",
            "Umbrella",
        ],
        "test": [
            "Plants",
            "Classroom2E",
            "Classroom2",
            "Australia",
            "DjembeL",
            "CrusadeP",
            "Crusade",
            "Hoops",
            "Bicycle2",
            "Staircase",
            "Newkuba",
            "AustraliaP",
            "Djembe",
            "Livingroom",
            "Computer",
        ],
    }

    _has_built_in_disparity_mask = True

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        calibration: Optional[str] = "perfect",
        use_ambient_views: bool = False,
        transforms: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test", "additional"))
        self.split = split

        if calibration:
            verify_str_arg(calibration, "calibration", valid_values=("perfect", "imperfect", "both", None))  # type: ignore
            if split == "test":
                # The test split ships a single, uncalibrated variant per scene.
                raise ValueError("Split 'test' has no calibration settings, please set `calibration=None`.")
        else:
            if split != "test":
                raise ValueError(
                    f"Split '{split}' has calibration settings, however None was provided as an argument."
                    f"\nAvailable calibration settings are: 'perfect', 'imperfect', 'both'.",
                )

        if download:
            self._download_dataset(root)

        root = Path(root) / "Middlebury2014"

        if not os.path.exists(root / split):
            raise FileNotFoundError(f"The {split} directory was not found in the provided root directory")

        split_scenes = self.splits[split]
        # check that the provided root folder contains the scene splits
        if not any(
            # using startswith to account for perfect / imperfect calibration
            scene.startswith(s)
            for scene in os.listdir(root / split)
            for s in split_scenes
        ):
            raise FileNotFoundError(f"Provided root folder does not contain any scenes from the {split} split.")

        calibration_suffixes = {
            None: [""],
            "perfect": ["-perfect"],
            "imperfect": ["-imperfect"],
            "both": ["-perfect", "-imperfect"],
        }[calibration]

        for calibration_suffix in calibration_suffixes:
            scene_pattern = "*" + calibration_suffix
            left_img_pattern = str(root / split / scene_pattern / "im0.png")
            right_img_pattern = str(root / split / scene_pattern / "im1.png")
            self._images += self._scan_pairs(left_img_pattern, right_img_pattern)

            if split == "test":
                self._disparities = list((None, None) for _ in self._images)
            else:
                left_disparity_pattern = str(root / split / scene_pattern / "disp0.pfm")
                right_disparity_pattern = str(root / split / scene_pattern / "disp1.pfm")
                self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

        self.use_ambient_views = use_ambient_views

    def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
        """
        Function that reads either the original right image or an augmented view when ``use_ambient_views`` is True.
        When ``use_ambient_views`` is True, the dataset will return at random one of ``[im1.png, im1E.png, im1L.png]``
        as the right image.
        """
        ambient_file_paths: List[Union[str, Path]]  # make mypy happy

        if not isinstance(file_path, Path):
            file_path = Path(file_path)

        if file_path.name == "im1.png" and self.use_ambient_views:
            base_path = file_path.parent
            # initialize sampleable container
            ambient_file_paths = list(base_path / view_name for view_name in ["im1E.png", "im1L.png"])
            # double check that we're not going to try to read from an invalid file path
            ambient_file_paths = list(filter(lambda p: os.path.exists(p), ambient_file_paths))
            # keep the original image as an option as well for uniform sampling between base views
            ambient_file_paths.append(file_path)
            file_path = random.choice(ambient_file_paths)  # type: ignore
        return super()._read_img(file_path)

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        disparity_map[disparity_map == np.inf] = 0  # remove infinite disparities
        valid_mask = (disparity_map > 0).squeeze(0)  # mask out invalid disparities
        return disparity_map, valid_mask

    def _download_dataset(self, root: Union[str, Path]) -> None:
        """Download (if missing) the scenes for ``self.split`` under ``root / "Middlebury2014"``."""
        base_url = "https://vision.middlebury.edu/stereo/data/scenes2014/zip"
        # train and additional splits have 2 different calibration settings
        root = Path(root) / "Middlebury2014"
        split_name = self.split

        if split_name != "test":
            for split_scene in self.splits[split_name]:
                split_root = root / split_name
                for calibration in ["perfect", "imperfect"]:
                    scene_name = f"{split_scene}-{calibration}"
                    scene_url = f"{base_url}/{scene_name}.zip"
                    print(f"Downloading {scene_url}")
                    # download the scene only if it doesn't exist
                    if not (split_root / scene_name).exists():
                        download_and_extract_archive(
                            url=scene_url,
                            filename=f"{scene_name}.zip",
                            download_root=str(split_root),
                            remove_finished=True,
                        )
        else:
            # exist_ok avoids crashing when the test directory was already (partially) downloaded
            os.makedirs(root / "test", exist_ok=True)
            if any(s not in os.listdir(root / "test") for s in self.splits["test"]):
                # test split is downloaded from a different location
                test_set_url = "https://vision.middlebury.edu/stereo/submit3/zip/MiddEval3-data-F.zip"
                # the unzip is going to produce a directory MiddEval3 with two subdirectories trainingF and testF
                # we want to move the contents from testF into the directory
                download_and_extract_archive(url=test_set_url, download_root=str(root), remove_finished=True)
                for scene_dir, scene_names, _ in os.walk(str(root / "MiddEval3/testF")):
                    for scene in scene_names:
                        scene_dst_dir = root / "test"
                        scene_src_dir = Path(scene_dir) / scene
                        os.makedirs(scene_dst_dir, exist_ok=True)
                        shutil.move(str(scene_src_dir), str(scene_dst_dir))

                # cleanup MiddEval3 directory
                shutil.rmtree(str(root / "MiddEval3"))

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` for `split=test`.
        """
        return cast(T2, super().__getitem__(index))
630
+
631
+
632
class CREStereo(StereoMatchingDataset):
    """Synthetic dataset used in training the `CREStereo <https://arxiv.org/pdf/2203.11483.pdf>`_ architecture.
    Dataset details on the official paper `repo <https://github.com/megvii-research/CREStereo>`_.

    The dataset is expected to have the following structure: ::

        root
            CREStereo
                tree
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    img2_left.jpg
                    img2_right.jpg
                    img2_left.disp.png
                    img2_right.disp.png
                    ...
                shapenet
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...
                reflective
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...
                hole
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...

    Args:
        root (str): Root directory of the dataset.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    # Samples always expose the 4-tuple interface, even though no mask files exist.
    _has_built_in_disparity_mask = True

    def __init__(
        self,
        root: Union[str, Path],
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "CREStereo"

        dirs = ["shapenet", "reflective", "tree", "hole"]

        for s in dirs:
            left_image_pattern = str(root / s / "*_left.jpg")
            right_image_pattern = str(root / s / "*_right.jpg")
            imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
            self._images += imgs

            left_disparity_pattern = str(root / s / "*_left.disp.png")
            right_disparity_pattern = str(root / s / "*_right.disp.png")
            disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
            self._disparities += disparities

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze the disparity map into (C, H, W) format
        disparity_map = disparity_map[None, :, :] / 32.0
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
        """
        return cast(T1, super().__getitem__(index))
718
+
719
+
720
class FallingThingsStereo(StereoMatchingDataset):
    """`FallingThings <https://research.nvidia.com/publication/2018-06_falling-things-synthetic-dataset-3d-object-detection-and-pose-estimation>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            FallingThings
                single
                    dir1
                        scene1
                            _object_settings.json
                            _camera_settings.json
                            image1.left.depth.png
                            image1.right.depth.png
                            image1.left.jpg
                            image1.right.jpg
                            ...
                        scene2
                    ...
                mixed
                    scene1
                        _object_settings.json
                        _camera_settings.json
                        image1.left.depth.png
                        image1.right.depth.png
                        image1.left.jpg
                        image1.right.jpg
                        ...
                    scene2
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory where FallingThings is located.
        variant (string): Which variant to use. Either "single", "mixed", or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: Union[str, Path], variant: str = "single", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "FallingThings"

        verify_str_arg(variant, "variant", valid_values=("single", "mixed", "both"))

        selected_variants = {
            "single": ["single"],
            "mixed": ["mixed"],
            "both": ["single", "mixed"],
        }[variant]

        # "single" scenes are nested one directory deeper than "mixed" scenes.
        scene_glob = {
            "single": Path("*") / "*",
            "mixed": Path("*"),
        }

        for name in selected_variants:
            scene_dir = root / name / scene_glob[name]
            self._images += self._scan_pairs(
                str(scene_dir / "*.left.jpg"),
                str(scene_dir / "*.right.jpg"),
            )
            self._disparities += self._scan_pairs(
                str(scene_dir / "*.left.depth.png"),
                str(scene_dir / "*.right.depth.png"),
            )

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        # (H, W) image
        depth = np.asarray(Image.open(file_path))
        # as per https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
        # in order to extract disparity from depth maps
        camera_settings_path = Path(file_path).parent / "_camera_settings.json"
        with open(camera_settings_path, "r") as f:
            # inverse of depth-from-disparity equation: depth = (baseline * focal) / (disparity * pixel_constant)
            intrinsics = json.load(f)
        focal = intrinsics["camera_settings"][0]["intrinsic_settings"]["fx"]
        baseline, pixel_constant = 6, 100  # pixel constant is inverted
        disparity_map = (baseline * focal * pixel_constant) / depth.astype(np.float32)
        # unsqueeze disparity to (C, H, W)
        return disparity_map[None, :, :], None

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
822
+
823
+
824
class SceneFlowStereo(StereoMatchingDataset):
    """Dataset interface for `Scene Flow <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ datasets.
    This interface provides access to the `FlyingThings3D`, `Monkaa` and `Driving` datasets.

    The dataset is expected to have the following structure: ::

        root
            SceneFlow
                Monkaa
                    frames_cleanpass
                        scene1
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                        scene2
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                    frames_finalpass
                        scene1
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                        ...
                    disparity
                        scene1
                            left
                                img1.pfm
                                img2.pfm
                            right
                                img1.pfm
                                img2.pfm
                FlyingThings3D
                    ...
                Driving
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory where SceneFlow is located.
        variant (string): Which dataset variant to use, "FlyingThings3D" (default), "Monkaa" or "Driving".
        pass_name (string): Which pass to use, "clean" (default), "final" or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.

    """

    def __init__(
        self,
        root: Union[str, Path],
        variant: str = "FlyingThings3D",
        pass_name: str = "clean",
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "SceneFlow"

        verify_str_arg(variant, "variant", valid_values=("FlyingThings3D", "Driving", "Monkaa"))
        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))

        passes = {
            "clean": ["frames_cleanpass"],
            "final": ["frames_finalpass"],
            "both": ["frames_cleanpass", "frames_finalpass"],
        }[pass_name]

        root = root / variant

        # Each variant nests its scenes at a different depth.
        prefix_directories = {
            "Monkaa": Path("*"),
            "FlyingThings3D": Path("*") / "*" / "*",
            "Driving": Path("*") / "*" / "*",
        }

        for p in passes:
            left_image_pattern = str(root / p / prefix_directories[variant] / "left" / "*.png")
            right_image_pattern = str(root / p / prefix_directories[variant] / "right" / "*.png")
            self._images += self._scan_pairs(left_image_pattern, right_image_pattern)

            left_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "left" / "*.pfm")
            right_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "right" / "*.pfm")
            self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
934
+
935
+
936
class SintelStereo(StereoMatchingDataset):
    """Sintel `Stereo Dataset <http://sintel.is.tue.mpg.de/stereo>`_.

    The dataset is expected to have the following structure: ::

        root
            Sintel
                training
                    final_left
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    final_right
                        scene2
                            img1.png
                            img2.png
                            ...
                        ...
                    disparities
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    occlusions
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    outofframe
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...

    Args:
        root (str or ``pathlib.Path``): Root directory where Sintel Stereo is located.
        pass_name (string): The name of the pass to use, either "final", "clean" or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    # Sintel ships occlusion / out-of-frame masks, so every sample carries a valid_mask.
    _has_built_in_disparity_mask = True

    def __init__(self, root: Union[str, Path], pass_name: str = "final", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(pass_name, "pass_name", valid_values=("final", "clean", "both"))

        root = Path(root) / "Sintel"
        pass_names = {
            "final": ["final"],
            "clean": ["clean"],
            "both": ["final", "clean"],
        }[pass_name]

        for p in pass_names:
            left_img_pattern = str(root / "training" / f"{p}_left" / "*" / "*.png")
            right_img_pattern = str(root / "training" / f"{p}_right" / "*" / "*.png")
            self._images += self._scan_pairs(left_img_pattern, right_img_pattern)

            disparity_pattern = str(root / "training" / "disparities" / "*" / "*.png")
            self._disparities += self._scan_pairs(disparity_pattern, None)

    def _read_occlusion_mask_paths(self, file_path: str) -> Tuple[str, str]:
        # Helper to derive the occlusion / out-of-frame mask paths from a
        # disparity map path. Fixed spelling of the previous
        # ``_get_occlussion_mask_paths`` name (private helper, only called below).
        # A path looks like .../.../.../training/disparities/scene1/img1.png and
        # we want .../.../.../training/occlusions/scene1/img1.png (and outofframe).
        fpath = Path(file_path)
        basename = fpath.name
        scenedir = fpath.parent
        # the parent of the scenedir is actually the disparity dir; its parent
        # ("training") also holds the mask directories
        sampledir = scenedir.parent.parent

        occlusion_path = str(sampledir / "occlusions" / scenedir.name / basename)
        outofframe_path = str(sampledir / "outofframe" / scenedir.name / basename)

        if not os.path.exists(occlusion_path):
            raise FileNotFoundError(f"Occlusion mask {occlusion_path} does not exist")

        if not os.path.exists(outofframe_path):
            raise FileNotFoundError(f"Out of frame mask {outofframe_path} does not exist")

        return occlusion_path, outofframe_path

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        if file_path is None:
            return None, None

        # disparity decoding as per Sintel instructions in the README provided with the dataset
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        r, g, b = np.split(disparity_map, 3, axis=-1)
        disparity_map = r * 4 + g / (2**6) + b / (2**14)
        # reshape into (C, H, W) format
        disparity_map = np.transpose(disparity_map, (2, 0, 1))
        # find the appropriate mask file paths for this disparity map
        occlusion_mask_path, out_of_frame_mask_path = self._read_occlusion_mask_paths(file_path)
        # pixels marked 0 in the occlusion mask are visible in both views
        valid_mask = np.asarray(Image.open(occlusion_mask_path)) == 0
        # pixels marked 0 in the out-of-frame mask stay inside the frame
        off_mask = np.asarray(Image.open(out_of_frame_mask_path)) == 0
        # a pixel is valid only if it is neither occluded nor out of frame
        valid_mask = np.logical_and(off_mask, valid_mask)
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
            The disparity is a numpy array of shape (1, H, W) and the images are PIL images whilst
            the valid_mask is a numpy array of shape (H, W).
        """
        return cast(T2, super().__getitem__(index))
1056
+
1057
+
1058
class InStereo2k(StereoMatchingDataset):
    """`InStereo2k <https://github.com/YuhuaXu/StereoDataset>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            InStereo2k
                train
                    scene1
                        left.png
                        right.png
                        left_disp.png
                        right_disp.png
                        ...
                    scene2
                    ...
                test
                    scene1
                        left.png
                        right.png
                        left_disp.png
                        right_disp.png
                        ...
                    scene2
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory where InStereo2k is located.
        split (string): Either "train" or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        # Validate ``split`` before it is used to build any path (the original
        # code built the path first; siblings such as ETH3DStereo validate first).
        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "InStereo2k" / split

        left_img_pattern = str(root / "*" / "left.png")
        right_img_pattern = str(root / "*" / "right.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        left_disparity_pattern = str(root / "*" / "left_disp.png")
        right_disparity_pattern = str(root / "*" / "right_disp.png")
        self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze disparity to (C, H, W); raw values are stored scaled by 1024
        disparity_map = disparity_map[None, :, :] / 1024.0
        # no validity mask is shipped with InStereo2k
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
            The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
            If a ``valid_mask`` is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
1125
+
1126
+
1127
class ETH3DStereo(StereoMatchingDataset):
    """ETH3D `Low-Res Two-View <https://www.eth3d.net/datasets>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            ETH3D
                two_view_training
                    scene1
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    scene2
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    ...
                two_view_training_gt
                    scene1
                        disp0GT.pfm
                        mask0nocc.png
                    scene2
                        disp0GT.pfm
                        mask0nocc.png
                    ...
                two_view_test
                    scene1
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    scene2
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    ...

    Args:
        root (str or ``pathlib.Path``): Root directory of the ETH3D Dataset.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    # the ground-truth directory ships per-pixel non-occlusion masks
    _has_built_in_disparity_mask = True

    def __init__(self, root: Union[str, Path], split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "ETH3D"

        # NOTE: the directory name matches the ETH3D archive layout
        # ("two_view_test", not "two_view_testing"); the docstring above was
        # fixed to agree with this.
        img_dir = "two_view_training" if split == "train" else "two_view_test"
        anot_dir = "two_view_training_gt"

        left_img_pattern = str(root / img_dir / "*" / "im0.png")
        right_img_pattern = str(root / img_dir / "*" / "im1.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        if split == "test":
            # no ground truth for the test split
            self._disparities = [(None, None) for _ in self._images]
        else:
            disparity_pattern = str(root / anot_dir / "*" / "disp0GT.pfm")
            self._disparities = self._scan_pairs(disparity_pattern, None)

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        # the non-occlusion mask lives next to the disparity map
        mask_path = Path(file_path).parent / "mask0nocc.png"
        valid_mask = Image.open(mask_path)
        valid_mask = np.asarray(valid_mask).astype(bool)
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
            The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
            ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
            generate a valid mask.
            Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T2, super().__getitem__(index))
vllm/lib/python3.10/site-packages/torchvision/datasets/caltech.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path
3
+ from pathlib import Path
4
+ from typing import Any, Callable, List, Optional, Tuple, Union
5
+
6
+ from PIL import Image
7
+
8
+ from .utils import download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class Caltech101(VisionDataset):
    """`Caltech 101 <https://data.caltech.edu/records/20086>`_ Dataset.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``caltech101`` exists or will be saved to if download is set to True.
        target_type (string or list, optional): Type of target to use, ``category`` or
            ``annotation``. Can also be a list to output a tuple with all specified
            target types. ``category`` represents the target class, and
            ``annotation`` is a list of points from a hand-generated outline.
            Defaults to ``category``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    .. warning::

        To download the dataset `gdown <https://github.com/wkentaro/gdown>`_ is required.
    """

    def __init__(
        self,
        root: Union[str, Path],
        target_type: Union[List[str], str] = "category",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        if isinstance(target_type, str):
            target_type = [target_type]
        self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation")) for t in target_type]

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
        self.categories.remove("BACKGROUND_Google")  # this is not a real class

        # The category names in "101_ObjectCategories" and "Annotations" do not
        # always match; this maps the known mismatches and defaults to the same
        # name for everything else.
        name_map = {
            "Faces": "Faces_2",
            "Faces_easy": "Faces_3",
            "Motorbikes": "Motorbikes_16",
            "airplanes": "Airplanes_Side_2",
        }
        self.annotation_categories = [name_map.get(category, category) for category in self.categories]

        # self.index holds the 1-based image number within its category and
        # self.y the integer class label for every sample.
        self.index: List[int] = []
        self.y = []
        for label, category in enumerate(self.categories):
            num_images = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", category)))
            self.index.extend(range(1, num_images + 1))
            self.y.extend(num_images * [label])

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """
        import scipy.io

        label = self.y[index]
        img = Image.open(
            os.path.join(
                self.root,
                "101_ObjectCategories",
                self.categories[label],
                f"image_{self.index[index]:04d}.jpg",
            )
        )

        targets: List[Any] = []
        for t in self.target_type:
            if t == "category":
                targets.append(label)
            elif t == "annotation":
                data = scipy.io.loadmat(
                    os.path.join(
                        self.root,
                        "Annotations",
                        self.annotation_categories[label],
                        f"annotation_{self.index[index]:04d}.mat",
                    )
                )
                targets.append(data["obj_contour"])
        target = tuple(targets) if len(targets) > 1 else targets[0]

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))

    def __len__(self) -> int:
        return len(self.index)

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        download_and_extract_archive(
            "https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp",
            self.root,
            filename="101_ObjectCategories.tar.gz",
            md5="b224c7392d521a49829488ab0f1120d9",
        )
        download_and_extract_archive(
            "https://drive.google.com/file/d/175kQy3UsZ0wUEHZjqkUDdNVssr7bgh_m",
            self.root,
            filename="Annotations.tar",
            md5="6f83eeb1f24d99cab4eb377263132c91",
        )

    def extra_repr(self) -> str:
        return "Target type: {target_type}".format(**self.__dict__)
151
+
152
+
153
class Caltech256(VisionDataset):
    """`Caltech 256 <https://data.caltech.edu/records/20087>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``caltech256`` exists or will be saved to if download is set to True.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
        # self.index holds the 1-based image number within its category and
        # self.y the integer class label for every sample.
        self.index: List[int] = []
        self.y = []
        for label, category in enumerate(self.categories):
            # only count actual images; category folders may hold stray files
            num_images = sum(
                1
                for entry in os.listdir(os.path.join(self.root, "256_ObjectCategories", category))
                if entry.endswith(".jpg")
            )
            self.index.extend(range(1, num_images + 1))
            self.y.extend(num_images * [label])

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target = self.y[index]
        # file names embed the 1-based category number and image number
        img = Image.open(
            os.path.join(
                self.root,
                "256_ObjectCategories",
                self.categories[target],
                f"{target + 1:03d}_{self.index[index]:04d}.jpg",
            )
        )

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))

    def __len__(self) -> int:
        return len(self.index)

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        download_and_extract_archive(
            "https://drive.google.com/file/d/1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK",
            self.root,
            filename="256_ObjectCategories.tar",
            md5="67b4f42ca05d46448c6bb8ecd2220f6d",
        )
vllm/lib/python3.10/site-packages/torchvision/datasets/celeba.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ from collections import namedtuple
4
+ from pathlib import Path
5
+ from typing import Any, Callable, List, Optional, Tuple, Union
6
+
7
+ import PIL
8
+ import torch
9
+
10
+ from .utils import check_integrity, download_file_from_google_drive, extract_archive, verify_str_arg
11
+ from .vision import VisionDataset
12
+
13
+ CSV = namedtuple("CSV", ["header", "index", "data"])
14
+
15
+
16
class CelebA(VisionDataset):
    """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
        split (string): One of {'train', 'valid', 'test', 'all'}.
            Accordingly dataset is selected.
        target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
            or ``landmarks``. Can also be a list to output a tuple with all specified target types.
            The targets represent:

                - ``attr`` (Tensor shape=(40,) dtype=int): binary (0, 1) labels for attributes
                - ``identity`` (int): label for each person (data points with the same identity are the same person)
                - ``bbox`` (Tensor shape=(4,) dtype=int): bounding box (x, y, width, height)
                - ``landmarks`` (Tensor shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
                  righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)

            Defaults to ``attr``. If empty, ``None`` will be returned as target.

        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    .. warning::

        To download the dataset `gdown <https://github.com/wkentaro/gdown>`_ is required.
    """

    base_folder = "celeba"
    # There currently does not appear to be an easy way to extract 7z in python (without introducing additional
    # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
    # right now.
    file_list = [
        # File ID                                      MD5 Hash                            Filename
        ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
        # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc","b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
        # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
        ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
        ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
        ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
        ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
        # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
        ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
    ]

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        target_type: Union[List[str], str] = "attr",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = split
        # normalize target_type to a list of type names
        if isinstance(target_type, list):
            self.target_type = target_type
        else:
            self.target_type = [target_type]

        if not self.target_type and self.target_transform is not None:
            raise RuntimeError("target_transform is specified but target_type is empty")

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # map the split name onto the integer code used in list_eval_partition.txt;
        # None means "all" (no filtering)
        split_map = {
            "train": 0,
            "valid": 1,
            "test": 2,
            "all": None,
        }
        split_ = split_map[verify_str_arg(split.lower(), "split", ("train", "valid", "test", "all"))]
        splits = self._load_csv("list_eval_partition.txt")
        identity = self._load_csv("identity_CelebA.txt")
        bbox = self._load_csv("list_bbox_celeba.txt", header=1)
        landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1)
        attr = self._load_csv("list_attr_celeba.txt", header=1)

        # either a no-op slice (keep everything) or a boolean tensor over rows
        mask = slice(None) if split_ is None else (splits.data == split_).squeeze()

        # NOTE: comparing a tensor mask against slice(None) evaluates to False,
        # so only the "all" case takes the first branch here.
        if mask == slice(None):  # if split == "all"
            self.filename = splits.index
        else:
            # boolean-mask the filename list via the indices of the kept rows
            self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]
        self.identity = identity.data[mask]
        self.bbox = bbox.data[mask]
        self.landmarks_align = landmarks_align.data[mask]
        self.attr = attr.data[mask]
        # map from {-1, 1} to {0, 1}
        self.attr = torch.div(self.attr + 1, 2, rounding_mode="floor")
        self.attr_names = attr.header

    def _load_csv(
        self,
        filename: str,
        header: Optional[int] = None,
    ) -> CSV:
        """Parse one of the space-separated CelebA annotation files.

        The first column of each row is the image filename (kept as the CSV
        ``index``); the remaining columns are parsed as ints into a tensor.
        ``header`` is the row number of the column-name row, if any.
        """
        with open(os.path.join(self.root, self.base_folder, filename)) as csv_file:
            data = list(csv.reader(csv_file, delimiter=" ", skipinitialspace=True))

        if header is not None:
            headers = data[header]
            data = data[header + 1 :]
        else:
            headers = []

        indices = [row[0] for row in data]
        data = [row[1:] for row in data]
        data_int = [list(map(int, i)) for i in data]

        return CSV(headers, indices, torch.tensor(data_int))

    def _check_integrity(self) -> bool:
        for (_, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            # Allow original archive to be deleted (zip and 7z)
            # Only need the extracted images
            if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
                return False

        # Should check a hash of the images
        return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))

    def download(self) -> None:
        """Download the annotation files and image archive, then extract the images."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        for (file_id, md5, filename) in self.file_list:
            download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)

        extract_archive(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))

        # collect one entry per requested target type, in the requested order
        target: Any = []
        for t in self.target_type:
            if t == "attr":
                target.append(self.attr[index, :])
            elif t == "identity":
                target.append(self.identity[index, 0])
            elif t == "bbox":
                target.append(self.bbox[index, :])
            elif t == "landmarks":
                target.append(self.landmarks_align[index, :])
            else:
                # TODO: refactor with utils.verify_str_arg
                raise ValueError(f'Target type "{t}" is not recognized.')

        if self.transform is not None:
            X = self.transform(X)

        if target:
            # single target type -> bare value; multiple -> tuple
            target = tuple(target) if len(target) > 1 else target[0]

            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            # empty target_type list -> no target at all
            target = None

        return X, target

    def __len__(self) -> int:
        return len(self.attr)

    def extra_repr(self) -> str:
        lines = ["Target type: {target_type}", "Split: {split}"]
        return "\n".join(lines).format(**self.__dict__)
vllm/lib/python3.10/site-packages/torchvision/datasets/cifar.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ import pickle
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Optional, Tuple, Union
5
+
6
+ import numpy as np
7
+ from PIL import Image
8
+
9
+ from .utils import check_integrity, download_and_extract_archive
10
+ from .vision import VisionDataset
11
+
12
+
13
+ class CIFAR10(VisionDataset):
14
+ """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
15
+
16
+ Args:
17
+ root (str or ``pathlib.Path``): Root directory of dataset where directory
18
+ ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
19
+ train (bool, optional): If True, creates dataset from training set, otherwise
20
+ creates from test set.
21
+ transform (callable, optional): A function/transform that takes in a PIL image
22
+ and returns a transformed version. E.g, ``transforms.RandomCrop``
23
+ target_transform (callable, optional): A function/transform that takes in the
24
+ target and transforms it.
25
+ download (bool, optional): If true, downloads the dataset from the internet and
26
+ puts it in root directory. If dataset is already downloaded, it is not
27
+ downloaded again.
28
+
29
+ """
30
+
31
+ base_folder = "cifar-10-batches-py"
32
+ url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
33
+ filename = "cifar-10-python.tar.gz"
34
+ tgz_md5 = "c58f30108f718f92721af3b95e74349a"
35
+ train_list = [
36
+ ["data_batch_1", "c99cafc152244af753f735de768cd75f"],
37
+ ["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"],
38
+ ["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"],
39
+ ["data_batch_4", "634d18415352ddfa80567beed471001a"],
40
+ ["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"],
41
+ ]
42
+
43
+ test_list = [
44
+ ["test_batch", "40351d587109b95175f43aff81a1287e"],
45
+ ]
46
+ meta = {
47
+ "filename": "batches.meta",
48
+ "key": "label_names",
49
+ "md5": "5ff9c542aee3614f3951f8cda6e48888",
50
+ }
51
+
52
+ def __init__(
53
+ self,
54
+ root: Union[str, Path],
55
+ train: bool = True,
56
+ transform: Optional[Callable] = None,
57
+ target_transform: Optional[Callable] = None,
58
+ download: bool = False,
59
+ ) -> None:
60
+
61
+ super().__init__(root, transform=transform, target_transform=target_transform)
62
+
63
+ self.train = train # training set or test set
64
+
65
+ if download:
66
+ self.download()
67
+
68
+ if not self._check_integrity():
69
+ raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
70
+
71
+ if self.train:
72
+ downloaded_list = self.train_list
73
+ else:
74
+ downloaded_list = self.test_list
75
+
76
+ self.data: Any = []
77
+ self.targets = []
78
+
79
+ # now load the picked numpy arrays
80
+ for file_name, checksum in downloaded_list:
81
+ file_path = os.path.join(self.root, self.base_folder, file_name)
82
+ with open(file_path, "rb") as f:
83
+ entry = pickle.load(f, encoding="latin1")
84
+ self.data.append(entry["data"])
85
+ if "labels" in entry:
86
+ self.targets.extend(entry["labels"])
87
+ else:
88
+ self.targets.extend(entry["fine_labels"])
89
+
90
+ self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
91
+ self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
92
+
93
+ self._load_meta()
94
+
95
+ def _load_meta(self) -> None:
96
+ path = os.path.join(self.root, self.base_folder, self.meta["filename"])
97
+ if not check_integrity(path, self.meta["md5"]):
98
+ raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it")
99
+ with open(path, "rb") as infile:
100
+ data = pickle.load(infile, encoding="latin1")
101
+ self.classes = data[self.meta["key"]]
102
+ self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
103
+
104
+ def __getitem__(self, index: int) -> Tuple[Any, Any]:
105
+ """
106
+ Args:
107
+ index (int): Index
108
+
109
+ Returns:
110
+ tuple: (image, target) where target is index of the target class.
111
+ """
112
+ img, target = self.data[index], self.targets[index]
113
+
114
+ # doing this so that it is consistent with all other datasets
115
+ # to return a PIL Image
116
+ img = Image.fromarray(img)
117
+
118
+ if self.transform is not None:
119
+ img = self.transform(img)
120
+
121
+ if self.target_transform is not None:
122
+ target = self.target_transform(target)
123
+
124
+ return img, target
125
+
126
+ def __len__(self) -> int:
127
+ return len(self.data)
128
+
129
+ def _check_integrity(self) -> bool:
130
+ for filename, md5 in self.train_list + self.test_list:
131
+ fpath = os.path.join(self.root, self.base_folder, filename)
132
+ if not check_integrity(fpath, md5):
133
+ return False
134
+ return True
135
+
136
+ def download(self) -> None:
137
+ if self._check_integrity():
138
+ print("Files already downloaded and verified")
139
+ return
140
+ download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
141
+
142
+ def extra_repr(self) -> str:
143
+ split = "Train" if self.train is True else "Test"
144
+ return f"Split: {split}"
145
+
146
+
147
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset.
    """

    # Only the download locations, checksums and metadata differ from CIFAR10;
    # all loading logic is inherited.
    base_folder = "cifar-100-python"
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    # CIFAR-100 ships a single training batch and a single test batch.
    train_list = [
        ["train", "16019d7e3df5f24257cddd939b257f8d"],
    ]

    test_list = [
        ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"],
    ]
    # "fine_label_names" selects the 100 fine-grained class names from the meta file.
    meta = {
        "filename": "meta",
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
vllm/lib/python3.10/site-packages/torchvision/datasets/cityscapes.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from collections import namedtuple
4
+ from pathlib import Path
5
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
6
+
7
+ from PIL import Image
8
+
9
+ from .utils import extract_archive, iterable_to_str, verify_str_arg
10
+ from .vision import VisionDataset
11
+
12
+
13
class Cityscapes(VisionDataset):
    """`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory ``leftImg8bit``
            and ``gtFine`` or ``gtCoarse`` are located.
        split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="fine"
            otherwise ``train``, ``train_extra`` or ``val``
        mode (string, optional): The quality mode to use, ``fine`` or ``coarse``
        target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon``
            or ``color``. Can also be a list to output a tuple with all specified target types.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.

    Examples:

        Get semantic segmentation target

        .. code-block:: python

            dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
                                 target_type='semantic')

            img, smnt = dataset[0]

        Get multiple targets

        .. code-block:: python

            dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
                                 target_type=['instance', 'color', 'polygon'])

            img, (inst, col, poly) = dataset[0]

        Validate on the "coarse" set

        .. code-block:: python

            dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',
                                 target_type='semantic')

            img, smnt = dataset[0]
    """

    # Based on https://github.com/mcordts/cityscapesScripts
    CityscapesClass = namedtuple(
        "CityscapesClass",
        ["name", "id", "train_id", "category", "category_id", "has_instances", "ignore_in_eval", "color"],
    )

    classes = [
        CityscapesClass("unlabeled", 0, 255, "void", 0, False, True, (0, 0, 0)),
        CityscapesClass("ego vehicle", 1, 255, "void", 0, False, True, (0, 0, 0)),
        CityscapesClass("rectification border", 2, 255, "void", 0, False, True, (0, 0, 0)),
        CityscapesClass("out of roi", 3, 255, "void", 0, False, True, (0, 0, 0)),
        CityscapesClass("static", 4, 255, "void", 0, False, True, (0, 0, 0)),
        CityscapesClass("dynamic", 5, 255, "void", 0, False, True, (111, 74, 0)),
        CityscapesClass("ground", 6, 255, "void", 0, False, True, (81, 0, 81)),
        CityscapesClass("road", 7, 0, "flat", 1, False, False, (128, 64, 128)),
        CityscapesClass("sidewalk", 8, 1, "flat", 1, False, False, (244, 35, 232)),
        CityscapesClass("parking", 9, 255, "flat", 1, False, True, (250, 170, 160)),
        CityscapesClass("rail track", 10, 255, "flat", 1, False, True, (230, 150, 140)),
        CityscapesClass("building", 11, 2, "construction", 2, False, False, (70, 70, 70)),
        CityscapesClass("wall", 12, 3, "construction", 2, False, False, (102, 102, 156)),
        CityscapesClass("fence", 13, 4, "construction", 2, False, False, (190, 153, 153)),
        CityscapesClass("guard rail", 14, 255, "construction", 2, False, True, (180, 165, 180)),
        CityscapesClass("bridge", 15, 255, "construction", 2, False, True, (150, 100, 100)),
        CityscapesClass("tunnel", 16, 255, "construction", 2, False, True, (150, 120, 90)),
        CityscapesClass("pole", 17, 5, "object", 3, False, False, (153, 153, 153)),
        CityscapesClass("polegroup", 18, 255, "object", 3, False, True, (153, 153, 153)),
        CityscapesClass("traffic light", 19, 6, "object", 3, False, False, (250, 170, 30)),
        CityscapesClass("traffic sign", 20, 7, "object", 3, False, False, (220, 220, 0)),
        CityscapesClass("vegetation", 21, 8, "nature", 4, False, False, (107, 142, 35)),
        CityscapesClass("terrain", 22, 9, "nature", 4, False, False, (152, 251, 152)),
        CityscapesClass("sky", 23, 10, "sky", 5, False, False, (70, 130, 180)),
        CityscapesClass("person", 24, 11, "human", 6, True, False, (220, 20, 60)),
        CityscapesClass("rider", 25, 12, "human", 6, True, False, (255, 0, 0)),
        CityscapesClass("car", 26, 13, "vehicle", 7, True, False, (0, 0, 142)),
        CityscapesClass("truck", 27, 14, "vehicle", 7, True, False, (0, 0, 70)),
        CityscapesClass("bus", 28, 15, "vehicle", 7, True, False, (0, 60, 100)),
        CityscapesClass("caravan", 29, 255, "vehicle", 7, True, True, (0, 0, 90)),
        CityscapesClass("trailer", 30, 255, "vehicle", 7, True, True, (0, 0, 110)),
        CityscapesClass("train", 31, 16, "vehicle", 7, True, False, (0, 80, 100)),
        CityscapesClass("motorcycle", 32, 17, "vehicle", 7, True, False, (0, 0, 230)),
        CityscapesClass("bicycle", 33, 18, "vehicle", 7, True, False, (119, 11, 32)),
        CityscapesClass("license plate", -1, -1, "vehicle", 7, False, True, (0, 0, 142)),
    ]

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        mode: str = "fine",
        target_type: Union[List[str], str] = "instance",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        # "fine" / "coarse" map to the on-disk annotation folder names.
        self.mode = "gtFine" if mode == "fine" else "gtCoarse"
        self.images_dir = os.path.join(self.root, "leftImg8bit", split)
        self.targets_dir = os.path.join(self.root, self.mode, split)
        self.target_type = target_type
        self.split = split
        self.images = []
        self.targets = []

        verify_str_arg(mode, "mode", ("fine", "coarse"))
        # The valid splits depend on the annotation quality mode.
        if mode == "fine":
            valid_modes = ("train", "test", "val")
        else:
            valid_modes = ("train", "train_extra", "val")
        msg = "Unknown value '{}' for argument split if mode is '{}'. Valid values are {{{}}}."
        msg = msg.format(split, mode, iterable_to_str(valid_modes))
        verify_str_arg(split, "split", valid_modes, msg)

        # Normalize to a list so the rest of the class can iterate uniformly.
        if not isinstance(target_type, list):
            self.target_type = [target_type]
        # Validate each requested target type up front (raises on the first bad value).
        # A plain loop is used instead of a side-effect list comprehension.
        for value in self.target_type:
            verify_str_arg(value, "target_type", ("instance", "semantic", "polygon", "color"))

        if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
            # Folders are missing: fall back to extracting the official zip archives
            # if they are present next to ``root``.
            if split == "train_extra":
                image_dir_zip = os.path.join(self.root, "leftImg8bit_trainextra.zip")
            else:
                image_dir_zip = os.path.join(self.root, "leftImg8bit_trainvaltest.zip")

            if self.mode == "gtFine":
                target_dir_zip = os.path.join(self.root, f"{self.mode}_trainvaltest.zip")
            elif self.mode == "gtCoarse":
                target_dir_zip = os.path.join(self.root, f"{self.mode}.zip")

            if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip):
                extract_archive(from_path=image_dir_zip, to_path=self.root)
                extract_archive(from_path=target_dir_zip, to_path=self.root)
            else:
                raise RuntimeError(
                    "Dataset not found or incomplete. Please make sure all required folders for the"
                    ' specified "split" and "mode" are inside the "root" directory'
                )

        # Images are grouped into one sub-directory per city; pair every image with
        # the corresponding annotation file(s) for each requested target type.
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                target_types = []
                for t in self.target_type:
                    target_name = "{}_{}".format(
                        file_name.split("_leftImg8bit")[0], self._get_target_suffix(self.mode, t)
                    )
                    target_types.append(os.path.join(target_dir, target_name))

                self.images.append(os.path.join(img_dir, file_name))
                self.targets.append(target_types)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
            than one item. Otherwise, target is a json object if target_type="polygon", else the image segmentation.
        """

        image = Image.open(self.images[index]).convert("RGB")

        targets: Any = []
        for i, t in enumerate(self.target_type):
            if t == "polygon":
                target = self._load_json(self.targets[index][i])
            else:
                target = Image.open(self.targets[index][i])  # type: ignore[assignment]

            targets.append(target)

        # A single requested target is returned bare, not wrapped in a tuple.
        target = tuple(targets) if len(targets) > 1 else targets[0]

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self) -> int:
        return len(self.images)

    def extra_repr(self) -> str:
        lines = ["Split: {split}", "Mode: {mode}", "Type: {target_type}"]
        return "\n".join(lines).format(**self.__dict__)

    def _load_json(self, path: str) -> Dict[str, Any]:
        """Load a polygon annotation file as a plain dict."""
        with open(path) as file:
            data = json.load(file)
        return data

    def _get_target_suffix(self, mode: str, target_type: str) -> str:
        """Return the annotation file suffix for the given quality mode and target type."""
        if target_type == "instance":
            return f"{mode}_instanceIds.png"
        elif target_type == "semantic":
            return f"{mode}_labelIds.png"
        elif target_type == "color":
            return f"{mode}_color.png"
        else:
            return f"{mode}_polygons.json"
vllm/lib/python3.10/site-packages/torchvision/datasets/clevr.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import pathlib
3
+ from typing import Any, Callable, List, Optional, Tuple, Union
4
+ from urllib.parse import urlparse
5
+
6
+ from PIL import Image
7
+
8
+ from .utils import download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class CLEVRClassification(VisionDataset):
    """`CLEVR <https://cs.stanford.edu/people/jcjohns/clevr/>`_ classification dataset.

    The number of objects in each scene is used as the classification label
    (``None`` for the test split, which ships without scene annotations).

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory ``root/clevr`` exists or will be
            saved to if download is set to True.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.
            If dataset is already downloaded, it is not downloaded again.
    """

    _URL = "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"
    _MD5 = "b11922020e72d0cd9154779b2d3d07d2"

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._base_folder = pathlib.Path(self.root) / "clevr"
        # The zip extracts into a folder named after the archive (e.g. "CLEVR_v1.0").
        self._data_folder = self._base_folder / pathlib.Path(urlparse(self._URL).path).stem

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self._image_files = sorted(self._data_folder.joinpath("images", self._split).glob("*"))

        self._labels: List[Optional[int]]
        if self._split == "test":
            # The test split has no scene annotations, so every label is None.
            self._labels = [None] * len(self._image_files)
        else:
            scenes_path = self._data_folder / "scenes" / f"CLEVR_{self._split}_scenes.json"
            with open(scenes_path) as fh:
                scenes = json.load(fh)["scenes"]
            object_counts = {scene["image_filename"]: len(scene["objects"]) for scene in scenes}
            self._labels = [object_counts[path.name] for path in self._image_files]

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the (image, object-count label) pair at ``idx``."""
        path = self._image_files[idx]
        target = self._labels[idx]

        sample = Image.open(path).convert("RGB")

        if self.transform:
            sample = self.transform(sample)
        if self.target_transform:
            target = self.target_transform(target)

        return sample, target

    def _check_exists(self) -> bool:
        """True when the extracted data folder is already on disk."""
        folder = self._data_folder
        return folder.exists() and folder.is_dir()

    def _download(self) -> None:
        if not self._check_exists():
            download_and_extract_archive(self._URL, str(self._base_folder), md5=self._MD5)

    def extra_repr(self) -> str:
        return f"split={self._split}"
vllm/lib/python3.10/site-packages/torchvision/datasets/coco.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ from pathlib import Path
3
+ from typing import Any, Callable, List, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+
7
+ from .vision import VisionDataset
8
+
9
+
10
class CocoDetection(VisionDataset):
    """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.

    It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    def __init__(
        self,
        root: Union[str, Path],
        annFile: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        # Imported lazily so torchvision has no hard dependency on pycocotools.
        from pycocotools.coco import COCO

        self.coco = COCO(annFile)
        self.ids = sorted(self.coco.imgs.keys())

    def _load_image(self, id: int) -> Image.Image:
        """Open the image for a COCO image id as RGB."""
        file_name = self.coco.loadImgs(id)[0]["file_name"]
        return Image.open(os.path.join(self.root, file_name)).convert("RGB")

    def _load_target(self, id: int) -> List[Any]:
        """Return the list of annotation dicts for a COCO image id."""
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        if not isinstance(index, int):
            raise ValueError(f"Index must be of type integer, got {type(index)} instead.")

        image_id = self.ids[index]
        image = self._load_image(image_id)
        target = self._load_target(image_id)

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self) -> int:
        return len(self.ids)
63
+
64
+
65
class CocoCaptions(CocoDetection):
    """`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.

    It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.

    Example:

        .. code:: python

            import torchvision.datasets as dset
            import torchvision.transforms as transforms
            cap = dset.CocoCaptions(root = 'dir where images are',
                                    annFile = 'json annotation file',
                                    transform=transforms.PILToTensor())

            print('Number of samples: ', len(cap))
            img, target = cap[3]  # load 4th sample

            print("Image Size: ", img.size())
            print(target)

        Output: ::

            Number of samples: 82783
            Image Size: (3L, 427L, 640L)
            [u'A plane emitting smoke stream flying over a mountain.',
            u'A plane darts across a bright blue sky behind a mountain covered in snow',
            u'A plane leaves a contrail above the snowy mountain top.',
            u'A mountain that has a plane flying overheard in the distance.',
            u'A mountain view with a plume of smoke in the background']

    """

    def _load_target(self, id: int) -> List[str]:
        """Return only the caption strings from the image's annotations."""
        annotations = super()._load_target(id)
        return [annotation["caption"] for annotation in annotations]
vllm/lib/python3.10/site-packages/torchvision/datasets/country211.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Callable, Optional, Union
3
+
4
+ from .folder import ImageFolder
5
+ from .utils import download_and_extract_archive, verify_str_arg
6
+
7
+
8
class Country211(ImageFolder):
    """`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.

    This dataset was built by filtering the images from the YFCC100m dataset
    that have GPS coordinate corresponding to a ISO-3166 country code. The
    dataset is balanced by sampling 150 train images, 50 validation images, and
    100 test images for each country.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into
            ``root/country211/``. If dataset is already downloaded, it is not downloaded again.
    """

    _URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
    _MD5 = "84988d7644798601126c29e9877aab6a"

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "valid", "test"))

        root = Path(root).expanduser()
        # ``self.root`` must be set before ``_download`` below, which extracts into it.
        self.root = str(root)
        self._base_folder = root / "country211"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
        # ``ImageFolder.__init__`` received the split folder as its root; re-assert the
        # user-supplied root so ``self.root`` reflects the dataset root, not the split.
        self.root = str(root)

    def _check_exists(self) -> bool:
        # Only the folder's presence is checked; file integrity is verified at download time.
        return self._base_folder.exists() and self._base_folder.is_dir()

    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
vllm/lib/python3.10/site-packages/torchvision/datasets/dtd.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pathlib
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import PIL.Image
6
+
7
+ from .utils import download_and_extract_archive, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
class DTD(VisionDataset):
    """`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.

            .. note::

                The partition only changes which split each image belongs to. Thus, regardless of the selected
                partition, combining all splits will result in all images.

        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.

    Raises:
        ValueError: If ``partition`` is not an integer in ``[1, 10]``.
        RuntimeError: If the dataset is missing and ``download`` is False.
    """

    _URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
    _MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        partition: int = 1,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        # BUGFIX: the original used ``and`` here, so an out-of-range int (e.g. 11) or an
        # in-range non-int was silently accepted and only failed later with a confusing
        # FileNotFoundError. Reject anything that is not an int in [1, 10].
        if not isinstance(partition, int) or not (1 <= partition <= 10):
            raise ValueError(
                f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
                f"but got {partition} instead"
            )
        self._partition = partition

        super().__init__(root, transform=transform, target_transform=target_transform)
        self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
        self._data_folder = self._base_folder / "dtd"
        self._meta_folder = self._data_folder / "labels"
        self._images_folder = self._data_folder / "images"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # Each line of the split file is "<class>/<image name>"; collect image paths
        # and class names in parallel.
        self._image_files = []
        classes = []
        with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
            for line in file:
                cls, name = line.strip().split("/")
                self._image_files.append(self._images_folder.joinpath(cls, name))
                classes.append(cls)

        self.classes = sorted(set(classes))
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        self._labels = [self.class_to_idx[cls] for cls in classes]

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the (image, class-index) pair at ``idx``."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        return f"split={self._split}, partition={self._partition}"

    def _check_exists(self) -> bool:
        return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)

    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
vllm/lib/python3.10/site-packages/torchvision/datasets/eurosat.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Callable, Optional, Union
4
+
5
+ from .folder import ImageFolder
6
+ from .utils import download_and_extract_archive
7
+
8
+
9
class EuroSAT(ImageFolder):
    """RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.

    For the MS version of the dataset, see
    `TorchGeo <https://torchgeo.readthedocs.io/en/stable/api/datasets.html#eurosat>`__.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``root/eurosat`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    def __init__(
        self,
        root: Union[str, Path],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        # ``self.root`` must be set before ``download``/``_check_exists`` below use it.
        self.root = os.path.expanduser(root)
        self._base_folder = os.path.join(self.root, "eurosat")
        # The archive extracts into a folder literally named "2750".
        self._data_folder = os.path.join(self._base_folder, "2750")

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
        # ``ImageFolder.__init__`` received the data folder as its root; re-assert the
        # user-supplied root so ``self.root`` reflects the dataset root.
        self.root = os.path.expanduser(root)

    def __len__(self) -> int:
        return len(self.samples)

    def _check_exists(self) -> bool:
        return os.path.exists(self._data_folder)

    def download(self) -> None:
        # Skip the download when the extracted data folder is already present.
        if self._check_exists():
            return

        os.makedirs(self._base_folder, exist_ok=True)
        download_and_extract_archive(
            "https://huggingface.co/datasets/torchgeo/eurosat/resolve/c877bcd43f099cd0196738f714544e355477f3fd/EuroSAT.zip",
            download_root=self._base_folder,
            md5="c8fa014336c82ac7804f0398fcb19387",
        )
vllm/lib/python3.10/site-packages/torchvision/datasets/fakedata.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Callable, Optional, Tuple
2
+
3
+ import torch
4
+
5
+ from .. import transforms
6
+ from .vision import VisionDataset
7
+
8
+
9
class FakeData(VisionDataset):
    """A fake dataset that returns randomly generated images and returns them as PIL images

    Args:
        size (int, optional): Size of the dataset. Default: 1000 images
        image_size(tuple, optional): Size if the returned images. Default: (3, 224, 224)
        num_classes(int, optional): Number of classes in the dataset. Default: 10
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        random_offset (int): Offsets the index-based random seed used to
            generate each image. Default: 0

    """

    def __init__(
        self,
        size: int = 1000,
        image_size: Tuple[int, int, int] = (3, 224, 224),
        num_classes: int = 10,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        random_offset: int = 0,
    ) -> None:
        super().__init__(transform=transform, target_transform=target_transform)
        self.size = size
        self.image_size = image_size
        self.num_classes = num_classes
        self.random_offset = random_offset

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        if index >= len(self):
            raise IndexError(f"{self.__class__.__name__} index out of range")

        # Seed deterministically from the index so the same index always yields the
        # same sample, then restore the global RNG state so callers are unaffected.
        saved_rng_state = torch.get_rng_state()
        torch.manual_seed(index + self.random_offset)
        pixels = torch.randn(*self.image_size)
        label = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
        torch.set_rng_state(saved_rng_state)

        # Convert the raw tensor to a PIL image before applying user transforms.
        sample = transforms.ToPILImage()(pixels)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            label = self.target_transform(label)

        return sample, label.item()

    def __len__(self) -> int:
        return self.size
vllm/lib/python3.10/site-packages/torchvision/datasets/fer2013.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import pathlib
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import torch
6
+ from PIL import Image
7
+
8
+ from .utils import check_integrity, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class FER2013(VisionDataset):
    """`FER2013
    <https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge>`_ Dataset.

    .. note::
        This dataset can return test labels only if ``fer2013.csv`` OR
        ``icml_face_data.csv`` are present in ``root/fer2013/``. If only
        ``train.csv`` and ``test.csv`` are present, the test labels are set to
        ``None``.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``root/fer2013`` exists. This directory may contain either
            ``fer2013.csv``, ``icml_face_data.csv``, or both ``train.csv`` and
            ``test.csv``. Precendence is given in that order, i.e. if
            ``fer2013.csv`` is present then the rest of the files will be
            ignored. All these (combinations of) files contain the same data and
            are supported for convenience, but only ``fer2013.csv`` and
            ``icml_face_data.csv`` are able to return non-None test labels.
        split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
    """

    # Mapping of logical source name -> (csv file name, expected MD5).
    _RESOURCES = {
        "train": ("train.csv", "3f0dfb3d3fd99c811a1299cb947e3131"),
        "test": ("test.csv", "b02c2298636a634e8c2faabbf3ea9a23"),
        # The fer2013.csv and icml_face_data.csv files contain both train and
        # tests instances, and unlike test.csv they contain the labels for the
        # test instances. We give these 2 files precedence over train.csv and
        # test.csv. And yes, they both contain the same data, but with different
        # column names (note the spaces) and ordering:
        # $ head -n 1 fer2013.csv icml_face_data.csv train.csv test.csv
        # ==> fer2013.csv <==
        # emotion,pixels,Usage
        #
        # ==> icml_face_data.csv <==
        # emotion, Usage, pixels
        #
        # ==> train.csv <==
        # emotion,pixels
        #
        # ==> test.csv <==
        # pixels
        "fer": ("fer2013.csv", "f8428a1edbd21e88f42c73edd2a14f95"),
        "icml": ("icml_face_data.csv", "b114b9e04e6949e5fe8b6a98b3892b1d"),
    }

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "test"))
        super().__init__(root, transform=transform, target_transform=target_transform)

        base_folder = pathlib.Path(self.root) / "fer2013"
        # Pick the data source by precedence: fer2013.csv > icml_face_data.csv > split csv.
        use_fer_file = (base_folder / self._RESOURCES["fer"][0]).exists()
        use_icml_file = not use_fer_file and (base_folder / self._RESOURCES["icml"][0]).exists()
        file_name, md5 = self._RESOURCES["fer" if use_fer_file else "icml" if use_icml_file else self._split]
        data_file = base_folder / file_name
        if not check_integrity(str(data_file), md5=md5):
            raise RuntimeError(
                f"{file_name} not found in {base_folder} or corrupted. "
                f"You can download it from "
                f"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
            )

        # The icml file's column headers carry a leading space (see the table above).
        pixels_key = " pixels" if use_icml_file else "pixels"
        usage_key = " Usage" if use_icml_file else "Usage"

        def get_img(row):
            # Pixels are stored as a space-separated string of 48*48 grayscale values.
            return torch.tensor([int(idx) for idx in row[pixels_key].split()], dtype=torch.uint8).reshape(48, 48)

        def get_label(row):
            # Only the combined files (or the train split) carry labels; otherwise None.
            if use_fer_file or use_icml_file or self._split == "train":
                return int(row["emotion"])
            else:
                return None

        # NOTE: the generators below lazily read from ``file``, so the list
        # comprehension that consumes them must stay inside this ``with`` block.
        with open(data_file, "r", newline="") as file:
            rows = (row for row in csv.DictReader(file))

            if use_fer_file or use_icml_file:
                # Combined files hold both splits; filter rows by their Usage column.
                valid_keys = ("Training",) if self._split == "train" else ("PublicTest", "PrivateTest")
                rows = (row for row in rows if row[usage_key] in valid_keys)

            self._samples = [(get_img(row), get_label(row)) for row in rows]

    def __len__(self) -> int:
        return len(self._samples)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        # Stored as a uint8 tensor; converted to a PIL image on access.
        image_tensor, target = self._samples[idx]
        image = Image.fromarray(image_tensor.numpy())

        if self.transform is not None:
            image = self.transform(image)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return image, target

    def extra_repr(self) -> str:
        return f"split={self._split}"
vllm/lib/python3.10/site-packages/torchvision/datasets/fgvc_aircraft.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from pathlib import Path
5
+ from typing import Any, Callable, Optional, Tuple, Union
6
+
7
+ import PIL.Image
8
+
9
+ from .utils import download_and_extract_archive, verify_str_arg
10
+ from .vision import VisionDataset
11
+
12
+
13
class FGVCAircraft(VisionDataset):
    """`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.

    The dataset contains 10,000 images of aircraft, with 100 images for each of 100
    different aircraft model variants, most of which are airplanes.
    Aircraft models are organized in a three-levels hierarchy. The three levels, from
    finer to coarser, are:

    - ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
      indistinguishable into one class. The dataset comprises 100 different variants.
    - ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
    - ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.

    Args:
        root (str or ``pathlib.Path``): Root directory of the FGVC Aircraft dataset.
        split (string, optional): The dataset split, supports ``train``, ``val``,
            ``trainval`` and ``test``.
        annotation_level (str, optional): The annotation level, supports ``variant``,
            ``family`` and ``manufacturer``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "trainval",
        annotation_level: str = "variant",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
        self._annotation_level = verify_str_arg(
            annotation_level, "annotation_level", ("variant", "family", "manufacturer")
        )

        self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # Each annotation level ships its own list of class names.
        annotation_file = os.path.join(
            self._data_path,
            "data",
            {
                "variant": "variants.txt",
                "family": "families.txt",
                "manufacturer": "manufacturers.txt",
            }[self._annotation_level],
        )
        with open(annotation_file, "r") as f:
            self.classes = [line.strip() for line in f]

        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        image_data_folder = os.path.join(self._data_path, "data", "images")
        labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")

        self._image_files = []
        self._labels = []

        with open(labels_file, "r") as f:
            for line in f:
                # Split on the first space only: label names may themselves contain spaces.
                image_name, label_name = line.strip().split(" ", 1)
                self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
                self._labels.append(self.class_to_idx[label_name])

    def __len__(self) -> int:
        """Return the number of images in the selected split."""
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, label)`` for *idx*, with transforms applied."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def _download(self) -> None:
        """
        Download the FGVC Aircraft dataset archive and extract it under root.
        """
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, self.root)

    def _check_exists(self) -> bool:
        """Return True if the extracted dataset directory is present.

        ``os.path.isdir()`` already returns False for non-existent paths, so a
        separate ``os.path.exists()`` check is redundant.
        """
        return os.path.isdir(self._data_path)
vllm/lib/python3.10/site-packages/torchvision/datasets/flickr.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ from collections import defaultdict
4
+ from html.parser import HTMLParser
5
+ from pathlib import Path
6
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
7
+
8
+ from PIL import Image
9
+
10
+ from .vision import VisionDataset
11
+
12
+
13
class Flickr8kParser(HTMLParser):
    """Parser for extracting captions from the Flickr8k dataset web page.

    Feeds the annotation HTML through :class:`html.parser.HTMLParser` and
    collects, per local image file, the list of captions found in the page's
    captions table.
    """

    def __init__(self, root: Union[str, Path]) -> None:
        super().__init__()

        # Directory that holds the Flickr8k image files.
        self.root = root

        # Data structure to store captions: local image path -> list of captions.
        self.annotations: Dict[str, List[str]] = {}

        # State variables updated as tags/data stream through the parser.
        self.in_table = False
        self.current_tag: Optional[str] = None
        self.current_img: Optional[str] = None

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
        # Track the most recent opening tag; captions live in <a>/<li> elements.
        self.current_tag = tag

        if tag == "table":
            self.in_table = True

    def handle_endtag(self, tag: str) -> None:
        self.current_tag = None

        if tag == "table":
            self.in_table = False

    def handle_data(self, data: str) -> None:
        # Only text inside the captions table is relevant.
        if self.in_table:
            if data == "Image Not Found":
                # Placeholder rows carry no image; drop any following captions.
                self.current_img = None
            elif self.current_tag == "a":
                # Anchor text holds the image URL; resolve the id to the local
                # file by globbing on the "<id>_*.jpg" name pattern.
                img_id = data.split("/")[-2]
                img_id = os.path.join(self.root, img_id + "_*.jpg")
                # NOTE(review): raises IndexError if the image file is missing
                # locally — confirm this hard failure is intended.
                img_id = glob.glob(img_id)[0]
                self.current_img = img_id
                self.annotations[img_id] = []
            elif self.current_tag == "li" and self.current_img:
                # List items under the current image are its captions.
                img_id = self.current_img
                self.annotations[img_id].append(data.strip())
54
+
55
+
56
class Flickr8k(VisionDataset):
    """`Flickr8k Entities <http://hockenmaier.cs.illinois.edu/8k-pictures.html>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
        ann_file (string): Path to annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    def __init__(
        self,
        root: Union[str, Path],
        ann_file: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.ann_file = os.path.expanduser(ann_file)

        # Read annotations and store in a dict: the annotation HTML page is fed
        # to Flickr8kParser, which maps local image paths to caption lists.
        parser = Flickr8kParser(self.root)
        with open(self.ann_file) as fh:
            parser.feed(fh.read())
        self.annotations = parser.annotations

        # Sorted for a deterministic index -> image ordering.
        self.ids = list(sorted(self.annotations.keys()))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is a list of captions for the image.
        """
        img_id = self.ids[index]

        # Image: img_id is already a full local path produced by the parser.
        img = Image.open(img_id).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        # Captions
        target = self.annotations[img_id]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        """Return the number of annotated images."""
        return len(self.ids)
110
+
111
+
112
class Flickr30k(VisionDataset):
    """`Flickr30k Entities <https://bryanplummer.com/Flickr30kEntities/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
        ann_file (string): Path to annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    def __init__(
        self,
        root: str,
        ann_file: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.ann_file = os.path.expanduser(ann_file)

        # Read annotations and store in a dict. Each line is tab-separated:
        # image id followed by one caption. The [:-2] slice drops the last two
        # characters of the id — presumably a "#k" caption-index suffix; verify
        # against the annotation file format.
        self.annotations = defaultdict(list)
        with open(self.ann_file) as fh:
            for line in fh:
                img_id, caption = line.strip().split("\t")
                self.annotations[img_id[:-2]].append(caption)

        # Sorted for a deterministic index -> image ordering.
        self.ids = list(sorted(self.annotations.keys()))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is a list of captions for the image.
        """
        img_id = self.ids[index]

        # Image: ids are file names relative to root.
        filename = os.path.join(self.root, img_id)
        img = Image.open(filename).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        # Captions
        target = self.annotations[img_id]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        """Return the number of annotated images."""
        return len(self.ids)
vllm/lib/python3.10/site-packages/torchvision/datasets/flowers102.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ import PIL.Image
5
+
6
+ from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
7
+ from .vision import VisionDataset
8
+
9
+
10
class Flowers102(VisionDataset):
    """`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
    flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
    between 40 and 258 images.

    The images have large scale, pose and light variations. In addition, there are categories that
    have large variations within the category, and several very similar categories.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a
            transformed version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
    _file_dict = {  # filename, md5
        "image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
        "label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
        "setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
    }
    _splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        self._base_folder = Path(self.root) / "flowers-102"
        self._images_folder = self._base_folder / "jpg"

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # Imported lazily so scipy is only required when the dataset is used.
        from scipy.io import loadmat

        # setid.mat stores the image ids of each split under the keys in _splits_map.
        set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
        image_ids = set_ids[self._splits_map[self._split]].tolist()

        # imagelabels.mat labels are shifted down by 1 (1-based -> 0-based) and
        # keyed by 1-based image id (enumerate starts at 1).
        labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
        image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))

        self._labels = []
        self._image_files = []
        for image_id in image_ids:
            self._labels.append(image_id_to_label[image_id])
            # Image files are named image_00001.jpg, ... (5-digit zero padding).
            self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")

    def __len__(self) -> int:
        """Return the number of images in the selected split."""
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, label)`` for *idx*, with transforms applied."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        """Extra info shown in ``repr()``: the dataset split."""
        return f"split={self._split}"

    def _check_integrity(self):
        # The image folder is only checked for presence; the two .mat metadata
        # files are verified against their md5 checksums.
        if not (self._images_folder.exists() and self._images_folder.is_dir()):
            return False

        for id in ["label", "setid"]:
            filename, md5 = self._file_dict[id]
            if not check_integrity(str(self._base_folder / filename), md5):
                return False
        return True

    def download(self):
        """Download and extract the image archive and metadata files if needed."""
        if self._check_integrity():
            return
        download_and_extract_archive(
            f"{self._download_url_prefix}{self._file_dict['image'][0]}",
            str(self._base_folder),
            md5=self._file_dict["image"][1],
        )
        for id in ["label", "setid"]:
            filename, md5 = self._file_dict[id]
            download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
vllm/lib/python3.10/site-packages/torchvision/datasets/folder.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path
3
+ from pathlib import Path
4
+ from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
5
+
6
+ from PIL import Image
7
+
8
+ from .vision import VisionDataset
9
+
10
+
11
def has_file_allowed_extension(filename: str, extensions: Union[str, Tuple[str, ...]]) -> bool:
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    # str.endswith accepts either a single suffix or a tuple of suffixes;
    # normalize any other iterable into a tuple first.
    if isinstance(extensions, str):
        suffixes: Union[str, Tuple[str, ...]] = extensions
    else:
        suffixes = tuple(extensions)
    return filename.lower().endswith(suffixes)
22
+
23
+
24
def is_image_file(filename: str) -> bool:
    """Checks if a file is an allowed image extension.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    # Delegates to the generic check using the module-level IMG_EXTENSIONS tuple.
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
34
+
35
+
36
def find_classes(directory: Union[str, Path]) -> Tuple[List[str], Dict[str, int]]:
    """Finds the class folders in a dataset.

    See :class:`DatasetFolder` for details.
    """
    # Every immediate subdirectory of `directory` is a class.
    subdirs = [entry.name for entry in os.scandir(directory) if entry.is_dir()]
    classes = sorted(subdirs)
    if not classes:
        raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")

    # Indices follow the alphabetical order of the class names.
    class_to_idx = dict(zip(classes, range(len(classes))))
    return classes, class_to_idx
47
+
48
+
49
def make_dataset(
    directory: Union[str, Path],
    class_to_idx: Optional[Dict[str, int]] = None,
    extensions: Optional[Union[str, Tuple[str, ...]]] = None,
    is_valid_file: Optional[Callable[[str], bool]] = None,
    allow_empty: bool = False,
) -> List[Tuple[str, int]]:
    """Generates a list of samples of a form (path_to_sample, class).

    See :class:`DatasetFolder` for details.

    Note: The class_to_idx parameter is here optional and will use the logic of the ``find_classes`` function
    by default.
    """
    directory = os.path.expanduser(directory)

    if class_to_idx is None:
        _, class_to_idx = find_classes(directory)
    elif not class_to_idx:
        raise ValueError("'class_to_index' must have at least one entry to collect any samples.")

    # Exactly one of `extensions` / `is_valid_file` must be supplied.
    if (extensions is None) == (is_valid_file is None):
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")

    if extensions is not None:

        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, extensions)  # type: ignore[arg-type]

    is_valid_file = cast(Callable[[str], bool], is_valid_file)

    samples: List[Tuple[str, int]] = []
    classes_with_files = set()
    # Classes are visited in alphabetical order; os.walk/filenames are sorted
    # too, so the output ordering is deterministic.
    for cls_name in sorted(class_to_idx):
        cls_index = class_to_idx[cls_name]
        cls_dir = os.path.join(directory, cls_name)
        if not os.path.isdir(cls_dir):
            continue
        for walk_root, _, file_names in sorted(os.walk(cls_dir, followlinks=True)):
            for file_name in sorted(file_names):
                full_path = os.path.join(walk_root, file_name)
                if not is_valid_file(full_path):
                    continue
                samples.append((full_path, cls_index))
                classes_with_files.add(cls_name)

    missing = set(class_to_idx) - classes_with_files
    if missing and not allow_empty:
        msg = f"Found no valid file for the classes {', '.join(sorted(missing))}. "
        if extensions is not None:
            msg += f"Supported extensions are: {extensions if isinstance(extensions, str) else ', '.join(extensions)}"
        raise FileNotFoundError(msg)

    return samples
107
+
108
+
109
class DatasetFolder(VisionDataset):
    """A generic data loader.

    This default directory structure can be customized by overriding the
    :meth:`find_classes` method.

    Args:
        root (str or ``pathlib.Path``): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (tuple[string]): A list of allowed extensions.
            both extensions and is_valid_file should not be passed.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        is_valid_file (callable, optional): A function that takes path of a file
            and check if the file is a valid file (used to check of corrupt files)
            both extensions and is_valid_file should not be passed.
        allow_empty(bool, optional): If True, empty folders are considered to be valid classes.
            An error is raised on empty folders if False (default).

    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(
        self,
        root: Union[str, Path],
        loader: Callable[[str], Any],
        extensions: Optional[Tuple[str, ...]] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
        allow_empty: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        # find_classes() and make_dataset() are dispatched through self / the
        # class so that subclasses can override either hook.
        classes, class_to_idx = self.find_classes(self.root)
        samples = self.make_dataset(
            self.root,
            class_to_idx=class_to_idx,
            extensions=extensions,
            is_valid_file=is_valid_file,
            allow_empty=allow_empty,
        )

        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]

    @staticmethod
    def make_dataset(
        directory: Union[str, Path],
        class_to_idx: Dict[str, int],
        extensions: Optional[Tuple[str, ...]] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
        allow_empty: bool = False,
    ) -> List[Tuple[str, int]]:
        """Generates a list of samples of a form (path_to_sample, class).

        This can be overridden to e.g. read files from a compressed zip file instead of from the disk.

        Args:
            directory (str): root dataset directory, corresponding to ``self.root``.
            class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
            extensions (optional): A list of allowed extensions.
                Either extensions or is_valid_file should be passed. Defaults to None.
            is_valid_file (optional): A function that takes path of a file
                and checks if the file is a valid file
                (used to check of corrupt files) both extensions and
                is_valid_file should not be passed. Defaults to None.
            allow_empty(bool, optional): If True, empty folders are considered to be valid classes.
                An error is raised on empty folders if False (default).

        Raises:
            ValueError: In case ``class_to_idx`` is empty.
            ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
            FileNotFoundError: In case no valid file was found for any class.

        Returns:
            List[Tuple[str, int]]: samples of a form (path_to_sample, class)
        """
        if class_to_idx is None:
            # prevent potential bug since make_dataset() would use the class_to_idx logic of the
            # find_classes() function, instead of using that of the find_classes() method, which
            # is potentially overridden and thus could have a different logic.
            raise ValueError("The class_to_idx parameter cannot be None.")
        return make_dataset(
            directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file, allow_empty=allow_empty
        )

    def find_classes(self, directory: Union[str, Path]) -> Tuple[List[str], Dict[str, int]]:
        """Find the class folders in a dataset structured as follows::

            directory/
            ├── class_x
            │   ├── xxx.ext
            │   ├── xxy.ext
            │   └── ...
            │       └── xxz.ext
            └── class_y
                ├── 123.ext
                ├── nsdf3.ext
                └── ...
                └── asd932_.ext

        This method can be overridden to only consider
        a subset of classes, or to adapt to a different dataset directory structure.

        Args:
            directory(str): Root directory path, corresponding to ``self.root``

        Raises:
            FileNotFoundError: If ``dir`` has no class folders.

        Returns:
            (Tuple[List[str], Dict[str, int]]): List of all classes and dictionary mapping each class to an index.
        """
        return find_classes(directory)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target

    def __len__(self) -> int:
        """Return the number of collected samples."""
        return len(self.samples)
255
+
256
+
257
# Lowercase file extensions accepted by ``ImageFolder``'s default file filter.
IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp")
258
+
259
+
260
def pil_loader(path: str) -> Image.Image:
    """Load the image at *path* with PIL and return it converted to RGB."""
    # Open through an explicit file handle so the descriptor is closed promptly
    # (avoids ResourceWarning, https://github.com/python-pillow/Pillow/issues/835)
    with open(path, "rb") as fp:
        image = Image.open(fp)
        return image.convert("RGB")
265
+
266
+
267
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
    """Load the image at *path* with accimage, falling back to PIL on failure."""
    # Imported lazily: accimage is an optional backend.
    import accimage

    try:
        return accimage.Image(path)
    except OSError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
276
+
277
+
278
def default_loader(path: str) -> Any:
    """Load an image using whichever backend torchvision is configured with."""
    # Imported here to avoid a circular import at module load time.
    from torchvision import get_image_backend

    if get_image_backend() == "accimage":
        return accimage_loader(path)
    else:
        return pil_loader(path)
285
+
286
+
287
class ImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way by default: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/[...]/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/[...]/asd932_.png

    This class inherits from :class:`~torchvision.datasets.DatasetFolder` so
    the same methods can be overridden to customize the dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes path of an Image file
            and check if the file is a valid file (used to check of corrupt files)
        allow_empty(bool, optional): If True, empty folders are considered to be valid classes.
            An error is raised on empty folders if False (default).

    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(
        self,
        root: Union[str, Path],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        loader: Callable[[str], Any] = default_loader,
        is_valid_file: Optional[Callable[[str], bool]] = None,
        allow_empty: bool = False,
    ):
        # Extension filtering and a custom is_valid_file are mutually exclusive
        # in make_dataset, so the default IMG_EXTENSIONS filter is only passed
        # when no custom predicate is given.
        super().__init__(
            root,
            loader,
            IMG_EXTENSIONS if is_valid_file is None else None,
            transform=transform,
            target_transform=target_transform,
            is_valid_file=is_valid_file,
            allow_empty=allow_empty,
        )
        # Kept for backward compatibility: alias of self.samples.
        self.imgs = self.samples
vllm/lib/python3.10/site-packages/torchvision/datasets/food101.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import PIL.Image
6
+
7
+ from .utils import download_and_extract_archive, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
class Food101(VisionDataset):
    """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.

    The Food-101 is a challenging data set of 101 food categories with 101,000 images.
    For each class, 250 manually reviewed test images are provided as well as 750 training images.
    On purpose, the training images were not cleaned, and thus still contain some amount of noise.
    This comes mostly in the form of intense colors and sometimes wrong labels. All images were
    rescaled to have a maximum side length of 512 pixels.


    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
    _MD5 = "85eeb15f3717b99a5da872d97d918f87"

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = Path(self.root) / "food-101"
        self._meta_folder = self._base_folder / "meta"
        self._images_folder = self._base_folder / "images"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        self._labels = []
        self._image_files = []
        # meta/<split>.json maps each class name to a list of relative image
        # paths. Use the validated self._split value for consistency.
        with open(self._meta_folder / f"{self._split}.json") as f:
            metadata = json.load(f)

        self.classes = sorted(metadata.keys())
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        for class_label, im_rel_paths in metadata.items():
            self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
            self._image_files += [
                # Split on "/" so the stored POSIX-style path resolves on any OS.
                self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
            ]

    def __len__(self) -> int:
        """Return the number of images in the selected split."""
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, label)`` for *idx*, with transforms applied."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        """Extra info shown in ``repr()``: the dataset split."""
        return f"split={self._split}"

    def _check_exists(self) -> bool:
        """Return True if both the meta and images folders are present."""
        return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))

    def _download(self) -> None:
        """Download and extract the dataset archive if it is not present."""
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
vllm/lib/python3.10/site-packages/torchvision/datasets/gtsrb.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import pathlib
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import PIL
6
+
7
+ from .folder import make_dataset
8
+ from .utils import download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class GTSRB(VisionDataset):
    """`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        super().__init__(root, transform=transform, target_transform=target_transform)

        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = pathlib.Path(root) / "gtsrb"
        # Train images ship as one folder per class; test images live flat next
        # to a semicolon-separated ground-truth CSV.
        self._target_folder = (
            self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
        )

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        if self._split == "train":
            # Class-per-folder layout: (path, class_index) pairs come straight
            # from the directory structure.
            samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
        else:
            # Test split: labels are read from the ground-truth CSV.
            with open(self._base_folder / "GT-final_test.csv") as csv_file:
                samples = [
                    (str(self._target_folder / row["Filename"]), int(row["ClassId"]))
                    for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
                ]

        self._samples = samples
        # NOTE: ``transform`` and ``target_transform`` are already stored by
        # ``VisionDataset.__init__`` above; the previous re-assignments here
        # were redundant and have been removed.

    def __len__(self) -> int:
        return len(self._samples)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return the ``(image, class_id)`` pair at ``index``, with transforms applied."""
        path, target = self._samples[index]
        sample = PIL.Image.open(path).convert("RGB")

        if self.transform is not None:
            sample = self.transform(sample)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target

    def _check_exists(self) -> bool:
        # The split-specific image folder is the marker for a complete download.
        return self._target_folder.is_dir()

    def download(self) -> None:
        """Download and extract the archives for the configured split, if missing."""
        if self._check_exists():
            return

        base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"

        if self._split == "train":
            download_and_extract_archive(
                f"{base_url}GTSRB-Training_fixed.zip",
                download_root=str(self._base_folder),
                md5="513f3c79a4c5141765e10e952eaa2478",
            )
        else:
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_Images.zip",
                download_root=str(self._base_folder),
                md5="c7e4e6327067d32654124b0fe9e82185",
            )
            # The test split additionally needs the ground-truth CSV archive.
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_GT.zip",
                download_root=str(self._base_folder),
                md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
            )
vllm/lib/python3.10/site-packages/torchvision/datasets/hmdb51.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ from torch import Tensor
7
+
8
+ from .folder import find_classes, make_dataset
9
+ from .video_utils import VideoClips
10
+ from .vision import VisionDataset
11
+
12
+
13
class HMDB51(VisionDataset):
    """
    `HMDB51 <https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
    dataset.

    HMDB51 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Args:
        root (str or ``pathlib.Path``): Root directory of the HMDB51 Dataset.
        annotation_path (str): Path to the folder containing the split files.
        frames_per_clip (int): Number of frames in a clip.
        step_between_clips (int): Number of frames between each clip.
        fold (int, optional): Which fold to use. Should be between 1 and 3.
        train (bool, optional): If ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" (default) or "TCHW".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
            - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
              and `L` is the number of points
            - label (int): class of the video clip
    """

    data_url = "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
    splits = {
        "url": "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
        "md5": "15e67781e70dcfbdce2d7dbb9b3344b5",
    }
    # Tag values used in the official split annotation files: 1 marks a video
    # as belonging to the train split, 2 to the test split (0 = unused).
    TRAIN_TAG = 1
    TEST_TAG = 2

    def __init__(
        self,
        root: Union[str, Path],
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        super().__init__(root)
        if fold not in (1, 2, 3):
            raise ValueError(f"fold should be between 1 and 3, got {fold}")

        # Discover (video_path, class_index) samples from the class-per-folder layout.
        extensions = ("avi",)
        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(
            self.root,
            class_to_idx,
            extensions,
        )

        # Index clips over ALL videos first; the fold/train subset is taken below.
        video_paths = [path for (path, _) in self.samples]
        video_clips = VideoClips(
            video_paths,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # we bookkeep the full version of video clips because we want to be able
        # to return the metadata of full version rather than the subset version of
        # video clips
        self.full_video_clips = video_clips
        self.fold = fold
        self.train = train
        # Indices (into ``video_paths``/``self.samples``) of the videos that
        # belong to the requested fold and split.
        self.indices = self._select_fold(video_paths, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform

    @property
    def metadata(self) -> Dict[str, Any]:
        # Metadata of the FULL clip index (all videos), not just the subset.
        return self.full_video_clips.metadata

    def _select_fold(self, video_list: List[str], annotations_dir: str, fold: int, train: bool) -> List[int]:
        """Return indices into ``video_list`` for videos tagged with this fold/split.

        The official annotation files are named ``<class>_test_split<fold>.txt``
        and contain ``<video_filename> <tag>`` lines.
        """
        target_tag = self.TRAIN_TAG if train else self.TEST_TAG
        split_pattern_name = f"*test_split{fold}.txt"
        split_pattern_path = os.path.join(annotations_dir, split_pattern_name)
        annotation_paths = glob.glob(split_pattern_path)
        selected_files = set()
        for filepath in annotation_paths:
            with open(filepath) as fid:
                lines = fid.readlines()
            for line in lines:
                video_filename, tag_string = line.split()
                tag = int(tag_string)
                if tag == target_tag:
                    selected_files.add(video_filename)

        # Match by basename: annotation files list filenames without class folders.
        indices = []
        for video_index, video_path in enumerate(video_list):
            if os.path.basename(video_path) in selected_files:
                indices.append(video_index)

        return indices

    def __len__(self) -> int:
        # Length is the number of CLIPS in the subset, not the number of videos.
        return self.video_clips.num_clips()

    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        video, audio, _, video_idx = self.video_clips.get_clip(idx)
        # ``video_idx`` indexes the subset; map back to the full samples list.
        sample_index = self.indices[video_idx]
        _, class_index = self.samples[sample_index]

        if self.transform is not None:
            video = self.transform(video)

        return video, audio, class_index
vllm/lib/python3.10/site-packages/torchvision/datasets/imagenet.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import tempfile
4
+ from contextlib import contextmanager
5
+ from pathlib import Path
6
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
7
+
8
+ import torch
9
+
10
+ from .folder import ImageFolder
11
+ from .utils import check_integrity, extract_archive, verify_str_arg
12
+
13
# (filename, md5) pairs of the three official ImageNet-2012 archives; used by
# the parse_* helpers below to locate and verify each archive before extraction.
ARCHIVE_META = {
    "train": ("ILSVRC2012_img_train.tar", "1d675b47d978889d74fa0da5fadfb00e"),
    "val": ("ILSVRC2012_img_val.tar", "29b22e2961454d5413ddabcf34fc5622"),
    "devkit": ("ILSVRC2012_devkit_t12.tar.gz", "fa75699e90414af021442c21a62c3abf"),
}

# Name of the binary cache file (created from the devkit by
# ``parse_devkit_archive``) holding the wnid/class metadata.
META_FILE = "meta.bin"
20
+
21
+
22
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.

    .. note::
        Before using this class, it is required to download ImageNet 2012 dataset from
        `here <https://image-net.org/challenges/LSVRC/2012/2012-downloads.php>`_ and
        place the files ``ILSVRC2012_devkit_t12.tar.gz`` and ``ILSVRC2012_img_train.tar``
        or ``ILSVRC2012_img_val.tar`` based on ``split`` in the root directory.

    Args:
        root (str or ``pathlib.Path``): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.

    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, root: Union[str, Path], split: str = "train", **kwargs: Any) -> None:
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))

        # Ensure the archives have been extracted and meta.bin exists before
        # handing the extracted folder to ImageFolder.
        self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]

        super().__init__(self.split_folder, **kwargs)
        # ImageFolder set self.root to the split folder; restore the dataset root.
        self.root = root

        # ImageFolder discovered wnid directory names as "classes"; remap so
        # that ``classes`` holds human-readable name tuples and the wnids are
        # kept separately. Every synonym in a tuple maps to the same index.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        self.class_to_idx = {cls: idx for idx, clss in enumerate(self.classes) for cls in clss}

    def parse_archives(self) -> None:
        # Build meta.bin from the devkit if missing, then extract the split
        # archive the first time around (idempotent on later runs).
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)

        if not os.path.isdir(self.split_folder):
            if self.split == "train":
                parse_train_archive(self.root)
            elif self.split == "val":
                parse_val_archive(self.root)

    @property
    def split_folder(self) -> str:
        # e.g. <root>/train or <root>/val
        return os.path.join(self.root, self.split)

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)
80
+
81
+
82
def load_meta_file(root: Union[str, Path], file: Optional[str] = None) -> Tuple[Dict[str, str], List[str]]:
    """Load the preprocessed metadata file from the dataset root.

    Args:
        root: Root directory containing the meta file.
        file: Meta file name; defaults to ``META_FILE`` (``"meta.bin"``).

    Returns:
        The ``(wnid_to_classes, val_wnids)`` tuple saved by
        ``parse_devkit_archive``.

    Raises:
        RuntimeError: If the meta file is missing or fails the integrity check.
    """
    if file is None:
        file = META_FILE
    file = os.path.join(root, file)

    if check_integrity(file):
        return torch.load(file, weights_only=True)
    else:
        # Fix: the message previously had a single ``{}`` placeholder but was
        # formatted with two arguments, silently dropping ``root``; include it.
        msg = (
            "The meta file {} is not present in the root directory {} or is corrupted. "
            "This file is automatically created by the ImageNet dataset."
        )
        raise RuntimeError(msg.format(file, root))
95
+
96
+
97
def _verify_archive(root: Union[str, Path], file: str, md5: str) -> None:
    """Raise ``RuntimeError`` unless ``file`` exists under ``root`` and matches ``md5``."""
    archive_path = os.path.join(root, file)
    if check_integrity(archive_path, md5):
        return
    msg = (
        "The archive {} is not present in the root directory or is corrupted. "
        "You need to download it externally and place it in {}."
    )
    raise RuntimeError(msg.format(file, root))
104
+
105
+
106
def parse_devkit_archive(root: Union[str, Path], file: Optional[str] = None) -> None:
    """Parse the devkit archive of the ImageNet2012 classification dataset and save
    the meta information in a binary file.

    Args:
        root (str or ``pathlib.Path``): Root directory containing the devkit archive
        file (str, optional): Name of devkit archive. Defaults to
            'ILSVRC2012_devkit_t12.tar.gz'
    """
    # Imported lazily so that scipy is only required when the devkit is parsed.
    import scipy.io as sio

    def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, Tuple[str, ...]]]:
        # Read the MATLAB synset table; keep only leaf synsets (num_children == 0),
        # which are the 1000 actual classes.
        metafile = os.path.join(devkit_root, "data", "meta.mat")
        meta = sio.loadmat(metafile, squeeze_me=True)["synsets"]
        nums_children = list(zip(*meta))[4]
        meta = [meta[idx] for idx, num_children in enumerate(nums_children) if num_children == 0]
        idcs, wnids, classes = list(zip(*meta))[:3]
        # Each class string is a comma-separated list of synonyms; split into a tuple.
        classes = [tuple(clss.split(", ")) for clss in classes]
        idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
        wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
        return idx_to_wnid, wnid_to_classes

    def parse_val_groundtruth_txt(devkit_root: str) -> List[int]:
        # One (1-based) synset index per validation image, in image order.
        file = os.path.join(devkit_root, "data", "ILSVRC2012_validation_ground_truth.txt")
        with open(file) as txtfh:
            val_idcs = txtfh.readlines()
        return [int(val_idx) for val_idx in val_idcs]

    @contextmanager
    def get_tmp_dir() -> Iterator[str]:
        # Extract into a throwaway directory that is removed even on error.
        tmp_dir = tempfile.mkdtemp()
        try:
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir)

    archive_meta = ARCHIVE_META["devkit"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]

    _verify_archive(root, file, md5)

    with get_tmp_dir() as tmp_dir:
        extract_archive(os.path.join(root, file), tmp_dir)

        devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12")
        idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root)
        val_idcs = parse_val_groundtruth_txt(devkit_root)
        val_wnids = [idx_to_wnid[idx] for idx in val_idcs]

        # Persist the metadata; read back later via ``load_meta_file``.
        torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE))
158
+
159
+
160
def parse_train_archive(root: Union[str, Path], file: Optional[str] = None, folder: str = "train") -> None:
    """Parse the train images archive of the ImageNet2012 classification dataset and
    prepare it for usage with the ImageNet dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory containing the train images archive
        file (str, optional): Name of train images archive. Defaults to
            'ILSVRC2012_img_train.tar'
        folder (str, optional): Optional name for train images folder. Defaults to
            'train'
    """
    default_file, md5 = ARCHIVE_META["train"]
    if file is None:
        file = default_file

    _verify_archive(root, file, md5)

    # The outer tar contains one tar per class; extract it, then unpack each
    # per-class tar into a directory named after the archive (its wnid).
    train_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), train_root)

    for entry in os.listdir(train_root):
        class_archive = os.path.join(train_root, entry)
        extract_archive(class_archive, os.path.splitext(class_archive)[0], remove_finished=True)
184
+
185
+
186
def parse_val_archive(
    root: Union[str, Path], file: Optional[str] = None, wnids: Optional[List[str]] = None, folder: str = "val"
) -> None:
    """Parse the validation images archive of the ImageNet2012 classification dataset
    and prepare it for usage with the ImageNet dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory containing the validation images archive
        file (str, optional): Name of validation images archive. Defaults to
            'ILSVRC2012_img_val.tar'
        wnids (list, optional): List of WordNet IDs of the validation images. If None
            is given, the IDs are loaded from the meta file in the root directory
        folder (str, optional): Optional name for validation images folder. Defaults to
            'val'
    """
    default_file, md5 = ARCHIVE_META["val"]
    if file is None:
        file = default_file
    if wnids is None:
        wnids = load_meta_file(root)[1]

    _verify_archive(root, file, md5)

    val_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), val_root)

    # The archive is flat; images sorted by filename line up with ``wnids``
    # (which is in validation-image order). Create one folder per wnid and
    # move each image into its class folder.
    images = sorted(os.path.join(val_root, image) for image in os.listdir(val_root))

    for wnid in set(wnids):
        os.mkdir(os.path.join(val_root, wnid))

    for wnid, img_file in zip(wnids, images):
        destination = os.path.join(val_root, wnid, os.path.basename(img_file))
        shutil.move(img_file, destination)
vllm/lib/python3.10/site-packages/torchvision/datasets/imagenette.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ from PIL import Image
5
+
6
+ from .folder import find_classes, make_dataset
7
+ from .utils import download_and_extract_archive, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
class Imagenette(VisionDataset):
    """`Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ image classification dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of the Imagenette dataset.
        split (string, optional): The dataset split. Supports ``"train"`` (default), and ``"val"``.
        size (string, optional): The image size. Supports ``"full"`` (default), ``"320px"``, and ``"160px"``.
        download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
            downloaded archives are not downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version, e.g. ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.

    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class name, class index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (WordNet ID, class index).
    """

    # (url, md5) per supported image size.
    _ARCHIVES = {
        "full": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz", "fe2fc210e6bb7c5664d602c3cd71e612"),
        "320px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", "3df6f0d01a2c9592104656642f5e78a3"),
        "160px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz", "e793b78cc4c9e9a4ccc0c1155377a412"),
    }
    # Human-readable synonym tuples for the 10 wnids contained in Imagenette.
    _WNID_TO_CLASS = {
        "n01440764": ("tench", "Tinca tinca"),
        "n02102040": ("English springer", "English springer spaniel"),
        "n02979186": ("cassette player",),
        "n03000684": ("chain saw", "chainsaw"),
        "n03028079": ("church", "church building"),
        "n03394916": ("French horn", "horn"),
        "n03417042": ("garbage truck", "dustcart"),
        "n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
        "n03445777": ("golf ball",),
        "n03888257": ("parachute", "chute"),
    }

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        size: str = "full",
        # Fix: annotate ``download`` for consistency with the other datasets
        # in this package (previously a bare ``download=False``).
        download: bool = False,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)

        self._split = verify_str_arg(split, "split", ["train", "val"])
        self._size = verify_str_arg(size, "size", ["full", "320px", "160px"])

        self._url, self._md5 = self._ARCHIVES[self._size]
        # Archive extracts to a folder named after the archive stem, with one
        # subfolder per split.
        self._size_root = Path(self.root) / Path(self._url).stem
        self._image_root = str(self._size_root / self._split)

        if download:
            self._download()
        elif not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it.")

        # Directory names are wnids; map them onto readable class-name tuples,
        # with every synonym pointing at the same class index.
        self.wnids, self.wnid_to_idx = find_classes(self._image_root)
        self.classes = [self._WNID_TO_CLASS[wnid] for wnid in self.wnids]
        self.class_to_idx = {
            class_name: idx for wnid, idx in self.wnid_to_idx.items() for class_name in self._WNID_TO_CLASS[wnid]
        }
        self._samples = make_dataset(self._image_root, self.wnid_to_idx, extensions=".jpeg")

    def _check_exists(self) -> bool:
        # Presence of the extracted size-specific folder marks a complete download.
        return self._size_root.exists()

    def _download(self) -> None:
        if self._check_exists():
            raise RuntimeError(
                f"The directory {self._size_root} already exists. "
                f"If you want to re-download or re-extract the images, delete the directory."
            )

        download_and_extract_archive(self._url, self.root, md5=self._md5)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return the ``(image, label)`` pair at ``idx``, with transforms applied."""
        path, label = self._samples[idx]
        image = Image.open(path).convert("RGB")

        if self.transform is not None:
            image = self.transform(image)

        if self.target_transform is not None:
            label = self.target_transform(label)

        return image, label

    def __len__(self) -> int:
        return len(self._samples)
vllm/lib/python3.10/site-packages/torchvision/datasets/inaturalist.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ from PIL import Image
7
+
8
+ from .utils import download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
# Taxonomic levels available as targets for the 2021 layouts, encoded in each
# category directory name (most general to most specific).
CATEGORIES_2021 = ["kingdom", "phylum", "class", "order", "family", "genus"]

# Download location of the archive for each supported dataset version.
DATASET_URLS = {
    "2017": "https://ml-inat-competition-datasets.s3.amazonaws.com/2017/train_val_images.tar.gz",
    "2018": "https://ml-inat-competition-datasets.s3.amazonaws.com/2018/train_val2018.tar.gz",
    "2019": "https://ml-inat-competition-datasets.s3.amazonaws.com/2019/train_val2019.tar.gz",
    "2021_train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.tar.gz",
    "2021_train_mini": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train_mini.tar.gz",
    "2021_valid": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.tar.gz",
}

# MD5 checksums of the archives above, keyed by version.
DATASET_MD5 = {
    "2017": "7c784ea5e424efaec655bd392f87301f",
    "2018": "b1c6952ce38f31868cc50ea72d066cc3",
    "2019": "c60a6e2962c9b8ccbd458d12c8582644",
    "2021_train": "e0526d53c7f7b2e3167b2b43bb2690ed",
    "2021_train_mini": "db6ed8330e634445efc8fec83ae81442",
    "2021_valid": "f6f6e0e242e3d4c9569ba56400938afc",
}
30
+
31
+
32
class INaturalist(VisionDataset):
    """`iNaturalist <https://github.com/visipedia/inat_comp>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where the image files are stored.
            This class does not require/use annotation files.
        version (string, optional): Which version of the dataset to download/use. One of
            '2017', '2018', '2019', '2021_train', '2021_train_mini', '2021_valid'.
            Default: `2021_train`.
        target_type (string or list, optional): Type of target to use, for 2021 versions, one of:

            - ``full``: the full category (species)
            - ``kingdom``: e.g. "Animalia"
            - ``phylum``: e.g. "Arthropoda"
            - ``class``: e.g. "Insecta"
            - ``order``: e.g. "Coleoptera"
            - ``family``: e.g. "Cleridae"
            - ``genus``: e.g. "Trichodes"

            for 2017-2019 versions, one of:

            - ``full``: the full (numeric) category
            - ``super``: the super category, e.g. "Amphibians"

            Can also be a list to output a tuple with all specified target types.
            Defaults to ``full``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(
        self,
        root: Union[str, Path],
        version: str = "2021_train",
        target_type: Union[List[str], str] = "full",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self.version = verify_str_arg(version, "version", DATASET_URLS.keys())

        # The dataset root is ``<root>/<version>``; the version folder is
        # created by download()/extraction.
        super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform)

        os.makedirs(root, exist_ok=True)
        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.all_categories: List[str] = []

        # map: category type -> name of category -> index
        self.categories_index: Dict[str, Dict[str, int]] = {}

        # list indexed by category id, containing mapping from category type -> index
        self.categories_map: List[Dict[str, int]] = []

        if not isinstance(target_type, list):
            target_type = [target_type]
        # The two directory layouts (2021 vs. 2017-2019) support different targets.
        if self.version[:4] == "2021":
            self.target_type = [verify_str_arg(t, "target_type", ("full", *CATEGORIES_2021)) for t in target_type]
            self._init_2021()
        else:
            self.target_type = [verify_str_arg(t, "target_type", ("full", "super")) for t in target_type]
            self._init_pre2021()

        # index of all files: (full category id, filename)
        self.index: List[Tuple[int, str]] = []

        for dir_index, dir_name in enumerate(self.all_categories):
            files = os.listdir(os.path.join(self.root, dir_name))
            for fname in files:
                self.index.append((dir_index, fname))

    def _init_2021(self) -> None:
        """Initialize based on 2021 layout.

        Directory names look like ``00000_Animalia_..._Trichodes_apiarius``:
        a 5-digit category id followed by one name per taxonomic level.
        """
        self.all_categories = sorted(os.listdir(self.root))

        # map: category type -> name of category -> index
        self.categories_index = {k: {} for k in CATEGORIES_2021}

        for dir_index, dir_name in enumerate(self.all_categories):
            pieces = dir_name.split("_")
            if len(pieces) != 8:
                raise RuntimeError(f"Unexpected category name {dir_name}, wrong number of pieces")
            if pieces[0] != f"{dir_index:05d}":
                raise RuntimeError(f"Unexpected category id {pieces[0]}, expecting {dir_index:05d}")
            cat_map = {}
            for cat, name in zip(CATEGORIES_2021, pieces[1:7]):
                if name in self.categories_index[cat]:
                    cat_id = self.categories_index[cat][name]
                else:
                    # First occurrence of this name at this level: assign a new id.
                    cat_id = len(self.categories_index[cat])
                    self.categories_index[cat][name] = cat_id
                cat_map[cat] = cat_id
            self.categories_map.append(cat_map)

    def _init_pre2021(self) -> None:
        """Initialize based on 2017-2019 layout.

        Layout is ``<super category>/<category>``; for 2018/2019 the category
        directory name is the numeric category id, for 2017 ids are assigned
        in iteration order.
        """
        # map: category type -> name of category -> index
        self.categories_index = {"super": {}}

        cat_index = 0
        super_categories = sorted(os.listdir(self.root))
        for sindex, scat in enumerate(super_categories):
            self.categories_index["super"][scat] = sindex
            subcategories = sorted(os.listdir(os.path.join(self.root, scat)))
            for subcat in subcategories:
                if self.version == "2017":
                    # this version does not use ids as directory names
                    subcat_i = cat_index
                    cat_index += 1
                else:
                    try:
                        subcat_i = int(subcat)
                    except ValueError as exc:
                        # Fix: chain the original exception for a clearer traceback.
                        raise RuntimeError(f"Unexpected non-numeric dir name: {subcat}") from exc
                if subcat_i >= len(self.categories_map):
                    # Grow the id-indexed lists with placeholders up to this id.
                    old_len = len(self.categories_map)
                    self.categories_map.extend([{}] * (subcat_i - old_len + 1))
                    self.all_categories.extend([""] * (subcat_i - old_len + 1))
                if self.categories_map[subcat_i]:
                    raise RuntimeError(f"Duplicate category {subcat}")
                self.categories_map[subcat_i] = {"super": sindex}
                self.all_categories[subcat_i] = os.path.join(scat, subcat)

        # validate the dictionary: every id must have been filled in
        for cindex, c in enumerate(self.categories_map):
            if not c:
                raise RuntimeError(f"Missing category {cindex}")

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """

        cat_id, fname = self.index[index]
        img = Image.open(os.path.join(self.root, self.all_categories[cat_id], fname))

        target: Any = []
        for t in self.target_type:
            if t == "full":
                target.append(cat_id)
            else:
                target.append(self.categories_map[cat_id][t])
        # Single target type -> scalar target, multiple -> tuple.
        target = tuple(target) if len(target) > 1 else target[0]

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.index)

    def category_name(self, category_type: str, category_id: int) -> str:
        """
        Args:
            category_type(str): one of "full", "kingdom", "phylum", "class", "order", "family", "genus" or "super"
            category_id(int): an index (class id) from this category

        Returns:
            the name of the category
        """
        if category_type == "full":
            return self.all_categories[category_id]
        else:
            if category_type not in self.categories_index:
                raise ValueError(f"Invalid category type '{category_type}'")
            else:
                # Reverse lookup: the index maps name -> id, so scan for the id.
                for name, cat_id in self.categories_index[category_type].items():
                    if cat_id == category_id:
                        return name
                raise ValueError(f"Invalid category id {category_id} for {category_type}")

    def _check_integrity(self) -> bool:
        # A non-empty version folder is taken as a complete dataset.
        return os.path.exists(self.root) and len(os.listdir(self.root)) > 0

    def download(self) -> None:
        if self._check_integrity():
            raise RuntimeError(
                f"The directory {self.root} already exists. "
                f"If you want to re-download or re-extract the images, delete the directory."
            )

        base_root = os.path.dirname(self.root)

        download_and_extract_archive(
            DATASET_URLS[self.version], base_root, filename=f"{self.version}.tgz", md5=DATASET_MD5[self.version]
        )

        # Fix: ``str.rstrip(".tar.gz")`` strips a *character set*, not a suffix,
        # and only worked by accident for the current URLs; remove the suffix
        # explicitly instead.
        tarball_name = os.path.basename(DATASET_URLS[self.version])
        suffix = ".tar.gz"
        if tarball_name.endswith(suffix):
            tarball_name = tarball_name[: -len(suffix)]
        orig_dir_name = os.path.join(base_root, tarball_name)
        if not os.path.exists(orig_dir_name):
            raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}")
        os.rename(orig_dir_name, self.root)
        print(f"Dataset version '{self.version}' has been downloaded and prepared for use")
vllm/lib/python3.10/site-packages/torchvision/datasets/kinetics.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ import time
4
+ import urllib
5
+ from functools import partial
6
+ from multiprocessing import Pool
7
+ from os import path
8
+ from pathlib import Path
9
+ from typing import Any, Callable, Dict, Optional, Tuple, Union
10
+
11
+ from torch import Tensor
12
+
13
+ from .folder import find_classes, make_dataset
14
+ from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
15
+ from .video_utils import VideoClips
16
+ from .vision import VisionDataset
17
+
18
+
19
def _dl_wrap(tarpath: Union[str, Path], videopath: Union[str, Path], line: str) -> None:
    # Module-level helper (so it is picklable for multiprocessing.Pool): downloads
    # the archive at URL `line` into `tarpath` and extracts it into `videopath`.
    download_and_extract_archive(line, tarpath, videopath)
21
+
22
+
23
class Kinetics(VisionDataset):
    """`Generic Kinetics <https://www.deepmind.com/open-source/kinetics>`_
    dataset.

    Kinetics-400/600/700 are action recognition video datasets.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Args:
        root (str or ``pathlib.Path``): Root directory of the Kinetics Dataset.
            Directory should be structured as follows:
            .. code::

                root/
                ├── split
                │   ├──  class1
                │   │   ├──  vid1.mp4
                │   │   ├──  vid2.mp4
                │   │   ├──  vid3.mp4
                │   │   ├──  ...
                │   ├──  class2
                │   │   ├──   vidx.mp4
                │   │    └── ...

            Note: split is appended automatically using the split argument.
        frames_per_clip (int): number of frames in a clip
        num_classes (int): select between Kinetics-400 (default), Kinetics-600, and Kinetics-700
        split (str): split of the dataset to consider; supports ``"train"`` (default) ``"val"`` ``"test"``
        frame_rate (float): If omitted, interpolate different frame rate for each clip.
        step_between_clips (int): number of frames between each clip
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        download (bool): Download the official version of the dataset to root folder.
        num_workers (int): Use multiple workers for VideoClips creation
        num_download_workers (int): Use multiprocessing in order to speed up download.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" or "TCHW" (default).
            Note that in most other utils and datasets, the default is actually "THWC".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, C, H, W] or Tensor[T, H, W, C]): the `T` video frames in torch.uint8 tensor
            - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
              and `L` is the number of points in torch.float tensor
            - label (int): class of the video clip

    Raises:
        RuntimeError: If ``download is True`` and the video archives are already extracted.
    """

    # Per-dataset-size URL templates for the archive lists and annotation CSVs.
    _TAR_URLS = {
        "400": "https://s3.amazonaws.com/kinetics/400/{split}/k400_{split}_path.txt",
        "600": "https://s3.amazonaws.com/kinetics/600/{split}/k600_{split}_path.txt",
        "700": "https://s3.amazonaws.com/kinetics/700_2020/{split}/k700_2020_{split}_path.txt",
    }
    _ANNOTATION_URLS = {
        "400": "https://s3.amazonaws.com/kinetics/400/annotations/{split}.csv",
        "600": "https://s3.amazonaws.com/kinetics/600/annotations/{split}.csv",
        "700": "https://s3.amazonaws.com/kinetics/700_2020/annotations/{split}.csv",
    }

    def __init__(
        self,
        root: Union[str, Path],
        frames_per_clip: int,
        num_classes: str = "400",
        split: str = "train",
        frame_rate: Optional[int] = None,
        step_between_clips: int = 1,
        transform: Optional[Callable] = None,
        extensions: Tuple[str, ...] = ("avi", "mp4"),
        download: bool = False,
        num_download_workers: int = 1,
        num_workers: int = 1,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        _audio_channels: int = 0,
        _legacy: bool = False,
        output_format: str = "TCHW",
    ) -> None:

        # TODO: support test
        self.num_classes = verify_str_arg(num_classes, arg="num_classes", valid_values=["400", "600", "700"])
        self.extensions = extensions
        self.num_download_workers = num_download_workers

        self.root = root
        self._legacy = _legacy

        if _legacy:
            # Legacy layout: videos live directly under `root`, with no split folder.
            print("Using legacy structure")
            self.split_folder = root
            self.split = "unknown"
            output_format = "THWC"
            if download:
                raise ValueError("Cannot download the videos using legacy_structure.")
        else:
            self.split_folder = path.join(root, split)
            self.split = verify_str_arg(split, arg="split", valid_values=["train", "val", "test"])

            if download:
                self.download_and_process_videos()

        super().__init__(self.root)

        self.classes, class_to_idx = find_classes(self.split_folder)
        self.samples = make_dataset(self.split_folder, class_to_idx, extensions, is_valid_file=None)
        video_list = [x[0] for x in self.samples]
        self.video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            _audio_channels=_audio_channels,
            output_format=output_format,
        )
        self.transform = transform

    def download_and_process_videos(self) -> None:
        """Downloads all the videos to the _root_ folder in the expected format."""
        tic = time.time()
        self._download_videos()
        toc = time.time()
        print("Elapsed time for downloading in mins ", (toc - tic) / 60)
        self._make_ds_structure()
        toc2 = time.time()
        print("Elapsed time for processing in mins ", (toc2 - toc) / 60)
        print("Elapsed time overall in mins ", (toc2 - tic) / 60)

    def _download_videos(self) -> None:
        """download tarballs containing the video to "tars" folder and extract them into the _split_ folder where
        split is one of the official dataset splits.

        Raises:
            RuntimeError: if download folder exists, break to prevent downloading entire dataset again.
        """
        if path.exists(self.split_folder):
            raise RuntimeError(
                f"The directory {self.split_folder} already exists. "
                f"If you want to re-download or re-extract the images, delete the directory."
            )
        tar_path = path.join(self.root, "tars")
        file_list_path = path.join(self.root, "files")

        split_url = self._TAR_URLS[self.num_classes].format(split=self.split)
        split_url_filepath = path.join(file_list_path, path.basename(split_url))
        if not check_integrity(split_url_filepath):
            download_url(split_url, file_list_path)
        with open(split_url_filepath) as file:
            # Percent-encode each archive URL, keeping path/list separators literal.
            list_video_urls = [urllib.parse.quote(line, safe="/,:") for line in file.read().splitlines()]

        if self.num_download_workers == 1:
            for line in list_video_urls:
                download_and_extract_archive(line, tar_path, self.split_folder)
        else:
            part = partial(_dl_wrap, tar_path, self.split_folder)
            # Fix: run the pool as a context manager so worker processes are closed
            # and joined even when a download raises (the original created the Pool
            # without ever calling close()/join(), leaking worker processes).
            with Pool(self.num_download_workers) as poolproc:
                poolproc.map(part, list_video_urls)

    def _make_ds_structure(self) -> None:
        """move videos from
        split_folder/
            ├── clip1.avi
            ├── clip2.avi

        to the correct format as described below:
        split_folder/
            ├── class1
            │   ├── clip1.avi

        """
        annotation_path = path.join(self.root, "annotations")
        if not check_integrity(path.join(annotation_path, f"{self.split}.csv")):
            download_url(self._ANNOTATION_URLS[self.num_classes].format(split=self.split), annotation_path)
        annotations = path.join(annotation_path, f"{self.split}.csv")

        # Clip files are named "<youtube_id>_<start>_<end>.mp4" with zero-padded times.
        file_fmtstr = "{ytid}_{start:06}_{end:06}.mp4"
        with open(annotations) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                f = file_fmtstr.format(
                    ytid=row["youtube_id"],
                    start=int(row["time_start"]),
                    end=int(row["time_end"]),
                )
                # Sanitize the label so it is safe to use as a directory name.
                label = row["label"].replace(" ", "_").replace("'", "").replace("(", "").replace(")", "")
                os.makedirs(path.join(self.split_folder, label), exist_ok=True)
                downloaded_file = path.join(self.split_folder, f)
                if path.isfile(downloaded_file):
                    os.replace(
                        downloaded_file,
                        path.join(self.split_folder, label, f),
                    )

    @property
    def metadata(self) -> Dict[str, Any]:
        # Per-video metadata precomputed by VideoClips.
        return self.video_clips.metadata

    def __len__(self) -> int:
        return self.video_clips.num_clips()

    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        """Return the ``idx``-th clip as ``(video, audio, label)``."""
        video, audio, info, video_idx = self.video_clips.get_clip(idx)
        label = self.samples[video_idx][1]

        if self.transform is not None:
            video = self.transform(video)

        return video, audio, label
vllm/lib/python3.10/site-packages/torchvision/datasets/kitti.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Any, Callable, List, Optional, Tuple, Union
5
+
6
+ from PIL import Image
7
+
8
+ from .utils import download_and_extract_archive
9
+ from .vision import VisionDataset
10
+
11
+
12
class Kitti(VisionDataset):
    """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark>`_ Dataset.

    It corresponds to the "left color images of object" dataset, for object detection.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
            Expects the following folder structure if download=False:

            .. code::

                <root>
                    └── Kitti
                        └─ raw
                            ├── training
                            |   ├── image_2
                            |   └── label_2
                            └── testing
                                └── image_2
        train (bool, optional): Use ``train`` split if true, else ``test`` split.
            Defaults to ``train``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample
            and its target as entry and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    data_url = "https://s3.eu-central-1.amazonaws.com/avg-kitti/"
    resources = [
        "data_object_image_2.zip",
        "data_object_label_2.zip",
    ]
    image_dir_name = "image_2"
    labels_dir_name = "label_2"

    def __init__(
        self,
        root: Union[str, Path],
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
        download: bool = False,
    ):
        super().__init__(
            root,
            transform=transform,
            target_transform=target_transform,
            transforms=transforms,
        )
        self.images = []
        self.targets = []
        self.train = train
        self._location = "training" if self.train else "testing"

        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You may use download=True to download it.")

        # List the image directory once and derive both image and label paths from
        # the same listing so the two lists stay aligned index-for-index.
        image_dir = os.path.join(self._raw_folder, self._location, self.image_dir_name)
        image_names = os.listdir(image_dir)
        self.images = [os.path.join(image_dir, img_file) for img_file in image_names]
        if self.train:
            labels_dir = os.path.join(self._raw_folder, self._location, self.labels_dir_name)
            self.targets = [os.path.join(labels_dir, f"{img_file.split('.')[0]}.txt") for img_file in image_names]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Get item at a given index.

        Args:
            index (int): Index
        Returns:
            tuple: (image, target), where
            target is a list of dictionaries with the following keys:

            - type: str
            - truncated: float
            - occluded: int
            - alpha: float
            - bbox: float[4]
            - dimensions: float[3]
            - location: float[3]
            - rotation_y: float

        """
        image = Image.open(self.images[index])
        # Labels exist only for the training split.
        target = None
        if self.train:
            target = self._parse_target(index)
        if self.transforms:
            image, target = self.transforms(image, target)
        return image, target

    def _parse_target(self, index: int) -> List:
        """Parse one KITTI label file into a list of per-object annotation dicts."""
        with open(self.targets[index]) as inp:
            rows = csv.reader(inp, delimiter=" ")
            return [
                {
                    "type": fields[0],
                    "truncated": float(fields[1]),
                    "occluded": int(fields[2]),
                    "alpha": float(fields[3]),
                    "bbox": [float(v) for v in fields[4:8]],
                    "dimensions": [float(v) for v in fields[8:11]],
                    "location": [float(v) for v in fields[11:14]],
                    "rotation_y": float(fields[14]),
                }
                for fields in rows
            ]

    def __len__(self) -> int:
        return len(self.images)

    @property
    def _raw_folder(self) -> str:
        # <root>/Kitti/raw
        return os.path.join(self.root, self.__class__.__name__, "raw")

    def _check_exists(self) -> bool:
        """Check if the data directory exists."""
        base = os.path.join(self._raw_folder, self._location)
        required = [self.image_dir_name]
        if self.train:
            required.append(self.labels_dir_name)
        return all(os.path.isdir(os.path.join(base, fname)) for fname in required)

    def download(self) -> None:
        """Download the KITTI data if it doesn't exist already."""
        if self._check_exists():
            return

        os.makedirs(self._raw_folder, exist_ok=True)

        # Fetch and unpack the image and label archives.
        for fname in self.resources:
            download_and_extract_archive(
                url=f"{self.data_url}{fname}",
                download_root=self._raw_folder,
                filename=fname,
            )
vllm/lib/python3.10/site-packages/torchvision/datasets/lfw.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+
7
+ from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
class _LFW(VisionDataset):
    """Shared machinery for the LFW datasets: download, integrity checking and
    path resolution for the chosen image set, split and view ("people"/"pairs").
    """

    base_folder = "lfw-py"
    download_url_prefix = "http://vis-www.cs.umass.edu/lfw/"

    # image_set -> (extracted directory name, archive filename, archive md5)
    file_dict = {
        "original": ("lfw", "lfw.tgz", "a17d05bd522c52d84eca14327a23d494"),
        "funneled": ("lfw_funneled", "lfw-funneled.tgz", "1b42dfed7d15c9b2dd63d5e5840c86ad"),
        "deepfunneled": ("lfw-deepfunneled", "lfw-deepfunneled.tgz", "68331da3eb755a505a502b5aacb3c201"),
    }
    # md5 checksums for the annotation files
    checksums = {
        "pairs.txt": "9f1ba174e4e1c508ff7cdf10ac338a7d",
        "pairsDevTest.txt": "5132f7440eb68cf58910c8a45a2ac10b",
        "pairsDevTrain.txt": "4f27cbf15b2da4a85c1907eb4181ad21",
        "people.txt": "450f0863dd89e85e73936a6d71a3474b",
        "peopleDevTest.txt": "e4bf5be0a43b5dcd9dc5ccfcb8fb19c5",
        "peopleDevTrain.txt": "54eaac34beb6d042ed3a7d883e247a21",
        "lfw-names.txt": "a6d0a479bd074669f656265a6e693f6d",
    }
    # split -> infix used in the annotation file name ("10fold" uses the plain file)
    annot_file = {"10fold": "", "train": "DevTrain", "test": "DevTest"}
    names = "lfw-names.txt"

    def __init__(
        self,
        root: Union[str, Path],
        split: str,
        image_set: str,
        view: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, self.base_folder), transform=transform, target_transform=target_transform)

        self.image_set = verify_str_arg(image_set.lower(), "image_set", self.file_dict.keys())
        images_dir, self.filename, self.md5 = self.file_dict[self.image_set]

        self.view = verify_str_arg(view.lower(), "view", ["people", "pairs"])
        self.split = verify_str_arg(split.lower(), "split", ["10fold", "train", "test"])
        # e.g. "peopleDevTrain.txt" or "pairs.txt" (10fold has no infix).
        self.labels_file = f"{self.view}{self.annot_file[self.split]}.txt"
        self.data: List[Any] = []

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.images_dir = os.path.join(self.root, images_dir)

    def _loader(self, path: str) -> Image.Image:
        """Load an image from disk as a 3-channel RGB PIL image."""
        with open(path, "rb") as f:
            img = Image.open(f)
            return img.convert("RGB")

    def _check_integrity(self) -> bool:
        """Verify md5 of the archive, the annotation file and (for 'people') the names file."""
        st1 = check_integrity(os.path.join(self.root, self.filename), self.md5)
        st2 = check_integrity(os.path.join(self.root, self.labels_file), self.checksums[self.labels_file])
        if not st1 or not st2:
            return False
        if self.view == "people":
            return check_integrity(os.path.join(self.root, self.names), self.checksums[self.names])
        return True

    def download(self) -> None:
        """Download and extract the image archive plus the annotation file(s), if missing."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        url = f"{self.download_url_prefix}{self.filename}"
        download_and_extract_archive(url, self.root, filename=self.filename, md5=self.md5)
        download_url(f"{self.download_url_prefix}{self.labels_file}", self.root)
        if self.view == "people":
            download_url(f"{self.download_url_prefix}{self.names}", self.root)

    def _get_path(self, identity: str, no: Union[int, str]) -> str:
        # Images are stored as <identity>/<identity>_<4-digit number>.jpg
        return os.path.join(self.images_dir, identity, f"{identity}_{int(no):04d}.jpg")

    def extra_repr(self) -> str:
        return f"Alignment: {self.image_set}\nSplit: {self.split}"

    def __len__(self) -> int:
        return len(self.data)
93
+
94
+
95
class LFWPeople(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.
        split (string, optional): The image split to use. Can be one of ``train``, ``test``,
            ``10fold`` (default).
        image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or
            ``deepfunneled``. Defaults to ``funneled``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomRotation``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    def __init__(
        self,
        root: str,
        split: str = "10fold",
        image_set: str = "funneled",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, split, image_set, "people", transform, target_transform, download)

        self.class_to_idx = self._get_classes()
        self.data, self.targets = self._get_people()

    def _get_people(self) -> Tuple[List[str], List[int]]:
        """Parse the people annotation file into image paths and identity labels."""
        data: List[str] = []
        targets: List[int] = []
        with open(os.path.join(self.root, self.labels_file)) as f:
            lines = f.readlines()

        # "10fold" files start with a fold count on the first line; train/test
        # files go straight into a single block.
        if self.split == "10fold":
            n_folds, cursor = int(lines[0]), 1
        else:
            n_folds, cursor = 1, 0

        for _ in range(n_folds):
            # Each block starts with its line count, followed by
            # "<identity>\t<number of images>" entries.
            n_lines = int(lines[cursor])
            block = lines[cursor + 1 : cursor + n_lines + 1]
            cursor += n_lines + 1
            for entry in block:
                identity, num_imgs = entry.strip().split("\t")
                for num in range(1, int(num_imgs) + 1):
                    data.append(self._get_path(identity, num))
                    targets.append(self.class_to_idx[identity])

        return data, targets

    def _get_classes(self) -> Dict[str, int]:
        """Build the identity-name -> class-index mapping from the lfw-names file."""
        with open(os.path.join(self.root, self.names)) as f:
            identities = [line.strip().split()[0] for line in f]
        return {identity: idx for idx, identity in enumerate(identities)}

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target) where target is the identity of the person.
        """
        img = self._loader(self.data[index])
        target = self.targets[index]

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def extra_repr(self) -> str:
        return super().extra_repr() + f"\nClasses (identities): {len(self.class_to_idx)}"
175
+
176
+
177
class LFWPairs(_LFW):
    """`LFW <http://vis-www.cs.umass.edu/lfw/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``lfw-py`` exists or will be saved to if download is set to True.
        split (string, optional): The image split to use. Can be one of ``train``, ``test``,
            ``10fold``. Defaults to ``10fold``.
        image_set (str, optional): Type of image funneling to use, ``original``, ``funneled`` or
            ``deepfunneled``. Defaults to ``funneled``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomRotation``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    def __init__(
        self,
        root: str,
        split: str = "10fold",
        image_set: str = "funneled",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, split, image_set, "pairs", transform, target_transform, download)

        self.pair_names, self.data, self.targets = self._get_pairs(self.images_dir)

    def _get_pairs(self, images_dir: str) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[int]]:
        """Parse the pairs annotation file into (pair names, image-path pairs, match labels)."""
        pair_names, data, targets = [], [], []
        with open(os.path.join(self.root, self.labels_file)) as f:
            lines = f.readlines()
            # The 10fold header carries "<n_folds>\t<n_pairs>"; train/test files
            # carry only the pair count for a single fold.
            if self.split == "10fold":
                n_folds, n_pairs = lines[0].split("\t")
                n_folds, n_pairs = int(n_folds), int(n_pairs)
            else:
                n_folds, n_pairs = 1, int(lines[0])
            s = 1

            for fold in range(n_folds):
                # Each fold lists n_pairs matched pairs followed by n_pairs unmatched pairs.
                matched_pairs = [line.strip().split("\t") for line in lines[s : s + n_pairs]]
                unmatched_pairs = [line.strip().split("\t") for line in lines[s + n_pairs : s + (2 * n_pairs)]]
                s += 2 * n_pairs
                for pair in matched_pairs:
                    # Matched line format: <identity> <img_no_1> <img_no_2>
                    img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[0], pair[2]), 1
                    pair_names.append((pair[0], pair[0]))
                    data.append((img1, img2))
                    targets.append(same)
                for pair in unmatched_pairs:
                    # Unmatched line format: <identity_1> <img_no_1> <identity_2> <img_no_2>
                    img1, img2, same = self._get_path(pair[0], pair[1]), self._get_path(pair[2], pair[3]), 0
                    pair_names.append((pair[0], pair[2]))
                    data.append((img1, img2))
                    targets.append(same)

        return pair_names, data, targets

    def __getitem__(self, index: int) -> Tuple[Any, Any, int]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image1, image2, target) where target is `0` for different identities and `1` for same identities.
        """
        img1, img2 = self.data[index]
        img1, img2 = self._loader(img1), self._loader(img2)
        target = self.targets[index]

        if self.transform is not None:
            img1, img2 = self.transform(img1), self.transform(img2)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img1, img2, target
vllm/lib/python3.10/site-packages/torchvision/datasets/lsun.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os.path
3
+ import pickle
4
+ import string
5
+ from collections.abc import Iterable
6
+ from pathlib import Path
7
+ from typing import Any, Callable, cast, List, Optional, Tuple, Union
8
+
9
+ from PIL import Image
10
+
11
+ from .utils import iterable_to_str, verify_str_arg
12
+ from .vision import VisionDataset
13
+
14
+
15
class LSUNClass(VisionDataset):
    """A single LSUN scene category backed by an LMDB database.

    The list of LMDB keys is scanned once and cached to disk (pickle) so that
    subsequent constructions of the same database are fast.
    """

    def __init__(
        self, root: str, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None
    ) -> None:
        import lmdb  # imported lazily: lmdb is an optional dependency

        super().__init__(root, transform=transform, target_transform=target_transform)

        self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.length = txn.stat()["entries"]
        # Cache file name derived from the db path (letters only, so it is a valid filename).
        cache_file = "_cache_" + "".join(c for c in root if c in string.ascii_letters)
        if os.path.isfile(cache_file):
            # Fix: open the cache through context managers so the file handles are
            # closed deterministically (the original `pickle.load(open(...))` /
            # `pickle.dump(..., open(...))` leaked them).
            with open(cache_file, "rb") as f:
                self.keys = pickle.load(f)
        else:
            with self.env.begin(write=False) as txn:
                self.keys = [key for key in txn.cursor().iternext(keys=True, values=False)]
            with open(cache_file, "wb") as f:
                pickle.dump(self.keys, f)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(image, target)``; target is ``None`` unless target_transform produces one."""
        target = None
        with self.env.begin(write=False) as txn:
            imgbuf = txn.get(self.keys[index])

        # Decode the raw image bytes stored in LMDB through an in-memory buffer.
        buf = io.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        img = Image.open(buf).convert("RGB")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return self.length
55
+
56
+
57
class LSUN(VisionDataset):
    """`LSUN <https://www.yf.io/p/lsun>`_ dataset.

    You will need to install the ``lmdb`` package to use this dataset: run
    ``pip install lmdb``

    Args:
        root (str or ``pathlib.Path``): Root directory for the database files.
        classes (string or list): One of {'train', 'val', 'test'} or a list of
            categories to load. e,g. ['bedroom_train', 'church_outdoor_train'].
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    def __init__(
        self,
        root: Union[str, Path],
        classes: Union[str, List[str]] = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.classes = self._verify_classes(classes)

        # for each class, create an LSUNClassDataset
        self.dbs = []
        for c in self.classes:
            self.dbs.append(LSUNClass(root=os.path.join(root, f"{c}_lmdb"), transform=transform))

        # Cumulative sample counts; indices[i] is the first global index after db i.
        self.indices = []
        count = 0
        for db in self.dbs:
            count += len(db)
            self.indices.append(count)

        self.length = count

    def _verify_classes(self, classes: Union[str, List[str]]) -> List[str]:
        """Normalize/validate ``classes`` into a list of '<category>_<split>' names.

        A bare split name ("train"/"val") expands to every category of that split,
        while "test" maps to the single test database; otherwise each explicit
        entry is validated against the known categories and split suffixes.
        """
        categories = [
            "bedroom",
            "bridge",
            "church_outdoor",
            "classroom",
            "conference_room",
            "dining_room",
            "kitchen",
            "living_room",
            "restaurant",
            "tower",
        ]
        dset_opts = ["train", "val", "test"]

        try:
            classes = cast(str, classes)
            verify_str_arg(classes, "classes", dset_opts)
            if classes == "test":
                classes = [classes]
            else:
                classes = [c + "_" + classes for c in categories]
        except ValueError:
            # Not a plain split name: treat it as an iterable of explicit class names.
            if not isinstance(classes, Iterable):
                msg = "Expected type str or Iterable for argument classes, but got type {}."
                raise ValueError(msg.format(type(classes)))

            classes = list(classes)
            msg_fmtstr_type = "Expected type str for elements in argument classes, but got type {}."
            for c in classes:
                verify_str_arg(c, custom_msg=msg_fmtstr_type.format(type(c)))
                # Split "<category>_<split>" on the last underscore.
                c_short = c.split("_")
                category, dset_opt = "_".join(c_short[:-1]), c_short[-1]

                msg_fmtstr = "Unknown value '{}' for {}. Valid values are {{{}}}."
                msg = msg_fmtstr.format(category, "LSUN class", iterable_to_str(categories))
                verify_str_arg(category, valid_values=categories, custom_msg=msg)

                msg = msg_fmtstr.format(dset_opt, "postfix", iterable_to_str(dset_opts))
                verify_str_arg(dset_opt, valid_values=dset_opts, custom_msg=msg)

        return classes

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target) where target is the index of the target category.
        """
        # Locate which per-class db contains the global index: `target` becomes the
        # class index and `sub` the cumulative count preceding that db.
        target = 0
        sub = 0
        for ind in self.indices:
            if index < ind:
                break
            target += 1
            sub = ind

        db = self.dbs[target]
        index = index - sub

        if self.target_transform is not None:
            target = self.target_transform(target)

        img, _ = db[index]
        return img, target

    def __len__(self) -> int:
        return self.length

    def extra_repr(self) -> str:
        return "Classes: {classes}".format(**self.__dict__)
vllm/lib/python3.10/site-packages/torchvision/datasets/mnist.py ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import codecs
2
+ import os
3
+ import os.path
4
+ import shutil
5
+ import string
6
+ import sys
7
+ import warnings
8
+ from pathlib import Path
9
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
10
+ from urllib.error import URLError
11
+
12
+ import numpy as np
13
+ import torch
14
+ from PIL import Image
15
+
16
+ from .utils import _flip_byte_order, check_integrity, download_and_extract_archive, extract_archive, verify_str_arg
17
+ from .vision import VisionDataset
18
+
19
+
20
class MNIST(VisionDataset):
    """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``MNIST/raw/train-images-idx3-ubyte``
            and ``MNIST/raw/t10k-images-idx3-ubyte`` exist.
        train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
            otherwise from ``t10k-images-idx3-ubyte``.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    # Mirrors are tried in order; the S3 mirror is the reliable fallback.
    mirrors = [
        "http://yann.lecun.com/exdb/mnist/",
        "https://ossci-datasets.s3.amazonaws.com/mnist/",
    ]

    # (archive filename, md5 checksum) pairs downloaded into `raw_folder`.
    resources = [
        ("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
        ("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
        ("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
        ("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c"),
    ]

    # Legacy cache filenames, kept only for backward compatibility (see
    # `_check_legacy_exist` / `_load_legacy_data`).
    training_file = "training.pt"
    test_file = "test.pt"
    classes = [
        "0 - zero",
        "1 - one",
        "2 - two",
        "3 - three",
        "4 - four",
        "5 - five",
        "6 - six",
        "7 - seven",
        "8 - eight",
        "9 - nine",
    ]

    @property
    def train_labels(self):
        warnings.warn("train_labels has been renamed targets")
        return self.targets

    @property
    def test_labels(self):
        warnings.warn("test_labels has been renamed targets")
        return self.targets

    @property
    def train_data(self):
        warnings.warn("train_data has been renamed data")
        return self.data

    @property
    def test_data(self):
        warnings.warn("test_data has been renamed data")
        return self.data

    def __init__(
        self,
        root: Union[str, Path],
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.train = train  # training set or test set

        if self._check_legacy_exist():
            self.data, self.targets = self._load_legacy_data()
            return

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        self.data, self.targets = self._load_data()

    def _check_legacy_exist(self):
        # True when a pre-downloaded `processed/` cache from old torchvision
        # versions is present and intact.
        processed_folder_exists = os.path.exists(self.processed_folder)
        if not processed_folder_exists:
            return False

        return all(
            check_integrity(os.path.join(self.processed_folder, file)) for file in (self.training_file, self.test_file)
        )

    def _load_legacy_data(self):
        # This is for BC only. We no longer cache the data in a custom binary, but simply read from the raw data
        # directly.
        data_file = self.training_file if self.train else self.test_file
        return torch.load(os.path.join(self.processed_folder, data_file), weights_only=True)

    def _load_data(self):
        image_file = f"{'train' if self.train else 't10k'}-images-idx3-ubyte"
        data = read_image_file(os.path.join(self.raw_folder, image_file))

        label_file = f"{'train' if self.train else 't10k'}-labels-idx1-ubyte"
        targets = read_label_file(os.path.join(self.raw_folder, label_file))

        return data, targets

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.targets[index])

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode="L")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    @property
    def raw_folder(self) -> str:
        return os.path.join(self.root, self.__class__.__name__, "raw")

    @property
    def processed_folder(self) -> str:
        return os.path.join(self.root, self.__class__.__name__, "processed")

    @property
    def class_to_idx(self) -> Dict[str, int]:
        return {_class: i for i, _class in enumerate(self.classes)}

    def _check_exists(self) -> bool:
        # The extracted (uncompressed) files must exist, hence splitext to
        # drop the `.gz` suffix from the resource name.
        return all(
            check_integrity(os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0]))
            for url, _ in self.resources
        )

    def download(self) -> None:
        """Download the MNIST data if it doesn't exist already."""

        if self._check_exists():
            return

        os.makedirs(self.raw_folder, exist_ok=True)

        # download files
        for filename, md5 in self.resources:
            for mirror in self.mirrors:
                # BUGFIX: the URL must interpolate the resource filename; it
                # previously contained a garbled literal placeholder instead.
                url = f"{mirror}{filename}"
                try:
                    print(f"Downloading {url}")
                    download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
                except URLError as error:
                    # Try the next mirror on network failure.
                    print(f"Failed to download (trying next):\n{error}")
                    continue
                finally:
                    print()
                break
            else:
                # for/else: every mirror failed for this resource.
                raise RuntimeError(f"Error downloading {filename}")

    def extra_repr(self) -> str:
        split = "Train" if self.train is True else "Test"
        return f"Split: {split}"
201
+
202
+
203
class FashionMNIST(MNIST):
    """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``FashionMNIST/raw/train-images-idx3-ubyte``
            and ``FashionMNIST/raw/t10k-images-idx3-ubyte`` exist.
        train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
            otherwise from ``t10k-images-idx3-ubyte``.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    # Only the download location, checksums, and class names differ from MNIST;
    # all loading logic is inherited unchanged.
    mirrors = ["http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"]

    resources = [
        ("train-images-idx3-ubyte.gz", "8d4fb7e6c68d591d4c3dfef9ec88bf0d"),
        ("train-labels-idx1-ubyte.gz", "25c81989df183df01b3e8a0aad5dffbe"),
        ("t10k-images-idx3-ubyte.gz", "bef4ecab320f06d8554ea6380940ec79"),
        ("t10k-labels-idx1-ubyte.gz", "bb300cfdad3c16e7a12a480ee83cd310"),
    ]
    classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
229
+
230
+
231
class KMNIST(MNIST):
    """`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``KMNIST/raw/train-images-idx3-ubyte``
            and ``KMNIST/raw/t10k-images-idx3-ubyte`` exist.
        train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
            otherwise from ``t10k-images-idx3-ubyte``.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    # Only the download location, checksums, and class names differ from MNIST;
    # all loading logic is inherited unchanged.
    mirrors = ["http://codh.rois.ac.jp/kmnist/dataset/kmnist/"]

    resources = [
        ("train-images-idx3-ubyte.gz", "bdb82020997e1d708af4cf47b453dcf7"),
        ("train-labels-idx1-ubyte.gz", "e144d726b3acfaa3e44228e80efcd344"),
        ("t10k-images-idx3-ubyte.gz", "5c965bf0a639b31b8f53240b1b52f4d7"),
        ("t10k-labels-idx1-ubyte.gz", "7320c461ea6c1c855c0b718fb2a4b134"),
    ]
    # Romanized Kuzushiji (cursive Japanese) character classes.
    classes = ["o", "ki", "su", "tsu", "na", "ha", "ma", "ya", "re", "wo"]
257
+
258
+
259
class EMNIST(MNIST):
    """`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``EMNIST/raw/train-images-idx3-ubyte``
            and ``EMNIST/raw/t10k-images-idx3-ubyte`` exist.
        split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
            ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
            which one to use.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    url = "https://biometrics.nist.gov/cs_links/EMNIST/gzip.zip"
    md5 = "58c8d27c78d21e728a6bc7b3cc06412e"
    splits = ("byclass", "bymerge", "balanced", "letters", "digits", "mnist")
    # Merged Classes assumes Same structure for both uppercase and lowercase version
    _merged_classes = {"c", "i", "j", "k", "l", "m", "o", "p", "s", "u", "v", "w", "x", "y", "z"}
    _all_classes = set(string.digits + string.ascii_letters)
    classes_split_dict = {
        "byclass": sorted(_all_classes),
        "bymerge": sorted(_all_classes - _merged_classes),
        "balanced": sorted(_all_classes - _merged_classes),
        "letters": ["N/A"] + list(string.ascii_lowercase),
        "digits": list(string.digits),
        "mnist": list(string.digits),
    }

    def __init__(self, root: Union[str, Path], split: str, **kwargs: Any) -> None:
        # The split must be fixed before the parent constructor runs, since
        # MNIST.__init__ consults `training_file`/`test_file`.
        self.split = verify_str_arg(split, "split", self.splits)
        self.training_file = self._training_file(split)
        self.test_file = self._test_file(split)
        super().__init__(root, **kwargs)
        self.classes = self.classes_split_dict[self.split]

    @staticmethod
    def _training_file(split) -> str:
        return "training_{}.pt".format(split)

    @staticmethod
    def _test_file(split) -> str:
        return "test_{}.pt".format(split)

    @property
    def _file_prefix(self) -> str:
        # Common stem of the extracted idx files for the configured split.
        section = "train" if self.train else "test"
        return f"emnist-{self.split}-{section}"

    @property
    def images_file(self) -> str:
        return os.path.join(self.raw_folder, f"{self._file_prefix}-images-idx3-ubyte")

    @property
    def labels_file(self) -> str:
        return os.path.join(self.raw_folder, f"{self._file_prefix}-labels-idx1-ubyte")

    def _load_data(self):
        return read_image_file(self.images_file), read_label_file(self.labels_file)

    def _check_exists(self) -> bool:
        return all(check_integrity(f) for f in (self.images_file, self.labels_file))

    def download(self) -> None:
        """Download the EMNIST data if it doesn't exist already."""

        if self._check_exists():
            return

        os.makedirs(self.raw_folder, exist_ok=True)

        # A single archive contains every split; pull the .gz members out of
        # the nested `gzip/` directory and discard the rest.
        download_and_extract_archive(self.url, download_root=self.raw_folder, md5=self.md5)
        gzip_folder = os.path.join(self.raw_folder, "gzip")
        for entry in os.listdir(gzip_folder):
            if not entry.endswith(".gz"):
                continue
            extract_archive(os.path.join(gzip_folder, entry), self.raw_folder)
        shutil.rmtree(gzip_folder)
341
+
342
+
343
class QMNIST(MNIST):
    """`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset whose ``raw``
            subdir contains binary files of the datasets.
        what (string,optional): Can be 'train', 'test', 'test10k',
            'test50k', or 'nist' for respectively the mnist compatible
            training set, the 60k qmnist testing set, the 10k qmnist
            examples that match the mnist testing set, the 50k
            remaining qmnist testing examples, or all the nist
            digits. The default is to select 'train' or 'test'
            according to the compatibility argument 'train'.
        compat (bool,optional): A boolean that says whether the target
            for each example is class number (for compatibility with
            the MNIST dataloader) or a torch vector containing the
            full qmnist information. Default=True.
        download (bool, optional): If True, downloads the dataset from
            the internet and puts it in root directory. If dataset is
            already downloaded, it is not downloaded again.
        transform (callable, optional): A function/transform that
            takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform
            that takes in the target and transforms it.
        train (bool,optional,compatibility): When argument 'what' is
            not specified, this boolean decides whether to load the
            training set or the testing set.  Default: True.
    """

    # 'test10k' and 'test50k' are views over the same downloaded 'test' files.
    subsets = {"train": "train", "test": "test", "test10k": "test", "test50k": "test", "nist": "nist"}
    resources: Dict[str, List[Tuple[str, str]]] = {  # type: ignore[assignment]
        "train": [
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz",
                "ed72d4157d28c017586c42bc6afe6370",
            ),
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz",
                "0058f8dd561b90ffdd0f734c6a30e5e4",
            ),
        ],
        "test": [
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz",
                "1394631089c404de565df7b7aeaf9412",
            ),
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz",
                "5b5b05890a5e13444e108efe57b788aa",
            ),
        ],
        "nist": [
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz",
                "7f124b3b8ab81486c9d8c2749c17f834",
            ),
            (
                "https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz",
                "5ed0e788978e45d4a8bd4b7caec3d79d",
            ),
        ],
    }
    classes = [
        "0 - zero",
        "1 - one",
        "2 - two",
        "3 - three",
        "4 - four",
        "5 - five",
        "6 - six",
        "7 - seven",
        "8 - eight",
        "9 - nine",
    ]

    def __init__(
        self, root: Union[str, Path], what: Optional[str] = None, compat: bool = True, train: bool = True, **kwargs: Any
    ) -> None:
        if what is None:
            what = "train" if train else "test"
        self.what = verify_str_arg(what, "what", tuple(self.subsets.keys()))
        self.compat = compat
        self.data_file = what + ".pt"
        self.training_file = self.data_file
        self.test_file = self.data_file
        super().__init__(root, train, **kwargs)

    @property
    def images_file(self) -> str:
        # First (url, md5) pair of the subset is always the images archive.
        (url, _), _ = self.resources[self.subsets[self.what]]
        return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])

    @property
    def labels_file(self) -> str:
        # Second (url, md5) pair of the subset is always the labels archive.
        _, (url, _) = self.resources[self.subsets[self.what]]
        return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])

    def _check_exists(self) -> bool:
        return all(check_integrity(file) for file in (self.images_file, self.labels_file))

    def _load_data(self):
        data = read_sn3_pascalvincent_tensor(self.images_file)
        if data.dtype != torch.uint8:
            raise TypeError(f"data should be of dtype torch.uint8 instead of {data.dtype}")
        if data.ndimension() != 3:
            # BUGFIX: this message was missing the `f` prefix, so the
            # placeholder was emitted literally instead of the dimension count.
            raise ValueError(f"data should have 3 dimensions instead of {data.ndimension()}")

        targets = read_sn3_pascalvincent_tensor(self.labels_file).long()
        if targets.ndimension() != 2:
            raise ValueError(f"targets should have 2 dimensions instead of {targets.ndimension()}")

        # 'test10k'/'test50k' slice the 60k qmnist test set; clone so the
        # backing 60k storage can be freed.
        if self.what == "test10k":
            data = data[0:10000, :, :].clone()
            targets = targets[0:10000, :].clone()
        elif self.what == "test50k":
            data = data[10000:, :, :].clone()
            targets = targets[10000:, :].clone()

        return data, targets

    def download(self) -> None:
        """Download the QMNIST data if it doesn't exist already.
        Note that we only download what has been asked for (argument 'what').
        """
        if self._check_exists():
            return

        os.makedirs(self.raw_folder, exist_ok=True)
        split = self.resources[self.subsets[self.what]]

        for url, md5 in split:
            download_and_extract_archive(url, self.raw_folder, md5=md5)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        # redefined to handle the compat flag
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img.numpy(), mode="L")
        if self.transform is not None:
            img = self.transform(img)
        if self.compat:
            # MNIST compatibility: reduce the full qmnist label vector to the
            # class number (its first entry).
            target = int(target[0])
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def extra_repr(self) -> str:
        return f"Split: {self.what}"
491
+
492
+
493
def get_int(b: bytes) -> int:
    """Interpret *b* as a big-endian unsigned integer."""
    # `int.from_bytes` is the direct stdlib equivalent of the old
    # hex-encode-then-parse round trip, without the intermediate string.
    return int.from_bytes(b, byteorder="big")
495
+
496
+
497
# Maps the dtype code from an SN3/idx file header to the torch dtype used to
# decode the payload (see `read_sn3_pascalvincent_tensor`).
SN3_PASCALVINCENT_TYPEMAP = {
    8: torch.uint8,
    9: torch.int8,
    11: torch.int16,
    12: torch.int32,
    13: torch.float32,
    14: torch.float64,
}
505
+
506
+
507
def read_sn3_pascalvincent_tensor(path: str, strict: bool = True) -> torch.Tensor:
    """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
    Argument may be a filename, compressed filename, or file object.

    When ``strict`` is False, a payload shorter/longer than the header's
    declared element count is tolerated (the final ``view`` may still fail).
    """
    # read
    with open(path, "rb") as f:
        data = f.read()

    # parse
    if sys.byteorder == "little":
        # The low byte of the 32-bit magic is the dimension count, the next
        # byte the dtype code (keys of SN3_PASCALVINCENT_TYPEMAP).
        magic = get_int(data[0:4])
        nd = magic % 256
        ty = magic // 256
    else:
        nd = get_int(data[0:1])
        ty = get_int(data[1:2]) + get_int(data[2:3]) * 256 + get_int(data[3:4]) * 256 * 256

    assert 1 <= nd <= 3
    assert 8 <= ty <= 14
    torch_type = SN3_PASCALVINCENT_TYPEMAP[ty]
    # One 4-byte size per dimension follows the magic word.
    s = [get_int(data[4 * (i + 1) : 4 * (i + 2)]) for i in range(nd)]

    if sys.byteorder == "big":
        # Re-interpret each size with the opposite byte order on big-endian hosts.
        for i in range(len(s)):
            s[i] = int.from_bytes(s[i].to_bytes(4, byteorder="little"), byteorder="big", signed=False)

    # Payload starts right after the magic word and the nd size fields.
    parsed = torch.frombuffer(bytearray(data), dtype=torch_type, offset=(4 * (nd + 1)))

    # The MNIST format uses the big endian byte order, while `torch.frombuffer` uses whatever the system uses. In case
    # that is little endian and the dtype has more than one byte, we need to flip them.
    if sys.byteorder == "little" and parsed.element_size() > 1:
        parsed = _flip_byte_order(parsed)

    assert parsed.shape[0] == np.prod(s) or not strict
    return parsed.view(*s)
542
+
543
+
544
def read_label_file(path: str) -> torch.Tensor:
    """Decode an idx label file into a 1-D ``int64`` tensor."""
    x = read_sn3_pascalvincent_tensor(path, strict=False)
    if x.dtype != torch.uint8:
        raise TypeError(f"x should be of dtype torch.uint8 instead of {x.dtype}")
    ndim = x.ndimension()
    if ndim != 1:
        raise ValueError(f"x should have 1 dimension instead of {ndim}")
    return x.long()
551
+
552
+
553
def read_image_file(path: str) -> torch.Tensor:
    """Decode an idx image file into a ``uint8`` tensor of shape (N, H, W)."""
    x = read_sn3_pascalvincent_tensor(path, strict=False)
    if x.dtype != torch.uint8:
        raise TypeError(f"x should be of dtype torch.uint8 instead of {x.dtype}")
    ndim = x.ndimension()
    if ndim != 3:
        raise ValueError(f"x should have 3 dimension instead of {ndim}")
    return x
vllm/lib/python3.10/site-packages/torchvision/datasets/moving_mnist.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ from pathlib import Path
3
+ from typing import Callable, Optional, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ from torchvision.datasets.utils import download_url, verify_str_arg
8
+ from torchvision.datasets.vision import VisionDataset
9
+
10
+
11
class MovingMNIST(VisionDataset):
    """`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
        split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
            If ``split=None``, the full data is returned.
        split_ratio (int, optional): The split ratio of number of frames. If ``split="train"``, the first split
            frames ``data[:, :split_ratio]`` is returned. If ``split="test"``, the last split frames
            ``data[:, split_ratio:]`` is returned. If ``split=None``, this parameter is ignored and the all
            frames data is returned.
        transform (callable, optional): A function/transform that takes in a torch Tensor
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"

    def __init__(
        self,
        root: Union[str, Path],
        split: Optional[str] = None,
        split_ratio: int = 10,
        download: bool = False,
        transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform)

        self._base_folder = os.path.join(self.root, type(self).__name__)
        self._filename = self._URL.split("/")[-1]

        # ``None`` means "use every frame"; anything else must be a known split.
        if split is not None:
            verify_str_arg(split, "split", ("train", "test"))
        self.split = split

        if not isinstance(split_ratio, int):
            raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
        elif not (1 <= split_ratio <= 19):
            raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
        self.split_ratio = split_ratio

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it.")

        # Raw layout is (frame, sequence, H, W); split on the frame axis, then
        # reorder to (sequence, frame) and add a singleton channel axis.
        frames = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
        if self.split == "train":
            frames = frames[: self.split_ratio]
        elif self.split == "test":
            frames = frames[self.split_ratio :]
        self.data = frames.transpose(0, 1).unsqueeze(2).contiguous()

    def __getitem__(self, idx: int) -> torch.Tensor:
        """
        Args:
            idx (int): Index
        Returns:
            torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.
        """
        clip = self.data[idx]
        return clip if self.transform is None else self.transform(clip)

    def __len__(self) -> int:
        return len(self.data)

    def _check_exists(self) -> bool:
        return os.path.exists(os.path.join(self._base_folder, self._filename))

    def download(self) -> None:
        """Fetch ``mnist_test_seq.npy`` into the base folder (no-op if present)."""
        if self._check_exists():
            return

        download_url(
            url=self._URL,
            root=self._base_folder,
            filename=self._filename,
            md5="be083ec986bfe91a449d63653c411eb2",
        )
vllm/lib/python3.10/site-packages/torchvision/datasets/omniglot.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from os.path import join
2
+ from pathlib import Path
3
+ from typing import Any, Callable, List, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+
7
+ from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
8
+ from .vision import VisionDataset
9
+
10
+
11
class Omniglot(VisionDataset):
    """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``omniglot-py`` exists.
        background (bool, optional): If True, creates dataset from the "background" set, otherwise
            creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts it in root directory. If the zip files are already downloaded, they are not
            downloaded again.
    """

    folder = "omniglot-py"
    download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
    zips_md5 = {
        "images_background": "68d2efa1b9178cc56df9314c21c6e718",
        "images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
    }

    def __init__(
        self,
        root: Union[str, Path],
        background: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
        self.background = background

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        # One entry per character class, e.g. "Latin/character01".
        self._characters: List[str] = [
            join(alphabet, character)
            for alphabet in self._alphabets
            for character in list_dir(join(self.target_folder, alphabet))
        ]
        # Per-class lists of (image filename, class index) pairs ...
        self._character_images = [
            [(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
            for idx, character in enumerate(self._characters)
        ]
        # ... and the flat view indexed by `__getitem__`.
        self._flat_character_images: List[Tuple[str, int]] = [
            pair for images in self._character_images for pair in images
        ]

    def __len__(self) -> int:
        return len(self._flat_character_images)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        image_name, character_class = self._flat_character_images[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        image = Image.open(image_path, mode="r").convert("L")

        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            character_class = self.target_transform(character_class)

        return image, character_class

    def _check_integrity(self) -> bool:
        # Integrity is judged from the zip archive, not the extracted tree.
        name = self._get_target_folder()
        return check_integrity(join(self.root, name + ".zip"), self.zips_md5[name])

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        filename = self._get_target_folder()
        zip_filename = filename + ".zip"
        url = self.download_url_prefix + "/" + zip_filename
        download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])

    def _get_target_folder(self) -> str:
        if self.background:
            return "images_background"
        return "images_evaluation"
vllm/lib/python3.10/site-packages/torchvision/datasets/oxford_iiit_pet.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path
3
+ import pathlib
4
+ from typing import Any, Callable, Optional, Sequence, Tuple, Union
5
+
6
+ from PIL import Image
7
+
8
+ from .utils import download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class OxfordIIITPet(VisionDataset):
    """`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
        target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
            ``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:

                - ``category`` (int): Label for one of the 37 pet categories.
                - ``binary-category`` (int): Binary label for cat or dog.
                - ``segmentation`` (PIL image): Segmentation trimap of the image.

            If empty, ``None`` will be returned as target.

        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into
            ``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again.
    """

    # (url, md5) pairs for the image and annotation tarballs.
    _RESOURCES = (
        ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
        ("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
    )
    _VALID_TARGET_TYPES = ("category", "binary-category", "segmentation")

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "trainval",
        target_types: Union[Sequence[str], str] = "category",
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ):
        self._split = verify_str_arg(split, "split", ("trainval", "test"))
        # Normalize a bare string into a one-element list so the rest of the
        # code can treat target_types uniformly as a sequence.
        if isinstance(target_types, str):
            target_types = [target_types]
        self._target_types = [
            verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
        ]

        super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
        self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
        self._images_folder = self._base_folder / "images"
        self._anns_folder = self._base_folder / "annotations"
        self._segs_folder = self._anns_folder / "trimaps"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        image_ids = []
        self._labels = []
        self._bin_labels = []
        # Each line of the split file is "<image_id> <label> <bin_label> <ignored>";
        # labels in the file are 1-based, so shift them to 0-based indices.
        with open(self._anns_folder / f"{self._split}.txt") as file:
            for line in file:
                image_id, label, bin_label, _ = line.strip().split()
                image_ids.append(image_id)
                self._labels.append(int(label) - 1)
                self._bin_labels.append(int(bin_label) - 1)

        self.bin_classes = ["Cat", "Dog"]
        # Derive human-readable class names from the image ids: an id looks
        # like "<raw_class>_<number>", so strip the trailing number,
        # de-duplicate via a set, order by label index, and title-case.
        self.classes = [
            " ".join(part.title() for part in raw_cls.split("_"))
            for raw_cls, _ in sorted(
                {(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
                key=lambda image_id_and_label: image_id_and_label[1],
            )
        ]
        self.bin_class_to_idx = dict(zip(self.bin_classes, range(len(self.bin_classes))))
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
        self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]

    def __len__(self) -> int:
        """Return the number of samples in the selected split."""
        return len(self._images)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, target)`` for index ``idx``; the target's shape
        follows the ``target_types`` given at construction time."""
        image = Image.open(self._images[idx]).convert("RGB")

        # Collect one entry per requested target type, in the requested order.
        target: Any = []
        for target_type in self._target_types:
            if target_type == "category":
                target.append(self._labels[idx])
            elif target_type == "binary-category":
                target.append(self._bin_labels[idx])
            else:  # target_type == "segmentation"
                target.append(Image.open(self._segs[idx]))

        # Empty -> None, single -> scalar, multiple -> tuple.
        if not target:
            target = None
        elif len(target) == 1:
            target = target[0]
        else:
            target = tuple(target)

        if self.transforms:
            image, target = self.transforms(image, target)

        return image, target

    def _check_exists(self) -> bool:
        # Both the images and annotations directories must exist.
        for folder in (self._images_folder, self._anns_folder):
            if not (os.path.exists(folder) and os.path.isdir(folder)):
                return False
        else:
            return True

    def _download(self) -> None:
        if self._check_exists():
            return

        for url, md5 in self._RESOURCES:
            download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
vllm/lib/python3.10/site-packages/torchvision/datasets/pcam.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pathlib
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ from PIL import Image
5
+
6
+ from .utils import _decompress, download_file_from_google_drive, verify_str_arg
7
+ from .vision import VisionDataset
8
+
9
+
10
class PCAM(VisionDataset):
    """`PCAM Dataset <https://github.com/basveeling/pcam>`_.

    The PatchCamelyon dataset is a binary classification dataset with 327,680
    color images (96px x 96px), extracted from histopathologic scans of lymph node
    sections. Each image is annotated with a binary label indicating presence of
    metastatic tissue.

    This dataset requires the ``h5py`` package which you can install with ``pip install h5py``.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"test"`` or ``"val"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into ``root/pcam``. If
            dataset is already downloaded, it is not downloaded again.

    .. warning::

        To download the dataset `gdown <https://github.com/wkentaro/gdown>`_ is required.
    """

    # {split: {"images" | "targets": (file name, Google Drive ID, md5 hash)}}
    _FILES = {
        "train": {
            "images": (
                "camelyonpatch_level_2_split_train_x.h5",  # Data file name
                "1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",  # Google Drive ID
                "1571f514728f59376b705fc836ff4b63",  # md5 hash
            ),
            "targets": (
                "camelyonpatch_level_2_split_train_y.h5",
                "1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
                "35c2d7259d906cfc8143347bb8e05be7",
            ),
        },
        "test": {
            "images": (
                "camelyonpatch_level_2_split_test_x.h5",
                "1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
                "d8c2d60d490dbd479f8199bdfa0cf6ec",
            ),
            "targets": (
                "camelyonpatch_level_2_split_test_y.h5",
                "17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
                "60a7035772fbdb7f34eb86d4420cf66a",
            ),
        },
        "val": {
            "images": (
                "camelyonpatch_level_2_split_valid_x.h5",
                "1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
                "d5b63470df7cfa627aeec8b9dc0c066e",
            ),
            "targets": (
                "camelyonpatch_level_2_split_valid_y.h5",
                "1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
                "2b85f58b927af9964a4c15b8f7e8f179",
            ),
        },
    }

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ):
        # h5py is an optional dependency; fail early with an actionable message.
        try:
            import h5py

            self.h5py = h5py
        except ImportError:
            raise RuntimeError(
                "h5py is not found. This dataset needs to have h5py installed: please run pip install h5py"
            )

        self._split = verify_str_arg(split, "split", ("train", "test", "val"))

        super().__init__(root, transform=transform, target_transform=target_transform)
        self._base_folder = pathlib.Path(self.root) / "pcam"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

    def __len__(self) -> int:
        images_file = self._FILES[self._split]["images"][0]
        # NOTE(review): the HDF5 file is re-opened on every call rather than
        # cached on the instance — presumably to keep the dataset picklable
        # for DataLoader workers; confirm before changing.
        with self.h5py.File(self._base_folder / images_file) as images_data:
            return images_data["x"].shape[0]

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, target)`` where ``target`` is the 0/1 metastasis label."""
        images_file = self._FILES[self._split]["images"][0]
        with self.h5py.File(self._base_folder / images_file) as images_data:
            image = Image.fromarray(images_data["x"][idx]).convert("RGB")

        targets_file = self._FILES[self._split]["targets"][0]
        with self.h5py.File(self._base_folder / targets_file) as targets_data:
            target = int(targets_data["y"][idx, 0, 0, 0])  # shape is [num_images, 1, 1, 1]

        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            target = self.target_transform(target)

        return image, target

    def _check_exists(self) -> bool:
        # Both the image and the target HDF5 files of this split must be present.
        images_file = self._FILES[self._split]["images"][0]
        targets_file = self._FILES[self._split]["targets"][0]
        return all(self._base_folder.joinpath(h5_file).exists() for h5_file in (images_file, targets_file))

    def _download(self) -> None:
        if self._check_exists():
            return

        # Each split ships as two gzip-compressed HDF5 files hosted on Google Drive.
        for file_name, file_id, md5 in self._FILES[self._split].values():
            archive_name = file_name + ".gz"
            download_file_from_google_drive(file_id, str(self._base_folder), filename=archive_name, md5=md5)
            _decompress(str(self._base_folder / archive_name))
vllm/lib/python3.10/site-packages/torchvision/datasets/phototour.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Any, Callable, List, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ from PIL import Image
8
+
9
+ from .utils import download_url
10
+ from .vision import VisionDataset
11
+
12
+
13
class PhotoTour(VisionDataset):
    """`Multi-view Stereo Correspondence <http://matthewalunbrown.com/patchdata/patchdata.html>`_ Dataset.

    .. note::

        We only provide the newer version of the dataset, since the authors state that it

            is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the
            patches are centred on real interest point detections, rather than being projections of 3D points as is the
            case in the old dataset.

        The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm.


    Args:
        root (str or ``pathlib.Path``): Root directory where images are.
        name (string): Name of the dataset to load.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    # {dataset name: [archive url, archive file name, md5]}
    urls = {
        "notredame_harris": [
            "http://matthewalunbrown.com/patchdata/notredame_harris.zip",
            "notredame_harris.zip",
            "69f8c90f78e171349abdf0307afefe4d",
        ],
        "yosemite_harris": [
            "http://matthewalunbrown.com/patchdata/yosemite_harris.zip",
            "yosemite_harris.zip",
            "a73253d1c6fbd3ba2613c45065c00d46",
        ],
        "liberty_harris": [
            "http://matthewalunbrown.com/patchdata/liberty_harris.zip",
            "liberty_harris.zip",
            "c731fcfb3abb4091110d0ae8c7ba182c",
        ],
        "notredame": [
            "http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip",
            "notredame.zip",
            "509eda8535847b8c0a90bbb210c83484",
        ],
        "yosemite": ["http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip", "yosemite.zip", "533b2e8eb7ede31be40abc317b2fd4f0"],
        "liberty": ["http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip", "liberty.zip", "fdd9152f138ea5ef2091746689176414"],
    }
    # Per-dataset grayscale mean — stored on the instance as self.mean;
    # presumably consumed by callers for normalization (not used internally here).
    means = {
        "notredame": 0.4854,
        "yosemite": 0.4844,
        "liberty": 0.4437,
        "notredame_harris": 0.4854,
        "yosemite_harris": 0.4844,
        "liberty_harris": 0.4437,
    }
    # Per-dataset grayscale standard deviation, exposed as self.std.
    stds = {
        "notredame": 0.1864,
        "yosemite": 0.1818,
        "liberty": 0.2019,
        "notredame_harris": 0.1864,
        "yosemite_harris": 0.1818,
        "liberty_harris": 0.2019,
    }
    # Exact patch counts per dataset; patch extraction truncates to this length.
    lens = {
        "notredame": 468159,
        "yosemite": 633587,
        "liberty": 450092,
        "liberty_harris": 379587,
        "yosemite_harris": 450912,
        "notredame_harris": 325295,
    }
    image_ext = "bmp"
    info_file = "info.txt"
    matches_files = "m50_100000_100000_0.txt"

    def __init__(
        self,
        root: Union[str, Path],
        name: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform)
        self.name = name
        # data_dir: extracted patch images; data_down: downloaded zip; data_file: cached tensors.
        self.data_dir = os.path.join(self.root, name)
        self.data_down = os.path.join(self.root, f"{name}.zip")
        self.data_file = os.path.join(self.root, f"{name}.pt")

        self.train = train
        self.mean = self.means[name]
        self.std = self.stds[name]

        if download:
            self.download()

        # Parse the raw files into a single .pt cache the first time around.
        if not self._check_datafile_exists():
            self.cache()

        # load the serialized data
        self.data, self.labels, self.matches = torch.load(self.data_file, weights_only=True)

    def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (data1, data2, matches)
        """
        # Train mode yields a single patch; test mode yields a ground-truth
        # match triple (patch1, patch2, is_match).
        if self.train:
            data = self.data[index]
            if self.transform is not None:
                data = self.transform(data)
            return data
        m = self.matches[index]
        data1, data2 = self.data[m[0]], self.data[m[1]]
        if self.transform is not None:
            data1 = self.transform(data1)
            data2 = self.transform(data2)
        return data1, data2, m[2]

    def __len__(self) -> int:
        return len(self.data if self.train else self.matches)

    def _check_datafile_exists(self) -> bool:
        return os.path.exists(self.data_file)

    def _check_downloaded(self) -> bool:
        return os.path.exists(self.data_dir)

    def download(self) -> None:
        """Download and extract the raw archive; skipped if the .pt cache already exists."""
        if self._check_datafile_exists():
            print(f"# Found cached data {self.data_file}")
            return

        if not self._check_downloaded():
            # download files
            url = self.urls[self.name][0]
            filename = self.urls[self.name][1]
            md5 = self.urls[self.name][2]
            fpath = os.path.join(self.root, filename)

            download_url(url, self.root, filename, md5)

            print(f"# Extracting data {self.data_down}\n")

            import zipfile

            with zipfile.ZipFile(fpath, "r") as z:
                z.extractall(self.data_dir)

            # The zip is no longer needed once extracted.
            os.unlink(fpath)

    def cache(self) -> None:
        # process and save as torch files
        print(f"# Caching data {self.data_file}")

        # (patches, labels, matches) — the tuple layout torch.load expects in __init__.
        dataset = (
            read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
            read_info_file(self.data_dir, self.info_file),
            read_matches_files(self.data_dir, self.matches_files),
        )

        with open(self.data_file, "wb") as f:
            torch.save(dataset, f)

    def extra_repr(self) -> str:
        split = "Train" if self.train is True else "Test"
        return f"Split: {split}"
185
+
186
+
187
def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor:
    """Return a Tensor containing the patches"""

    def _to_array(_img: Image.Image) -> np.ndarray:
        """Convert PIL image type to numpy 2D array"""
        return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)

    # Collect, in sorted order (to keep index relations stable), every
    # patch-sheet image with the requested extension.
    sheet_paths = sorted(
        os.path.join(data_dir, entry) for entry in os.listdir(data_dir) if entry.endswith(image_ext)
    )

    patches = []
    for sheet_path in sheet_paths:
        sheet = Image.open(sheet_path)
        # Each sheet is a grid of 64x64 patches; walk it row by row.
        for top in range(0, sheet.height, 64):
            for left in range(0, sheet.width, 64):
                patches.append(_to_array(sheet.crop((left, top, left + 64, top + 64))))
    return torch.ByteTensor(np.array(patches[:n]))
213
+
214
+
215
def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
    """Return a Tensor containing the list of labels
    Read the file and keep only the ID of the 3D point.
    """
    point_ids = []
    # The 3D point ID is the first whitespace-separated column of each line.
    with open(os.path.join(data_dir, info_file)) as fh:
        for raw_line in fh:
            point_ids.append(int(raw_line.split()[0]))
    return torch.LongTensor(point_ids)
222
+
223
+
224
def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor:
    """Return a Tensor containing the ground truth matches
    Read the file and keep only 3D point ID.
    Matches are represented with a 1, non matches with a 0.
    """
    rows = []
    with open(os.path.join(data_dir, matches_file)) as fh:
        for raw_line in fh:
            fields = raw_line.split()
            # Columns 0 and 3 are patch indices; columns 1 and 4 hold the 3D
            # point IDs — equal IDs mean the pair is a true match.
            rows.append([int(fields[0]), int(fields[3]), int(fields[1] == fields[4])])
    return torch.LongTensor(rows)
vllm/lib/python3.10/site-packages/torchvision/datasets/places365.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from os import path
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+ from urllib.parse import urljoin
6
+
7
+ from .folder import default_loader
8
+ from .utils import check_integrity, download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class Places365(VisionDataset):
    r"""`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of the Places365 dataset.
        split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challenge``,
            ``val``.
        small (bool, optional): If ``True``, uses the small images, i.e. resized to 256 x 256 pixels, instead of the
            high resolution ones.
        download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
            downloaded archives are not downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.

    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset

    Raises:
        RuntimeError: If ``download is False`` and the meta files, i.e. the devkit, are not present or corrupted.
        RuntimeError: If ``download is True`` and the image archive is already extracted.
    """
    _SPLITS = ("train-standard", "train-challenge", "val")
    _BASE_URL = "http://data.csail.mit.edu/places/places365/"
    # {variant: (archive, md5)}
    _DEVKIT_META = {
        "standard": ("filelist_places365-standard.tar", "35a0585fee1fa656440f3ab298f8479c"),
        "challenge": ("filelist_places365-challenge.tar", "70a8307e459c3de41690a7c76c931734"),
    }
    # (file, md5)
    _CATEGORIES_META = ("categories_places365.txt", "06c963b85866bd0649f97cb43dd16673")
    # {split: (file, md5)}
    _FILE_LIST_META = {
        "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a"),
        "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57"),
        "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1"),
    }
    # {(split, small): (file, md5)}
    _IMAGES_META = {
        ("train-standard", False): ("train_large_places365standard.tar", "67e186b496a84c929568076ed01a8aa1"),
        ("train-challenge", False): ("train_large_places365challenge.tar", "605f18e68e510c82b958664ea134545f"),
        ("val", False): ("val_large.tar", "9b71c4993ad89d2d8bcbdc4aef38042f"),
        ("train-standard", True): ("train_256_places365standard.tar", "53ca1c756c3d1e7809517cc47c5561c5"),
        ("train-challenge", True): ("train_256_places365challenge.tar", "741915038a5e3471ec7332404dfb64ef"),
        ("val", True): ("val_256.tar", "e27b17d8d44f4af9a78502beb927f808"),
    }

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train-standard",
        small: bool = False,
        download: bool = False,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        loader: Callable[[str], Any] = default_loader,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)

        self.split = self._verify_split(split)
        self.small = small
        self.loader = loader

        # These two calls download the devkit on demand when download=True
        # and the metadata files are missing/corrupted.
        self.classes, self.class_to_idx = self.load_categories(download)
        self.imgs, self.targets = self.load_file_list(download)

        if download:
            self.download_images()

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(image, class_index)`` for the sample at ``index``."""
        file, target = self.imgs[index]
        image = self.loader(file)

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self) -> int:
        return len(self.imgs)

    @property
    def variant(self) -> str:
        # "train-challenge" maps to the challenge devkit; everything else is standard.
        return "challenge" if "challenge" in self.split else "standard"

    @property
    def images_dir(self) -> str:
        """Absolute path of the directory the extracted images live in."""
        size = "256" if self.small else "large"
        if self.split.startswith("train"):
            dir = f"data_{size}_{self.variant}"
        else:
            dir = f"{self.split}_{size}"
        return path.join(self.root, dir)

    def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]:
        """Parse the categories file into (sorted class names, class_to_idx)."""
        def process(line: str) -> Tuple[str, int]:
            cls, idx = line.split()
            return cls, int(idx)

        file, md5 = self._CATEGORIES_META
        file = path.join(self.root, file)
        if not self._check_integrity(file, md5, download):
            self.download_devkit()

        with open(file) as fh:
            class_to_idx = dict(process(line) for line in fh)

        return sorted(class_to_idx.keys()), class_to_idx

    def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]:
        """Parse the split's file list into (imgs, targets)."""
        def process(line: str, sep="/") -> Tuple[str, int]:
            image, idx = line.split()
            # Paths in the file list are "/"-separated; rebase them onto the
            # local images directory with the OS path separator.
            return path.join(self.images_dir, image.lstrip(sep).replace(sep, os.sep)), int(idx)

        file, md5 = self._FILE_LIST_META[self.split]
        file = path.join(self.root, file)
        if not self._check_integrity(file, md5, download):
            self.download_devkit()

        with open(file) as fh:
            images = [process(line) for line in fh]

        _, targets = zip(*images)
        return images, list(targets)

    def download_devkit(self) -> None:
        file, md5 = self._DEVKIT_META[self.variant]
        download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)

    def download_images(self) -> None:
        if path.exists(self.images_dir):
            raise RuntimeError(
                f"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, "
                f"delete the directory."
            )

        file, md5 = self._IMAGES_META[(self.split, self.small)]
        download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)

        # The train archives extract without the variant suffix; rename the
        # extracted folder to the canonical images_dir name.
        if self.split.startswith("train"):
            os.rename(self.images_dir.rsplit("_", 1)[0], self.images_dir)

    def extra_repr(self) -> str:
        return "\n".join(("Split: {split}", "Small: {small}")).format(**self.__dict__)

    def _verify_split(self, split: str) -> str:
        return verify_str_arg(split, "split", self._SPLITS)

    def _check_integrity(self, file: str, md5: str, download: bool) -> bool:
        integrity = check_integrity(file, md5=md5)
        # Only raise when the caller forbids downloading a fresh copy.
        if not integrity and not download:
            raise RuntimeError(
                f"The file {file} does not exist or is corrupted. You can set download=True to download it."
            )
        return integrity
vllm/lib/python3.10/site-packages/torchvision/datasets/rendered_sst2.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ import PIL.Image
5
+
6
+ from .folder import make_dataset
7
+ from .utils import download_and_extract_archive, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
class RenderedSST2(VisionDataset):
    """`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.

    Rendered SST2 is an image classification dataset used to evaluate the models capability on optical
    character recognition. This dataset was generated by rendering sentences in the Standford Sentiment
    Treebank v2 dataset.

    This dataset contains two classes (positive and negative) and is divided in three splits: a train
    split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
    (444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), `"val"` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    _URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
    _MD5 = "2384d08e9dcfa4bd55b324e610496ee5"

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        # On disk the validation split lives in a folder named "valid".
        self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
        self._base_folder = Path(self.root) / "rendered-sst2"
        self.classes = ["negative", "positive"]
        self.class_to_idx = {"negative": 0, "positive": 1}

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        split_folder = self._base_folder / self._split_to_folder[self._split]
        self._samples = make_dataset(str(split_folder), extensions=("png",))

    def __len__(self) -> int:
        """Number of rendered sentence images in the selected split."""
        return len(self._samples)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, label)`` for the sample at ``idx``."""
        sample_path, label = self._samples[idx]
        img = PIL.Image.open(sample_path).convert("RGB")

        if self.transform:
            img = self.transform(img)

        if self.target_transform:
            label = self.target_transform(label)

        return img, label

    def extra_repr(self) -> str:
        return f"split={self._split}"

    def _check_exists(self) -> bool:
        """True iff every class directory of the selected split is on disk."""
        split_folder = self._base_folder / self._split_to_folder[self._split]
        return all((split_folder / class_label).is_dir() for class_label in set(self.classes))

    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
vllm/lib/python3.10/site-packages/torchvision/datasets/sbd.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Optional, Tuple, Union
5
+
6
+ import numpy as np
7
+ from PIL import Image
8
+
9
+ from .utils import download_and_extract_archive, download_url, verify_str_arg
10
+ from .vision import VisionDataset
11
+
12
+
13
class SBDataset(VisionDataset):
    """`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_

    The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.

    .. note ::

        Please note that the train and val splits included with this dataset are different from
        the splits in the PASCAL VOC dataset. In particular some "train" images might be part of
        VOC2012 val.
        If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`,
        which excludes all val images.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (str or ``pathlib.Path``): Root directory of the Semantic Boundaries Dataset
        image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``.
            Image set ``train_noval`` excludes VOC 2012 val images.
        mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'.
            In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`,
            where `num_classes=20`.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version. Input sample is PIL image and target is a numpy array
            if `mode='boundaries'` or PIL image if `mode='segmentation'`.
    """

    url = "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"
    md5 = "82b4d87ceb2ed10f6038a1cba92111cb"
    filename = "benchmark.tgz"

    voc_train_url = "https://www.cs.cornell.edu/~bharathh/train_noval.txt"
    voc_split_filename = "train_noval.txt"
    voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722"

    def __init__(
        self,
        root: Union[str, Path],
        image_set: str = "train",
        mode: str = "boundaries",
        download: bool = False,
        transforms: Optional[Callable] = None,
    ) -> None:

        try:
            from scipy.io import loadmat

            self._loadmat = loadmat
        except ImportError:
            raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")

        super().__init__(root, transforms)
        self.image_set = verify_str_arg(image_set, "image_set", ("train", "val", "train_noval"))
        self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries"))
        self.num_classes = 20

        sbd_root = self.root
        image_dir = os.path.join(sbd_root, "img")
        mask_dir = os.path.join(sbd_root, "cls")

        if download:
            download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5)
            # The archive extracts into benchmark_RELEASE/dataset/; flatten the
            # relevant entries directly into the dataset root.
            extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset")
            for f in ["cls", "img", "inst", "train.txt", "val.txt"]:
                old_path = os.path.join(extracted_ds_root, f)
                shutil.move(old_path, sbd_root)
            if self.image_set == "train_noval":
                # Note: this is failing as of June 2024 https://github.com/pytorch/vision/issues/8471
                download_url(self.voc_train_url, sbd_root, self.voc_split_filename, self.voc_split_md5)

        if not os.path.isdir(sbd_root):
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        split_f = os.path.join(sbd_root, image_set.rstrip("\n") + ".txt")

        # Fixed: split_f was previously wrapped in a redundant single-argument os.path.join().
        with open(split_f) as fh:
            file_names = [x.strip() for x in fh.readlines()]

        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
        self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names]

        # Bind the target loader once so __getitem__ does not re-check the mode per sample.
        self._get_target = self._get_segmentation_target if self.mode == "segmentation" else self._get_boundaries_target

    def _get_segmentation_target(self, filepath: str) -> Image.Image:
        """Load a class-segmentation mask stored in a MATLAB ``.mat`` file as a PIL image."""
        mat = self._loadmat(filepath)
        return Image.fromarray(mat["GTcls"][0]["Segmentation"][0])

    def _get_boundaries_target(self, filepath: str) -> np.ndarray:
        """Load per-class boundary maps and stack them into a ``[num_classes, H, W]`` array."""
        mat = self._loadmat(filepath)
        # Each boundary entry is a scipy sparse matrix; .toarray() densifies it before stacking.
        return np.concatenate(
            [np.expand_dims(mat["GTcls"][0]["Boundaries"][0][i][0].toarray(), axis=0) for i in range(self.num_classes)],
            axis=0,
        )

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(image, target)`` for the sample at ``index``.

        ``target`` is a PIL image in 'segmentation' mode or a numpy array in 'boundaries' mode.
        """
        img = Image.open(self.images[index]).convert("RGB")
        target = self._get_target(self.masks[index])

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self) -> int:
        return len(self.images)

    def extra_repr(self) -> str:
        lines = ["Image set: {image_set}", "Mode: {mode}"]
        return "\n".join(lines).format(**self.__dict__)
vllm/lib/python3.10/site-packages/torchvision/datasets/sbu.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+
7
+ from .utils import check_integrity, download_and_extract_archive, download_url
8
+ from .vision import VisionDataset
9
+
10
+
11
class SBU(VisionDataset):
    """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where tarball
            ``SBUCaptionedPhotoDataset.tar.gz`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
    filename = "SBUCaptionedPhotoDataset.tar.gz"
    md5_checksum = "9aec147b3488753cf758b4d493422285"

    def __init__(
        self,
        root: Union[str, Path],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = True,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # Read the caption for each photo; keep only photos that were actually
        # downloaded (the Flickr images may have been removed by their owners).
        self.photos = []
        self.captions = []

        file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
        file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")

        # Fixed: the two files were previously opened without ever being closed.
        with open(file1) as fh_urls, open(file2) as fh_captions:
            for line1, line2 in zip(fh_urls, fh_captions):
                url = line1.rstrip()
                photo = os.path.basename(url)
                filename = os.path.join(self.root, "dataset", photo)
                if os.path.exists(filename):
                    caption = line2.rstrip()
                    self.photos.append(photo)
                    self.captions.append(caption)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is a caption for the photo.
        """
        filename = os.path.join(self.root, "dataset", self.photos[index])
        img = Image.open(filename).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        target = self.captions[index]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        """The number of photos in the dataset."""
        return len(self.photos)

    def _check_integrity(self) -> bool:
        """Check the md5 checksum of the downloaded tarball."""
        root = self.root
        fpath = os.path.join(root, self.filename)
        if not check_integrity(fpath, self.md5_checksum):
            return False
        return True

    def download(self) -> None:
        """Download and extract the tarball, and download each individual photo."""

        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        # Positional args: url, download_root, extract_root, filename, md5.
        download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)

        # Download individual photos
        with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
            for line in fh:
                url = line.rstrip()
                try:
                    download_url(url, os.path.join(self.root, "dataset"))
                except OSError:
                    # The images point to public images on Flickr.
                    # Note: Images might be removed by users at anytime.
                    pass
vllm/lib/python3.10/site-packages/torchvision/datasets/semeion.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ from PIL import Image
7
+
8
+ from .utils import check_integrity, download_url
9
+ from .vision import VisionDataset
10
+
11
+
12
class SEMEION(VisionDataset):
    r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.

    Handwritten digits stored as 16x16 binary images with one-hot labels.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``semeion.py`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
    filename = "semeion.data"
    md5_checksum = "cb545d371d2ce14ec121470795a77432"

    def __init__(
        self,
        root: Union[str, Path],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = True,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        raw = np.loadtxt(os.path.join(self.root, self.filename))
        # The first 256 columns hold pixel values in [0, 1]; scale them to 8-bit
        # unsigned integers (white = 255) and reshape each row to a 16x16 image.
        pixels = (raw[:, :256] * 255).astype("uint8")
        self.data = np.reshape(pixels, (-1, 16, 16))
        # The remaining columns are a one-hot label; the hot column index is the class.
        self.labels = np.nonzero(raw[:, 256:])[1]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target = int(self.labels[index])

        # Wrap the raw array in a PIL image for consistency with other datasets.
        img = Image.fromarray(self.data[index], mode="L")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    def _check_integrity(self) -> bool:
        """Return ``True`` iff the data file exists and matches its md5 checksum."""
        return check_integrity(os.path.join(self.root, self.filename), self.md5_checksum)

    def download(self) -> None:
        """Fetch the data file unless a verified copy is already present."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        download_url(self.url, self.root, self.filename, self.md5_checksum)
vllm/lib/python3.10/site-packages/torchvision/datasets/stanford_cars.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pathlib
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ from PIL import Image
5
+
6
+ from .utils import verify_str_arg
7
+ from .vision import VisionDataset
8
+
9
+
10
class StanfordCars(VisionDataset):
    """Stanford Cars Dataset

    The Cars dataset contains 16,185 images of 196 classes of cars. The data is
    split into 8,144 training images and 8,041 testing images, where each class
    has been split roughly in a 50-50 split

    The original URL is https://ai.stanford.edu/~jkrause/cars/car_dataset.html, but it is broken.

    .. note::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset
        split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): This parameter exists for backward compatibility but it does not
            download the dataset, since the original URL is not available anymore. The dataset
            seems to be available on Kaggle so you can try to manually download it using
            `these instructions <https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616>`_.
    """

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        # scipy is an optional dependency, required to parse the .mat annotation files.
        try:
            import scipy.io as sio
        except ImportError:
            raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")

        super().__init__(root, transform=transform, target_transform=target_transform)

        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = pathlib.Path(root) / "stanford_cars"
        devkit = self._base_folder / "devkit"

        is_train = self._split == "train"
        if is_train:
            self._annotations_mat_path = devkit / "cars_train_annos.mat"
        else:
            self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
        self._images_base_path = self._base_folder / ("cars_train" if is_train else "cars_test")

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError(
                "Dataset not found. Try to manually download following the instructions in "
                "https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616."
            )

        annotations = sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
        self._samples = []
        for annotation in annotations:
            image_path = str(self._images_base_path / annotation["fname"])
            # Original target mapping starts from 1, hence -1.
            self._samples.append((image_path, annotation["class"] - 1))

        self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
        self.class_to_idx = {name: index for index, name in enumerate(self.classes)}

    def __len__(self) -> int:
        return len(self._samples)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Returns pil_image and class_id for given index"""
        image_path, target = self._samples[idx]
        pil_image = Image.open(image_path).convert("RGB")

        if self.transform is not None:
            pil_image = self.transform(pil_image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pil_image, target

    def _check_exists(self) -> bool:
        """Return ``True`` iff the devkit, annotations and image folder are all present."""
        devkit_present = (self._base_folder / "devkit").is_dir()
        return devkit_present and self._annotations_mat_path.exists() and self._images_base_path.is_dir()

    def download(self):
        raise ValueError(
            "The original URL is broken so the StanfordCars dataset is not available for automatic "
            "download anymore. You can try to download it manually following "
            "https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616, "
            "and set download=False to avoid this error."
        )
vllm/lib/python3.10/site-packages/torchvision/datasets/stl10.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ from pathlib import Path
3
+ from typing import Any, Callable, cast, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ from PIL import Image
7
+
8
+ from .utils import check_integrity, download_and_extract_archive, verify_str_arg
9
+ from .vision import VisionDataset
10
+
11
+
12
class STL10(VisionDataset):
    """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset where directory
            ``stl10_binary`` exists.
        split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
            Accordingly, dataset is selected.
        folds (int, optional): One of {0-9} or None.
            For training, loads one of the 10 pre-defined folds of 1k samples for the
            standard evaluation procedure. If no value is passed, loads the 5k samples.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    base_folder = "stl10_binary"
    url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
    filename = "stl10_binary.tar.gz"
    tgz_md5 = "91f7769df0f17e558f3565bffb0c7dfb"
    class_names_file = "class_names.txt"
    folds_list_file = "fold_indices.txt"
    train_list = [
        ["train_X.bin", "918c2871b30a85fa023e0c44e0bee87f"],
        ["train_y.bin", "5a34089d4802c674881badbb80307741"],
        ["unlabeled_X.bin", "5242ba1fed5e4be9e1e742405eb56ca4"],
    ]

    test_list = [["test_X.bin", "7f263ba9f9e0b06b93213547f721ac82"], ["test_y.bin", "36f9794fa4beb8a2c72628de14fa638e"]]
    splits = ("train", "train+unlabeled", "unlabeled", "test")

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        folds: Optional[int] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = verify_str_arg(split, "split", self.splits)
        self.folds = self._verify_folds(folds)

        if download:
            self.download()
        elif not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # now load the picked numpy arrays
        self.labels: Optional[np.ndarray]
        if self.split == "train":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)

        elif self.split == "train+unlabeled":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)
            # Unlabeled samples are appended after the (possibly folded) train
            # samples and tagged with the sentinel label -1.
            unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
            self.data = np.concatenate((self.data, unlabeled_data))
            self.labels = np.concatenate((self.labels, np.asarray([-1] * unlabeled_data.shape[0])))

        elif self.split == "unlabeled":
            self.data, _ = self.__loadfile(self.train_list[2][0])
            self.labels = np.asarray([-1] * self.data.shape[0])
        else:  # self.split == 'test':
            self.data, self.labels = self.__loadfile(self.test_list[0][0], self.test_list[1][0])

        class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
        if os.path.isfile(class_file):
            with open(class_file) as f:
                self.classes = f.read().splitlines()

    def _verify_folds(self, folds: Optional[int]) -> Optional[int]:
        """Validate the ``folds`` argument: ``None`` or an int in ``[0, 10)``."""
        if folds is None:
            return folds
        elif isinstance(folds, int):
            if folds in range(10):
                return folds
            msg = "Value for argument folds should be in the range [0, 10), but got {}."
            raise ValueError(msg.format(folds))
        else:
            msg = "Expected type None or int for argument folds, but got type {}."
            raise ValueError(msg.format(type(folds)))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target: Optional[int]
        if self.labels is not None:
            img, target = self.data[index], int(self.labels[index])
        else:
            img, target = self.data[index], None

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return self.data.shape[0]

    def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """Read a binary image file (and optional label file) into numpy arrays."""
        labels = None
        if labels_file:
            path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
            with open(path_to_labels, "rb") as f:
                labels = np.fromfile(f, dtype=np.uint8) - 1  # 0-based

        path_to_data = os.path.join(self.root, self.base_folder, data_file)
        with open(path_to_data, "rb") as f:
            # read whole file in uint8 chunks
            everything = np.fromfile(f, dtype=np.uint8)
            images = np.reshape(everything, (-1, 3, 96, 96))
            # Data is stored column-major per channel; swap H and W axes.
            images = np.transpose(images, (0, 1, 3, 2))

        return images, labels

    def _check_integrity(self) -> bool:
        """Return ``True`` iff all binary files exist and match their md5 checksums."""
        for filename, md5 in self.train_list + self.test_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
        self._check_integrity()

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)

    def __load_folds(self, folds: Optional[int]) -> None:
        # loads one of the folds if specified
        if folds is None:
            return
        path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file)
        with open(path_to_folds) as f:
            str_idx = f.read().splitlines()[folds]
            # Fixed: np.fromstring(str_idx, sep=" ") is deprecated in NumPy;
            # parse the whitespace-separated indices explicitly instead.
            list_idx = np.array([int(i) for i in str_idx.split()], dtype=np.int64)
        self.data = self.data[list_idx, :, :, :]
        if self.labels is not None:
            self.labels = self.labels[list_idx]
vllm/lib/python3.10/site-packages/torchvision/datasets/sun397.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Any, Callable, Optional, Tuple, Union
3
+
4
+ import PIL.Image
5
+
6
+ from .utils import download_and_extract_archive
7
+ from .vision import VisionDataset
8
+
9
+
10
class SUN397(VisionDataset):
    """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.

    The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
    397 categories with 108'754 images.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
    _DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"

    def __init__(
        self,
        root: Union[str, Path],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._data_dir = Path(self.root) / "SUN397"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        with open(self._data_dir / "ClassName.txt") as f:
            # Lines look like "/a/abbey"; drop the leading "/x/" shard prefix.
            self.classes = [line[3:].strip() for line in f]

        self.class_to_idx = {name: index for index, name in enumerate(self.classes)}
        self._image_files = list(self._data_dir.rglob("sun_*.jpg"))

        self._labels = []
        for path in self._image_files:
            # The class name is the directory path below the shard letter.
            class_name = "/".join(path.relative_to(self._data_dir).parts[1:-1])
            self._labels.append(self.class_to_idx[class_name])

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Return ``(image, label)`` for the sample at ``idx``."""
        image_file = self._image_files[idx]
        label = self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def _check_exists(self) -> bool:
        """Return ``True`` iff the extracted ``SUN397`` directory is present."""
        return self._data_dir.is_dir()

    def _download(self) -> None:
        """Download and extract the archive unless the dataset already exists."""
        if not self._check_exists():
            download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
vllm/lib/python3.10/site-packages/torchvision/datasets/ucf101.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
4
+
5
+ from torch import Tensor
6
+
7
+ from .folder import find_classes, make_dataset
8
+ from .video_utils import VideoClips
9
+ from .vision import VisionDataset
10
+
11
+
12
class UCF101(VisionDataset):
    """
    `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.

    UCF101 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``. The dataset itself can be downloaded from the dataset website;
    annotations that ``annotation_path`` should be pointing to can be downloaded from `here
    <https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`_.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Args:
        root (str or ``pathlib.Path``): Root directory of the UCF101 Dataset.
        annotation_path (str): path to the folder containing the split files;
            see docstring above for download instructions of these files
        frames_per_clip (int): number of frames in a clip.
        step_between_clips (int, optional): number of frames between each clip.
        fold (int, optional): which fold to use. Should be between 1 and 3.
        train (bool, optional): if ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" (default) or "TCHW".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
            - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
              and `L` is the number of points
            - label (int): class of the video clip
    """

    def __init__(
        self,
        root: Union[str, Path],
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        super().__init__(root)
        if not 1 <= fold <= 3:
            raise ValueError(f"fold should be between 1 and 3, got {fold}")

        extensions = ("avi",)
        self.fold = fold
        self.train = train

        # One class per top-level folder under root; samples are (video_path, class_idx).
        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
        video_list = [x[0] for x in self.samples]
        # NOTE: the first five VideoClips arguments are passed positionally — keep
        # this ordering in sync with the VideoClips constructor.
        video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # we bookkeep the full version of video clips because we want to be able
        # to return the metadata of full version rather than the subset version of
        # video clips
        self.full_video_clips = video_clips
        # self.indices maps positions in the selected fold back into video_list/self.samples.
        self.indices = self._select_fold(video_list, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform

    @property
    def metadata(self) -> Dict[str, Any]:
        # Metadata of ALL scanned videos, not just the selected fold subset.
        return self.full_video_clips.metadata

    def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:
        """Return the indices into ``video_list`` that belong to the given split/fold.

        Reads e.g. ``trainlist01.txt`` / ``testlist01.txt`` from ``annotation_path``;
        each line starts with a relative video path (train files also carry a label,
        which is discarded here).
        """
        name = "train" if train else "test"
        name = f"{name}list{fold:02d}.txt"
        f = os.path.join(annotation_path, name)
        selected_files = set()
        with open(f) as fid:
            data = fid.readlines()
            data = [x.strip().split(" ")[0] for x in data]
            # Rebuild the path with the local OS separator so it matches video_list entries.
            data = [os.path.join(self.root, *x.split("/")) for x in data]
            selected_files.update(data)
        indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
        return indices

    def __len__(self) -> int:
        return self.video_clips.num_clips()

    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        """Return ``(video, audio, label)`` for clip ``idx`` (see class docstring)."""
        video, audio, info, video_idx = self.video_clips.get_clip(idx)
        # video_idx indexes the fold subset; map it back to the full sample list.
        label = self.samples[self.indices[video_idx]][1]

        if self.transform is not None:
            video = self.transform(video)

        return video, audio, label
vllm/lib/python3.10/site-packages/torchvision/datasets/usps.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ from PIL import Image
7
+
8
+ from .utils import download_url
9
+ from .vision import VisionDataset
10
+
11
+
12
class USPS(VisionDataset):
    """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.

    The data-format is : [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``.
    The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
    and make pixel values in ``[0, 255]``.

    Args:
        root (str or ``pathlib.Path``): Root directory of dataset to store ``USPS`` data files.
        train (bool, optional): If True, creates dataset from ``usps.bz2``,
            otherwise from ``usps.t.bz2``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

    """

    # split name -> [download URL, local filename, expected MD5]
    split_list = {
        "train": [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
            "usps.bz2",
            "ec16c51db3855ca6c91edd34d0e9b197",
        ],
        "test": [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
            "usps.t.bz2",
            "8ea070ee2aca1ac39742fdd1ef5ed118",
        ],
    }

    def __init__(
        self,
        root: Union[str, Path],
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        url, filename, checksum = self.split_list["train" if train else "test"]
        full_path = os.path.join(self.root, filename)

        if download and not os.path.exists(full_path):
            download_url(url, self.root, filename, md5=checksum)

        # bz2 is only needed during construction, so import it lazily
        import bz2

        with bz2.open(full_path) as stream:
            # each libsvm-format row is: "label index:value index:value ..."
            rows = [line.decode().split() for line in stream]
        pixel_strings = [[token.rsplit(":", 1)[-1] for token in row[1:]] for row in rows]
        images = np.asarray(pixel_strings, dtype=np.float32).reshape((-1, 16, 16))
        # map [-1, 1] floats onto [0, 255] uint8 pixels
        images = ((images + 1) / 2 * 255).astype(dtype=np.uint8)

        # uint8 array of shape (N, 16, 16)
        self.data = images
        # shift labels from [1, 10] to [0, 9]
        self.targets = [int(row[0]) - 1 for row in rows]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.targets[index])

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img, mode="L")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)
vllm/lib/python3.10/site-packages/torchvision/datasets/utils.py ADDED
@@ -0,0 +1,476 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import bz2
2
+ import gzip
3
+ import hashlib
4
+ import lzma
5
+ import os
6
+ import os.path
7
+ import pathlib
8
+ import re
9
+ import sys
10
+ import tarfile
11
+ import urllib
12
+ import urllib.error
13
+ import urllib.request
14
+ import zipfile
15
+ from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Tuple, TypeVar, Union
16
+ from urllib.parse import urlparse
17
+
18
+ import numpy as np
19
+ import torch
20
+ from torch.utils.model_zoo import tqdm
21
+
22
+ from .._internally_replaced_utils import _download_file_from_remote_location, _is_remote_location_available
23
+
24
+ USER_AGENT = "pytorch/vision"
25
+
26
+
27
def _urlretrieve(url: str, filename: Union[str, pathlib.Path], chunk_size: int = 1024 * 32) -> None:
    """Stream *url* to *filename* in fixed-size chunks with a tqdm progress bar."""
    request = urllib.request.Request(url, headers={"User-Agent": USER_AGENT})
    with urllib.request.urlopen(request) as response:
        with open(filename, "wb") as fh, tqdm(total=response.length, unit="B", unit_scale=True) as pbar:
            # read until the server signals EOF with an empty chunk
            for chunk in iter(lambda: response.read(chunk_size), b""):
                fh.write(chunk)
                pbar.update(len(chunk))
33
+
34
+
35
def calculate_md5(fpath: Union[str, pathlib.Path], chunk_size: int = 1024 * 1024) -> str:
    """Return the MD5 hex digest of the file at *fpath*, read in chunks."""
    # Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
    # not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
    # it torchvision.datasets is unusable in these environments since we perform a MD5 check everywhere.
    if sys.version_info >= (3, 9):
        hasher = hashlib.md5(usedforsecurity=False)
    else:
        hasher = hashlib.md5()
    with open(fpath, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            hasher.update(block)
    return hasher.hexdigest()
47
+
48
+
49
def check_md5(fpath: Union[str, pathlib.Path], md5: str, **kwargs: Any) -> bool:
    """Return True iff the MD5 digest of *fpath* equals *md5*."""
    return calculate_md5(fpath, **kwargs) == md5
51
+
52
+
53
def check_integrity(fpath: Union[str, pathlib.Path], md5: Optional[str] = None) -> bool:
    """Return True if *fpath* exists and, when *md5* is given, its checksum matches."""
    if not os.path.isfile(fpath):
        return False
    # with no expected checksum, mere existence counts as intact
    return md5 is None or check_md5(fpath, md5)
59
+
60
+
61
def _get_redirect_url(url: str, max_hops: int = 3) -> str:
    """Follow HTTP redirects for *url* and return the final, stable URL.

    Raises:
        RecursionError: if more than *max_hops* redirects are encountered.
    """
    initial_url = url
    # NOTE(review): "Method" is sent as a request *header*, not as the HTTP
    # verb, so the server still sees a GET -- presumably intentional; confirm.
    headers = {"Method": "HEAD", "User-Agent": USER_AGENT}

    for _ in range(max_hops + 1):
        with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response:
            # response.url is the URL after the hop; once it no longer changes
            # (or is unavailable) the chain has settled.
            if response.url == url or response.url is None:
                return url

            url = response.url
    else:
        # for/else: only reached when the loop exhausts without returning,
        # i.e. the redirect chain is longer than max_hops.
        raise RecursionError(
            f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
        )
75
+
76
+
77
+ def _get_google_drive_file_id(url: str) -> Optional[str]:
78
+ parts = urlparse(url)
79
+
80
+ if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
81
+ return None
82
+
83
+ match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
84
+ if match is None:
85
+ return None
86
+
87
+ return match.group("id")
88
+
89
+
90
def download_url(
    url: str,
    root: Union[str, pathlib.Path],
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
    max_redirect_hops: int = 3,
) -> None:
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        max_redirect_hops (int, optional): Maximum number of redirect hops allowed

    Raises:
        RuntimeError: if the downloaded file is missing or fails the MD5 check.
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.fspath(os.path.join(root, filename))

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
        return

    # internal mirror path used in some deployment environments
    if _is_remote_location_available():
        _download_file_from_remote_location(fpath, url)
    else:
        # expand redirect chain if needed
        url = _get_redirect_url(url, max_hops=max_redirect_hops)

        # check if file is located on Google Drive
        file_id = _get_google_drive_file_id(url)
        if file_id is not None:
            return download_file_from_google_drive(file_id, root, filename, md5)

        # download the file
        try:
            print("Downloading " + url + " to " + fpath)
            _urlretrieve(url, fpath)
        except (urllib.error.URLError, OSError) as e:  # type: ignore[attr-defined]
            # one retry over plain http for hosts with broken TLS
            if url[:5] == "https":
                url = url.replace("https:", "http:")
                print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath)
                _urlretrieve(url, fpath)
            else:
                raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
144
+
145
+
146
def list_dir(root: Union[str, pathlib.Path], prefix: bool = False) -> List[str]:
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root) if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        return [os.path.join(root, name) for name in names]
    return names
159
+
160
+
161
def list_files(root: Union[str, pathlib.Path], suffix: str, prefix: bool = False) -> List[str]:
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    matches = [
        entry
        for entry in os.listdir(root)
        if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)
    ]
    if prefix is True:
        return [os.path.join(root, entry) for entry in matches]
    return matches
176
+
177
+
178
def download_file_from_google_drive(
    file_id: str,
    root: Union[str, pathlib.Path],
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
):
    """Download a Google Drive file from and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check

    Raises:
        RuntimeError: if `gdown` is not installed, or if the downloaded file is
            missing or fails the MD5 check.
    """
    # gdown handles Google Drive's confirmation pages; it is an optional dependency
    try:
        import gdown
    except ModuleNotFoundError:
        raise RuntimeError(
            "To download files from GDrive, 'gdown' is required. You can install it with 'pip install gdown'."
        )

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.fspath(os.path.join(root, filename))

    os.makedirs(root, exist_ok=True)

    # skip the download when a (verified) copy already exists
    if check_integrity(fpath, md5):
        print(f"Using downloaded {'and verified ' if md5 else ''}file: {fpath}")
        return

    gdown.download(id=file_id, output=fpath, quiet=False, user_agent=USER_AGENT)

    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
214
+
215
+
216
+ def _extract_tar(
217
+ from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
218
+ ) -> None:
219
+ with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
220
+ tar.extractall(to_path)
221
+
222
+
223
# Maps a compression suffix found inside a zip file name to the corresponding
# `zipfile` compression constant; plain ".zip" files use ZIP_STORED instead.
_ZIP_COMPRESSION_MAP: Dict[str, int] = {
    ".bz2": zipfile.ZIP_BZIP2,
    ".xz": zipfile.ZIP_LZMA,
}
227
+
228
+
229
+ def _extract_zip(
230
+ from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
231
+ ) -> None:
232
+ with zipfile.ZipFile(
233
+ from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
234
+ ) as zip:
235
+ zip.extractall(to_path)
236
+
237
+
238
# Dispatch table: archive suffix -> function that extracts (from, to, compression).
_ARCHIVE_EXTRACTORS: Dict[str, Callable[[Union[str, pathlib.Path], Union[str, pathlib.Path], Optional[str]], None]] = {
    ".tar": _extract_tar,
    ".zip": _extract_zip,
}
# Compression suffix -> stdlib opener used for single-file decompression.
_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
    ".bz2": bz2.open,
    ".gz": gzip.open,
    ".xz": lzma.open,
}
# Combined suffixes that alias an (archive type, compression) pair.
_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
    ".tbz": (".tar", ".bz2"),
    ".tbz2": (".tar", ".bz2"),
    ".tgz": (".tar", ".gz"),
}
252
+
253
+
254
def _detect_file_type(file: Union[str, pathlib.Path]) -> Tuple[str, Optional[str], Optional[str]]:
    """Detect the archive type and/or compression of a file.

    Args:
        file (str): the filename

    Returns:
        (tuple): tuple of suffix, archive type, and compression

    Raises:
        RuntimeError: if file has no suffix or suffix is not supported
    """
    suffixes = pathlib.Path(file).suffixes
    if not suffixes:
        raise RuntimeError(
            f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
        )
    suffix = suffixes[-1]

    # combined alias such as ".tgz" -> (".tar", ".gz")
    if suffix in _FILE_TYPE_ALIASES:
        return (suffix, *_FILE_TYPE_ALIASES[suffix])

    # plain archive without compression, e.g. ".tar" or ".zip"
    if suffix in _ARCHIVE_EXTRACTORS:
        return suffix, suffix, None

    if suffix in _COMPRESSED_FILE_OPENERS:
        # a compressed archive like ".tar.gz" carries the archive suffix second to last
        if len(suffixes) > 1 and suffixes[-2] in _ARCHIVE_EXTRACTORS:
            return suffixes[-2] + suffix, suffixes[-2], suffix
        # compressed single file, no archive inside
        return suffix, None, suffix

    valid_suffixes = sorted(set(_FILE_TYPE_ALIASES) | set(_ARCHIVE_EXTRACTORS) | set(_COMPRESSED_FILE_OPENERS))
    raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")
295
+
296
+
297
def _decompress(
    from_path: Union[str, pathlib.Path],
    to_path: Optional[Union[str, pathlib.Path]] = None,
    remove_finished: bool = False,
) -> pathlib.Path:
    r"""Decompress a file.

    The compression is automatically detected from the file name.

    Args:
        from_path (str): Path to the file to be decompressed.
        to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the decompressed file.
    """
    suffix, archive_type, compression = _detect_file_type(from_path)
    if not compression:
        raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

    if to_path is None:
        # e.g. "x.tar.gz" -> "x.tar", "x.gz" -> "x"
        to_path = pathlib.Path(os.fspath(from_path).replace(suffix, archive_type if archive_type is not None else ""))

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]

    # decompress the whole stream in one read/write
    with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
        wfh.write(rfh.read())

    if remove_finished:
        os.remove(from_path)

    return pathlib.Path(to_path)
331
+
332
+
333
def extract_archive(
    from_path: Union[str, pathlib.Path],
    to_path: Optional[Union[str, pathlib.Path]] = None,
    remove_finished: bool = False,
) -> Union[str, pathlib.Path]:
    """Extract an archive.

    The archive type and a possible compression is automatically detected from the file name. If the file is compressed
    but not an archive the call is dispatched to :func:`decompress`.

    Args:
        from_path (str): Path to the file to be extracted.
        to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
            used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the directory the file was extracted to.
    """

    def path_or_str(ret_path: pathlib.Path) -> Union[str, pathlib.Path]:
        # mirror the caller's input type: str in -> str out, Path in -> Path out
        if isinstance(from_path, str):
            return os.fspath(ret_path)
        else:
            return ret_path

    if to_path is None:
        to_path = os.path.dirname(from_path)

    suffix, archive_type, compression = _detect_file_type(from_path)
    if not archive_type:
        # compressed single file (e.g. ".gz"): decompress in place under to_path
        ret_path = _decompress(
            from_path,
            os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
            remove_finished=remove_finished,
        )
        return path_or_str(ret_path)

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    extractor = _ARCHIVE_EXTRACTORS[archive_type]

    extractor(from_path, to_path, compression)
    if remove_finished:
        os.remove(from_path)

    return path_or_str(pathlib.Path(to_path))
379
+
380
+
381
def download_and_extract_archive(
    url: str,
    download_root: Union[str, pathlib.Path],
    extract_root: Optional[Union[str, pathlib.Path]] = None,
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    """Download an archive from *url* into *download_root*, then extract it.

    Args:
        url: URL of the archive to download.
        download_root: directory the archive is downloaded to.
        extract_root: directory the archive is extracted into; defaults to *download_root*.
        filename: local file name for the archive; defaults to the URL basename.
        md5: optional MD5 checksum to verify the download against.
        remove_finished: if True, delete the archive after extraction.
    """
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    # download_url skips the transfer when a verified copy already exists
    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print(f"Extracting {archive} to {extract_root}")
    extract_archive(archive, extract_root, remove_finished)
400
+
401
+
402
def iterable_to_str(iterable: Iterable) -> str:
    """Render *iterable* as comma-separated single-quoted items, e.g. ``'a', 'b'``."""
    quoted_body = "', '".join([str(item) for item in iterable])
    return f"'{quoted_body}'"
404
+
405
+
406
# constrained TypeVar so the return type matches the input string type
T = TypeVar("T", str, bytes)


def verify_str_arg(
    value: T,
    arg: Optional[str] = None,
    valid_values: Optional[Iterable[T]] = None,
    custom_msg: Optional[str] = None,
) -> T:
    """Validate that *value* is a string and, optionally, one of *valid_values*.

    Returns the value unchanged on success; raises ValueError otherwise.
    *custom_msg* overrides the default error message for invalid values.
    """
    if not isinstance(value, str):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        raise ValueError(msg.format(type=type(value), arg=arg))

    # no restriction, or the value is allowed: accept it
    if valid_values is None or value in valid_values:
        return value

    if custom_msg is not None:
        msg = custom_msg
    else:
        msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}."
        msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values))
    raise ValueError(msg)
435
+
436
+
437
+ def _read_pfm(file_name: Union[str, pathlib.Path], slice_channels: int = 2) -> np.ndarray:
438
+ """Read file in .pfm format. Might contain either 1 or 3 channels of data.
439
+
440
+ Args:
441
+ file_name (str): Path to the file.
442
+ slice_channels (int): Number of channels to slice out of the file.
443
+ Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
444
+ """
445
+
446
+ with open(file_name, "rb") as f:
447
+ header = f.readline().rstrip()
448
+ if header not in [b"PF", b"Pf"]:
449
+ raise ValueError("Invalid PFM file")
450
+
451
+ dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
452
+ if not dim_match:
453
+ raise Exception("Malformed PFM header.")
454
+ w, h = (int(dim) for dim in dim_match.groups())
455
+
456
+ scale = float(f.readline().rstrip())
457
+ if scale < 0: # little-endian
458
+ endian = "<"
459
+ scale = -scale
460
+ else:
461
+ endian = ">" # big-endian
462
+
463
+ data = np.fromfile(f, dtype=endian + "f")
464
+
465
+ pfm_channels = 3 if header == b"PF" else 1
466
+
467
+ data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
468
+ data = np.flip(data, axis=1) # flip on h dimension
469
+ data = data[:slice_channels, :, :]
470
+ return data.astype(np.float32)
471
+
472
+
473
+ def _flip_byte_order(t: torch.Tensor) -> torch.Tensor:
474
+ return (
475
+ t.contiguous().view(torch.uint8).view(*t.shape, t.element_size()).flip(-1).view(*t.shape[:-1], -1).view(t.dtype)
476
+ )
vllm/lib/python3.10/site-packages/torchvision/datasets/video_utils.py ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import bisect
2
+ import math
3
+ import warnings
4
+ from fractions import Fraction
5
+ from typing import Any, Callable, cast, Dict, List, Optional, Tuple, TypeVar, Union
6
+
7
+ import torch
8
+ from torchvision.io import _probe_video_from_file, _read_video_from_file, read_video, read_video_timestamps
9
+
10
+ from .utils import tqdm
11
+
12
+ T = TypeVar("T")
13
+
14
+
15
def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int:
    """Convert a presentation timestamp between two time bases.

    Args:
        pts: presentation timestamp expressed in *timebase_from* units.
        timebase_from: original timebase (Fraction).
        timebase_to: target timebase (Fraction).
        round_func: rounding function applied to the exact rational result.
    """
    converted = Fraction(pts, 1) * timebase_from / timebase_to
    return round_func(converted)
25
+
26
+
27
def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor:
    """Return all consecutive windows of *size* elements from a 1-d tensor.

    Like ``tensor.unfold`` but with a *dilation* parameter: windows start
    *step* elements apart, and elements inside a window are *dilation* apart.
    The result is a strided view, not a copy.
    """
    if tensor.dim() != 1:
        raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}")
    base_stride = tensor.stride(0)
    # span of one window in the underlying storage
    window_span = dilation * (size - 1) + 1
    num_windows = (tensor.numel() - window_span) // step + 1
    shape = (num_windows, size) if num_windows >= 1 else (0, size)
    return torch.as_strided(tensor, shape, (step * base_stride, dilation * base_stride))
45
+
46
+
47
class _VideoTimestampsDataset:
    """
    Dataset used to parallelize the reading of the timestamps
    of a list of videos, given their paths in the filesystem.

    Used in VideoClips and defined at top level, so it can be
    pickled when forking.
    """

    def __init__(self, video_paths: List[str]) -> None:
        # paths only; decoding happens lazily in __getitem__ so workers share no state
        self.video_paths = video_paths

    def __len__(self) -> int:
        return len(self.video_paths)

    def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]:
        # returns (per-frame pts list, fps or None) for one video
        return read_video_timestamps(self.video_paths[idx])
64
+
65
+
66
def _collate_fn(x: T) -> T:
    """
    Dummy collate function to be used with _VideoTimestampsDataset:
    returns the batch unchanged instead of the DataLoader default collation.
    """
    return x
71
+
72
+
73
+ class VideoClips:
74
+ """
75
+ Given a list of video files, computes all consecutive subvideos of size
76
+ `clip_length_in_frames`, where the distance between each subvideo in the
77
+ same video is defined by `frames_between_clips`.
78
+ If `frame_rate` is specified, it will also resample all the videos to have
79
+ the same frame rate, and the clips will refer to this frame rate.
80
+
81
+ Creating this instance the first time is time-consuming, as it needs to
82
+ decode all the videos in `video_paths`. It is recommended that you
83
+ cache the results after instantiation of the class.
84
+
85
+ Recreating the clips for different clip lengths is fast, and can be done
86
+ with the `compute_clips` method.
87
+
88
+ Args:
89
+ video_paths (List[str]): paths to the video files
90
+ clip_length_in_frames (int): size of a clip in number of frames
91
+ frames_between_clips (int): step (in frames) between each clip
92
+ frame_rate (float, optional): if specified, it will resample the video
93
+ so that it has `frame_rate`, and then the clips will be defined
94
+ on the resampled video
95
+ num_workers (int): how many subprocesses to use for data loading.
96
+ 0 means that the data will be loaded in the main process. (default: 0)
97
+ output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
98
+ """
99
+
100
    def __init__(
        self,
        video_paths: List[str],
        clip_length_in_frames: int = 16,
        frames_between_clips: int = 1,
        frame_rate: Optional[float] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 0,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _video_max_dimension: int = 0,
        _audio_samples: int = 0,
        _audio_channels: int = 0,
        output_format: str = "THWC",
    ) -> None:

        self.video_paths = video_paths
        self.num_workers = num_workers

        # these options are not valid for pyav backend
        self._video_width = _video_width
        self._video_height = _video_height
        self._video_min_dimension = _video_min_dimension
        self._video_max_dimension = _video_max_dimension
        self._audio_samples = _audio_samples
        self._audio_channels = _audio_channels
        # normalize so "thwc" etc. are accepted
        self.output_format = output_format.upper()
        if self.output_format not in ("THWC", "TCHW"):
            raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")

        # Decoding timestamps for every video is expensive; reuse cached
        # metadata when the caller provides it (e.g. from a previous run).
        if _precomputed_metadata is None:
            self._compute_frame_pts()
        else:
            self._init_from_metadata(_precomputed_metadata)
        self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate)
136
+
137
    def _compute_frame_pts(self) -> None:
        """Decode per-frame timestamps and fps for every video in ``self.video_paths``."""
        self.video_pts = []  # len = num_videos. Each entry is a tensor of shape (num_frames_in_video,)
        self.video_fps: List[float] = []  # len = num_videos

        # strategy: use a DataLoader to parallelize read_video_timestamps
        # so need to create a dummy dataset first
        import torch.utils.data

        dl: torch.utils.data.DataLoader = torch.utils.data.DataLoader(
            _VideoTimestampsDataset(self.video_paths),  # type: ignore[arg-type]
            batch_size=16,
            num_workers=self.num_workers,
            collate_fn=_collate_fn,
        )

        with tqdm(total=len(dl)) as pbar:
            for batch in dl:
                pbar.update(1)
                # batch is a list of (pts_list, fps) pairs; split into two sequences
                batch_pts, batch_fps = list(zip(*batch))
                # we need to specify dtype=torch.long because for empty list,
                # torch.as_tensor will use torch.float as default dtype. This
                # happens when decoding fails and no pts is returned in the list.
                batch_pts = [torch.as_tensor(pts, dtype=torch.long) for pts in batch_pts]
                self.video_pts.extend(batch_pts)
                self.video_fps.extend(batch_fps)
162
+
163
    def _init_from_metadata(self, metadata: Dict[str, Any]) -> None:
        """Restore paths, frame pts and fps from cached metadata, skipping decoding."""
        self.video_paths = metadata["video_paths"]
        # the three lists must stay aligned one entry per video
        assert len(self.video_paths) == len(metadata["video_pts"])
        self.video_pts = metadata["video_pts"]
        assert len(self.video_paths) == len(metadata["video_fps"])
        self.video_fps = metadata["video_fps"]
169
+
170
    @property
    def metadata(self) -> Dict[str, Any]:
        """Per-video metadata (paths, frame pts, fps).

        Can be cached by the caller and passed back as ``_precomputed_metadata``
        to avoid re-decoding the videos.
        """
        _metadata = {
            "video_paths": self.video_paths,
            "video_pts": self.video_pts,
            "video_fps": self.video_fps,
        }
        return _metadata
178
+
179
    def subset(self, indices: List[int]) -> "VideoClips":
        """Return a new VideoClips restricted to the videos at *indices*.

        The per-video metadata is sliced and passed as precomputed metadata so
        the new instance does not re-decode anything.
        """
        video_paths = [self.video_paths[i] for i in indices]
        video_pts = [self.video_pts[i] for i in indices]
        video_fps = [self.video_fps[i] for i in indices]
        metadata = {
            "video_paths": video_paths,
            "video_pts": video_pts,
            "video_fps": video_fps,
        }
        # rebuild with identical clip/decoding settings
        return type(self)(
            video_paths,
            clip_length_in_frames=self.num_frames,
            frames_between_clips=self.step,
            frame_rate=self.frame_rate,
            _precomputed_metadata=metadata,
            num_workers=self.num_workers,
            _video_width=self._video_width,
            _video_height=self._video_height,
            _video_min_dimension=self._video_min_dimension,
            _video_max_dimension=self._video_max_dimension,
            _audio_samples=self._audio_samples,
            _audio_channels=self._audio_channels,
            output_format=self.output_format,
        )
203
+
204
    @staticmethod
    def compute_clips_for_video(
        video_pts: torch.Tensor, num_frames: int, step: int, fps: Optional[float], frame_rate: Optional[float] = None
    ) -> Tuple[torch.Tensor, Union[List[slice], torch.Tensor]]:
        """Compute the clips (windows of frame pts) for a single video.

        Returns ``(clips, idxs)`` where *clips* holds the pts of each clip and
        *idxs* maps clip frames back to original frame indices after resampling.
        """
        if fps is None:
            # if for some reason the video doesn't have fps (because doesn't have a video stream)
            # set the fps to 1. The value doesn't matter, because video_pts is empty anyway
            fps = 1
        if frame_rate is None:
            frame_rate = fps
        # number of frames the video would have at the target frame rate
        total_frames = len(video_pts) * frame_rate / fps
        _idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
        video_pts = video_pts[_idxs]
        clips = unfold(video_pts, num_frames, step)
        if not clips.numel():
            warnings.warn(
                "There aren't enough frames in the current video to get a clip for the given clip length and "
                "frames between clips. The video (and potentially others) will be skipped."
            )
        idxs: Union[List[slice], torch.Tensor]
        if isinstance(_idxs, slice):
            # uniform resampling: the same slice applies to every clip
            idxs = [_idxs] * len(clips)
        else:
            idxs = unfold(_idxs, num_frames, step)
        return clips, idxs
229
+
230
    def compute_clips(self, num_frames: int, step: int, frame_rate: Optional[float] = None) -> None:
        """
        Compute all consecutive sequences of clips from video_pts.
        Always returns clips of size `num_frames`, meaning that the
        last few frames in a video can potentially be dropped.

        Args:
            num_frames (int): number of frames for the clip
            step (int): distance between two clips
            frame_rate (float, optional): The frame rate
        """
        self.num_frames = num_frames
        self.step = step
        self.frame_rate = frame_rate
        self.clips = []
        self.resampling_idxs = []
        for video_pts, fps in zip(self.video_pts, self.video_fps):
            clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
            self.clips.append(clips)
            self.resampling_idxs.append(idxs)
        # cumulative clip counts allow flat-index -> (video, clip) lookup via bisect
        clip_lengths = torch.as_tensor([len(v) for v in self.clips])
        self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
252
+
253
def __len__(self) -> int:
    """Total number of clips across every video (alias of ``num_clips``)."""
    return self.num_clips()
+
256
def num_videos(self) -> int:
    """Number of videos indexed by this object."""
    return len(self.video_paths)
+
259
def num_clips(self) -> int:
    """
    Number of subclips that are available in the video list.
    """
    # cumulative_sizes holds prefix sums, so the last entry is the total.
    return self.cumulative_sizes[-1]
+
265
def get_clip_location(self, idx: int) -> Tuple[int, int]:
    """
    Converts a flattened representation of the indices into a video_idx, clip_idx
    representation.
    """
    # Binary search over the prefix sums locates the owning video.
    video_idx = bisect.bisect_right(self.cumulative_sizes, idx)
    offset = 0 if video_idx == 0 else self.cumulative_sizes[video_idx - 1]
    return video_idx, idx - offset
+
277
@staticmethod
def _resample_video_idx(num_frames: int, original_fps: float, new_fps: float) -> Union[slice, torch.Tensor]:
    """Return frame indices that resample a stream from original_fps to new_fps.

    Returns a plain ``slice`` when the rate ratio is an integer (cheaper than
    advanced indexing); otherwise a tensor of floor-rounded positions.
    """
    stride = original_fps / new_fps
    if stride.is_integer():
        # Integer ratio: basic slicing avoids gather-style tensor indexing.
        return slice(None, None, int(stride))
    positions = torch.arange(num_frames, dtype=torch.float32) * stride
    return positions.floor().to(torch.int64)
+
289
def get_clip(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any], int]:
    """
    Gets a subclip from a list of videos.

    Args:
        idx (int): index of the subclip. Must be between 0 and num_clips().

    Returns:
        video (Tensor): decoded clip frames, [T,H,W,C] or [T,C,H,W] per ``output_format``
        audio (Tensor)
        info (Dict): at least "video_fps", plus "audio_fps" when audio is present
        video_idx (int): index of the video in `video_paths`

    Raises:
        IndexError: if ``idx`` is out of range.
        ValueError: if the pyav backend is combined with decoder options it
            does not support.
    """
    if idx >= self.num_clips():
        raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)")
    # Map the flat clip index onto (which video, which clip within it).
    video_idx, clip_idx = self.get_clip_location(idx)
    video_path = self.video_paths[video_idx]
    clip_pts = self.clips[video_idx][clip_idx]

    # Imported lazily to avoid a circular import at module load time.
    from torchvision import get_video_backend

    backend = get_video_backend()

    if backend == "pyav":
        # check for invalid options: pyav ignores resize/resample decoder
        # knobs, so fail loudly rather than silently return unscaled data.
        if self._video_width != 0:
            raise ValueError("pyav backend doesn't support _video_width != 0")
        if self._video_height != 0:
            raise ValueError("pyav backend doesn't support _video_height != 0")
        if self._video_min_dimension != 0:
            raise ValueError("pyav backend doesn't support _video_min_dimension != 0")
        if self._video_max_dimension != 0:
            raise ValueError("pyav backend doesn't support _video_max_dimension != 0")
        if self._audio_samples != 0:
            raise ValueError("pyav backend doesn't support _audio_samples != 0")

    if backend == "pyav":
        # pyav decodes directly between the clip's first and last timestamps.
        start_pts = clip_pts[0].item()
        end_pts = clip_pts[-1].item()
        video, audio, info = read_video(video_path, start_pts, end_pts)
    else:
        # video_reader backend: probe stream metadata, then decode the pts range.
        _info = _probe_video_from_file(video_path)
        video_fps = _info.video_fps
        audio_fps = None

        video_start_pts = cast(int, clip_pts[0].item())
        video_end_pts = cast(int, clip_pts[-1].item())

        # Defaults select no audio; overwritten below when a stream exists.
        audio_start_pts, audio_end_pts = 0, -1
        audio_timebase = Fraction(0, 1)
        video_timebase = Fraction(_info.video_timebase.numerator, _info.video_timebase.denominator)
        if _info.has_audio:
            # Convert the video pts window into the audio stream's timebase,
            # rounding outward (floor start, ceil end) to cover the full clip.
            audio_timebase = Fraction(_info.audio_timebase.numerator, _info.audio_timebase.denominator)
            audio_start_pts = pts_convert(video_start_pts, video_timebase, audio_timebase, math.floor)
            audio_end_pts = pts_convert(video_end_pts, video_timebase, audio_timebase, math.ceil)
            audio_fps = _info.audio_sample_rate
        video, audio, _ = _read_video_from_file(
            video_path,
            video_width=self._video_width,
            video_height=self._video_height,
            video_min_dimension=self._video_min_dimension,
            video_max_dimension=self._video_max_dimension,
            video_pts_range=(video_start_pts, video_end_pts),
            video_timebase=video_timebase,
            audio_samples=self._audio_samples,
            audio_channels=self._audio_channels,
            audio_pts_range=(audio_start_pts, audio_end_pts),
            audio_timebase=audio_timebase,
        )

        info = {"video_fps": video_fps}
        if audio_fps is not None:
            info["audio_fps"] = audio_fps

    if self.frame_rate is not None:
        # Apply the precomputed resampling; tensor indices are rebased to the
        # clip start since decoding returned only the clip's frames.
        resampling_idx = self.resampling_idxs[video_idx][clip_idx]
        if isinstance(resampling_idx, torch.Tensor):
            resampling_idx = resampling_idx - resampling_idx[0]
        video = video[resampling_idx]
        info["video_fps"] = self.frame_rate
        assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}"

    if self.output_format == "TCHW":
        # [T,H,W,C] --> [T,C,H,W]
        video = video.permute(0, 3, 1, 2)

    return video, audio, info, video_idx
+
377
def __getstate__(self) -> Dict[str, Any]:
    """Build a compact, version-tagged pickle state.

    Per-video pts tensors are flattened into one numpy array (with their
    lengths saved separately) and derived caches are dropped; ``__setstate__``
    reverses both steps.
    """
    video_pts_sizes = [len(v) for v in self.video_pts]
    # To be back-compatible, we convert data to dtype torch.long as needed
    # because for empty list, in legacy implementation, torch.as_tensor will
    # use torch.float as default dtype. This happens when decoding fails and
    # no pts is returned in the list.
    video_pts = [x.to(torch.int64) for x in self.video_pts]
    # video_pts can be an empty list if no frames have been decoded
    if video_pts:
        video_pts = torch.cat(video_pts)  # type: ignore[assignment]
        # avoid bug in https://github.com/pytorch/pytorch/issues/32351
        # TODO: Revert it once the bug is fixed.
        video_pts = video_pts.numpy()  # type: ignore[attr-defined]

    # make a copy of the fields of self
    d = self.__dict__.copy()
    d["video_pts_sizes"] = video_pts_sizes
    d["video_pts"] = video_pts
    # delete the following attributes to reduce the size of dictionary. They
    # will be re-computed in "__setstate__()"
    del d["clips"]
    del d["resampling_idxs"]
    del d["cumulative_sizes"]

    # for backwards-compatibility
    d["_version"] = 2
    return d
+
405
def __setstate__(self, d: Dict[str, Any]) -> None:
    """Restore from pickle state, handling both legacy and v2 layouts."""
    # for backwards-compatibility: legacy states (no "_version") stored the
    # full attribute dict directly, so just adopt it as-is.
    if "_version" not in d:
        self.__dict__ = d
        return

    # v2 stored one flat array plus per-video lengths; split it back into
    # one int64 tensor per video.
    video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64)
    video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0)
    # don't need this info anymore
    del d["video_pts_sizes"]

    d["video_pts"] = video_pts
    self.__dict__ = d
    # recompute attributes "clips", "resampling_idxs" and other derivative ones
    self.compute_clips(self.num_frames, self.step, self.frame_rate)
vllm/lib/python3.10/site-packages/torchvision/datasets/voc.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+ from xml.etree.ElementTree import Element as ET_Element
6
+
7
+ try:
8
+ from defusedxml.ElementTree import parse as ET_parse
9
+ except ImportError:
10
+ from xml.etree.ElementTree import parse as ET_parse
11
+
12
+ from PIL import Image
13
+
14
+ from .utils import download_and_extract_archive, verify_str_arg
15
+ from .vision import VisionDataset
16
+
17
# Per-year download metadata. The "2007-test" pseudo-year points at the
# separately distributed VOC2007 test archive. Each "filename" matches the
# basename of its "url" so integrity checks look at the right file on disk.
DATASET_YEAR_DICT = {
    "2012": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar",
        "filename": "VOCtrainval_11-May-2012.tar",
        "md5": "6cd6e144f989b92b3379bac3b3de84fd",
        "base_dir": os.path.join("VOCdevkit", "VOC2012"),
    },
    "2011": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar",
        "filename": "VOCtrainval_25-May-2011.tar",
        "md5": "6c3384ef61512963050cb5d687e5bf1e",
        "base_dir": os.path.join("TrainVal", "VOCdevkit", "VOC2011"),
    },
    "2010": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar",
        "filename": "VOCtrainval_03-May-2010.tar",
        "md5": "da459979d0c395079b5c75ee67908abb",
        "base_dir": os.path.join("VOCdevkit", "VOC2010"),
    },
    "2009": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar",
        "filename": "VOCtrainval_11-May-2009.tar",
        "md5": "a3e00b113cfcfebf17e343f59da3caa1",
        "base_dir": os.path.join("VOCdevkit", "VOC2009"),
    },
    "2008": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar",
        # Fixed: this previously listed the 2012 archive name
        # ("VOCtrainval_11-May-2012.tar"), which did not match the URL above —
        # the 2008 tar would be saved/md5-checked under the 2012 filename,
        # failing integrity against an existing 2012 download and overwriting it.
        "filename": "VOCtrainval_14-Jul-2008.tar",
        "md5": "2629fa636546599198acfcfbfcf1904a",
        "base_dir": os.path.join("VOCdevkit", "VOC2008"),
    },
    "2007": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar",
        "filename": "VOCtrainval_06-Nov-2007.tar",
        "md5": "c52e279531787c972589f7e41ab4ae64",
        "base_dir": os.path.join("VOCdevkit", "VOC2007"),
    },
    "2007-test": {
        "url": "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar",
        "filename": "VOCtest_06-Nov-2007.tar",
        "md5": "b6e924de25625d8de591ea690078ad9f",
        "base_dir": os.path.join("VOCdevkit", "VOC2007"),
    },
}
61
+
62
+
63
class _VOCBase(VisionDataset):
    """Shared machinery for the VOC segmentation and detection datasets.

    Subclasses select the split directory, target directory and target file
    extension via the three class attributes below.
    """

    _SPLITS_DIR: str
    _TARGET_DIR: str
    _TARGET_FILE_EXT: str

    def __init__(
        self,
        root: Union[str, Path],
        year: str = "2012",
        image_set: str = "train",
        download: bool = False,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
    ):
        super().__init__(root, transforms, transform, target_transform)

        self.year = verify_str_arg(year, "year", valid_values=[str(yr) for yr in range(2007, 2013)])

        # "test" is only distributed for the 2007 edition.
        valid_image_sets = ["train", "trainval", "val"] + (["test"] if year == "2007" else [])
        self.image_set = verify_str_arg(image_set, "image_set", valid_image_sets)

        # The 2007 test split lives in its own archive under a pseudo-year key.
        lookup_key = "2007-test" if year == "2007" and image_set == "test" else year
        entry = DATASET_YEAR_DICT[lookup_key]

        self.url = entry["url"]
        self.filename = entry["filename"]
        self.md5 = entry["md5"]

        voc_root = os.path.join(self.root, entry["base_dir"])

        if download:
            download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5)

        if not os.path.isdir(voc_root):
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # The split file lists one sample basename per line.
        splits_dir = os.path.join(voc_root, "ImageSets", self._SPLITS_DIR)
        split_f = os.path.join(splits_dir, image_set.rstrip("\n") + ".txt")
        with open(split_f) as f:
            file_names = [line.strip() for line in f]

        image_dir = os.path.join(voc_root, "JPEGImages")
        self.images = [os.path.join(image_dir, name + ".jpg") for name in file_names]

        target_dir = os.path.join(voc_root, self._TARGET_DIR)
        self.targets = [os.path.join(target_dir, name + self._TARGET_FILE_EXT) for name in file_names]

        assert len(self.images) == len(self.targets)

    def __len__(self) -> int:
        """Number of samples in the selected split."""
        return len(self.images)
118
+
119
+
120
class VOCSegmentation(_VOCBase):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
        image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
            ``year=="2007"``, can also be ``"test"``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    # Segmentation split lists live under ImageSets/Segmentation; targets are PNG masks.
    _SPLITS_DIR = "Segmentation"
    _TARGET_DIR = "SegmentationClass"
    _TARGET_FILE_EXT = ".png"

    @property
    def masks(self) -> List[str]:
        """Paths to the segmentation-mask images, one per sample."""
        return self.targets

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is the image segmentation.
        """
        image = Image.open(self.images[index]).convert("RGB")
        mask = Image.open(self.masks[index])

        if self.transforms is not None:
            image, mask = self.transforms(image, mask)

        return image, mask
162
+
163
+
164
class VOCDetection(_VOCBase):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``.
        image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If
            ``year=="2007"``, can also be ``"test"``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    # Detection split lists live under ImageSets/Main; targets are XML annotations.
    _SPLITS_DIR = "Main"
    _TARGET_DIR = "Annotations"
    _TARGET_FILE_EXT = ".xml"

    @property
    def annotations(self) -> List[str]:
        """Paths to the XML annotation files, one per sample."""
        return self.targets

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is a dictionary of the XML tree.
        """
        img = Image.open(self.images[index]).convert("RGB")
        target = self.parse_voc_xml(ET_parse(self.annotations[index]).getroot())

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    @staticmethod
    def parse_voc_xml(node: ET_Element) -> Dict[str, Any]:
        """Recursively convert a VOC annotation XML element into a nested dict.

        Repeated child tags become lists; single occurrences are collapsed to
        their sole value, except that the top-level "object" entry is always
        wrapped in a list so callers see a uniform shape.
        """
        voc_dict: Dict[str, Any] = {}
        children = list(node)
        if children:
            # Group each child's parsed dict by tag name.
            def_dic: Dict[str, Any] = collections.defaultdict(list)
            for dc in map(VOCDetection.parse_voc_xml, children):
                for ind, v in dc.items():
                    def_dic[ind].append(v)
            if node.tag == "annotation":
                # Force "object" to be a list even when only one object exists.
                def_dic["object"] = [def_dic["object"]]
            # Collapse single-element lists to the bare value.
            voc_dict = {node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}}
        if node.text:
            text = node.text.strip()
            if not children:
                # Leaf element: map its tag directly to the stripped text.
                voc_dict[node.tag] = text
        return voc_dict