| code | apis | extract_api |
|---|---|---|
import pathlib
from typing import Any, Dict, List, Tuple
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
class Country211(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"country211",
homepage="https://github.com/openai/CLIP/blob/main/data/country211.md",
valid_options=dict(split=("train", "val", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
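    # Maps the public split names to the folder names used inside the archive ("val" -> "valid").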
_SPLIT_NAME_MAPPER = {
"train": "train",
"val": "valid",
"test": "test",
}
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self.categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
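        # Keep only the files whose grandparent directory matches the requested split.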
dp = Filter(dp, path_comparator("parent.parent.name", self._SPLIT_NAME_MAPPER[config.split]))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| [
"torchdata.datapipes.iter.Mapper"
] | [((1862, 1880), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (1876, 1880), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1894, 1911), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1907, 1911), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1927, 1959), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (1933, 1959), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter\n'), ((792, 946), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://openaipublic.azureedge.net/clip/data/country211.tgz"""'], {'sha256': '"""c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c"""'}), "('https://openaipublic.azureedge.net/clip/data/country211.tgz',\n sha256='c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c')\n", (804, 946), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource\n'), ((1771, 1847), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""parent.parent.name"""', 'self._SPLIT_NAME_MAPPER[config.split]'], {}), "('parent.parent.name', self._SPLIT_NAME_MAPPER[config.split])\n", (1786, 1847), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1228, 1246), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1240, 1246), False, 'import pathlib\n'), ((1298, 1355), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['category'], {'categories': 'self.categories'}), '(category, categories=self.categories)\n', (1317, 1355), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1398, 1428), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (1420, 1428), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1531, 1552), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1543, 1552), False, 'import pathlib\n'), ((2146, 2164), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2158, 2164), False, 'import pathlib\n')] |
import enum
import pdb
import functools
import pathlib
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Demultiplexer,
IterKeyZipper,
LineReader,
)
from torchvision.datasets import VOCDetection
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "voc"
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor')
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class VOC(Dataset):
"""
- **homepage**: http://host.robots.ox.ac.uk/pascal/VOC/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2012",
task: str = "segmentation",
skip_integrity_check: bool = False,
) -> None:
self._year = self._verify_str_arg(year, "year", ("2007", "2008", "2009", "2010", "2011", "2012"))
if split == "test" and year != "2007":
raise ValueError("`split='test'` is only available for `year='2007'`")
else:
self._split = self._verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._task = self._verify_str_arg(task, "task", ("detection", "segmentation"))
self._anns_folder = "Annotations" if task == "detection" else "SegmentationClass"
self._split_folder = "Main" if task == "detection" else "Segmentation"
self._categories = _info()["categories"]
self.CLASSES = CLASSES
self.PALETTE = PALETTE
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"2007": ("VOCtrainval_06-Nov-2007.tar", "7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508"),
"2008": ("VOCtrainval_14-Jul-2008.tar", "7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e"),
"2009": ("VOCtrainval_11-May-2009.tar", "11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6"),
"2010": ("VOCtrainval_03-May-2010.tar", "1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4"),
"2011": ("VOCtrainval_25-May-2011.tar", "0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba"),
"2012": ("VOCtrainval_11-May-2012.tar", "e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb"),
}
_TEST_ARCHIVES = {
"2007": ("VOCtest_06-Nov-2007.tar", "6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892")
}
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = (self._TEST_ARCHIVES if self._split == "test" else self._TRAIN_VAL_ARCHIVES)[self._year]
archive = HttpResource(f"http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}", sha256=sha256)
return [archive]
def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
path = pathlib.Path(data[0])
return name in path.parent.parts[-depth:]
class _Demux(enum.IntEnum):
SPLIT = 0
IMAGES = 1
ANNS = 2
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
if self._is_in_folder(data, name="ImageSets", depth=2):
return self._Demux.SPLIT
elif self._is_in_folder(data, name="JPEGImages"):
return self._Demux.IMAGES
elif self._is_in_folder(data, name=self._anns_folder):
return self._Demux.ANNS
else:
return None
def _parse_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return cast(Dict[str, Any], VOCDetection.parse_voc_xml(ElementTree.parse(buffer).getroot())["annotation"])
def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
anns = self._parse_detection_ann(buffer)
instances = anns["object"]
return dict(
bounding_boxes=BoundingBox(
[
[int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
for instance in instances
],
format="xyxy",
image_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
),
labels=Label(
[self._categories.index(instance["name"]) for instance in instances], categories=self._categories
),
)
def _prepare_segmentation_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return dict(segmentation=EncodedImage.from_file(buffer))
def _prepare_sample(
self,
data: Tuple[Tuple[Tuple[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
image_path = pathlib.PosixPath(image_path).name
ann_path = pathlib.PosixPath(ann_path).name
#{'img_info': {'filename': '2009_000801.jpg', 'ann': {'seg_map': '2009_000801.png'}}, 'ann_info': {'seg_map': '2009_000801.png'}}
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':ann_path})})
return img_info
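        # NOTE: the early return above makes the annotation handling below unreachable.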
return dict(
(self._prepare_detection_ann if self._task == "detection" else self._prepare_segmentation_ann)(ann_buffer),
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
ann_path=ann_path,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
("train", "2007", "detection"): 2_501,
("train", "2007", "segmentation"): 209,
("train", "2008", "detection"): 2_111,
("train", "2008", "segmentation"): 511,
("train", "2009", "detection"): 3_473,
("train", "2009", "segmentation"): 749,
("train", "2010", "detection"): 4_998,
("train", "2010", "segmentation"): 964,
("train", "2011", "detection"): 5_717,
("train", "2011", "segmentation"): 1_112,
("train", "2012", "detection"): 5_717,
("train", "2012", "segmentation"): 1_464,
("val", "2007", "detection"): 2_510,
("val", "2007", "segmentation"): 213,
("val", "2008", "detection"): 2_221,
("val", "2008", "segmentation"): 512,
("val", "2009", "detection"): 3_581,
("val", "2009", "segmentation"): 750,
("val", "2010", "detection"): 5_105,
("val", "2010", "segmentation"): 964,
("val", "2011", "detection"): 5_823,
("val", "2011", "segmentation"): 1_111,
("val", "2012", "detection"): 5_823,
("val", "2012", "segmentation"): 1_449,
("trainval", "2007", "detection"): 5_011,
("trainval", "2007", "segmentation"): 422,
("trainval", "2008", "detection"): 4_332,
("trainval", "2008", "segmentation"): 1_023,
("trainval", "2009", "detection"): 7_054,
("trainval", "2009", "segmentation"): 1_499,
("trainval", "2010", "detection"): 10_103,
("trainval", "2010", "segmentation"): 1_928,
("trainval", "2011", "detection"): 11_540,
("trainval", "2011", "segmentation"): 2_223,
("trainval", "2012", "detection"): 11_540,
("trainval", "2012", "segmentation"): 2_913,
("test", "2007", "detection"): 4_952,
("test", "2007", "segmentation"): 210,
}[(self._split, self._year, self._task)]
def _filter_anns(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == self._Demux.ANNS
def _generate_categories(self) -> List[str]:
self._task = "detection"
resources = self._resources()
archive_dp = resources[0].load(self._root)
dp = Filter(archive_dp, self._filter_anns)
dp = Mapper(dp, self._parse_detection_ann, input_col=1)
categories = sorted({instance["name"] for _, anns in dp for instance in anns["object"]})
# We add a background category to be used during segmentation
categories.insert(0, "__background__")
return categories
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.Filter"
] | [((3711, 3818), 'Dataset4EO.datasets.utils.HttpResource', 'HttpResource', (['f"""http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}"""'], {'sha256': 'sha256'}), "(\n f'http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}',\n sha256=sha256)\n", (3723, 3818), False, 'from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset\n'), ((3941, 3962), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3953, 3962), False, 'import pathlib\n'), ((6688, 6794), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 3, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (6701, 6794), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7065, 7098), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['split_dp'], {'decode': '(True)'}), '(split_dp, decode=True)\n', (7075, 7098), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7118, 7142), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['split_dp'], {}), '(split_dp)\n', (7132, 7142), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7162, 7185), 'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['split_dp'], {}), '(split_dp)\n', (7175, 7185), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7527, 7559), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (7533, 7559), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((9955, 9992), 'torchdata.datapipes.iter.Filter', 'Filter', (['archive_dp', 'self._filter_anns'], {}), '(archive_dp, self._filter_anns)\n', (9961, 9992), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((10006, 10056), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._parse_detection_ann'], {'input_col': '(1)'}), '(dp, self._parse_detection_ann, input_col=1)\n', (10012, 10056), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((1465, 1491), 'Dataset4EO.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (1485, 1491), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5914, 5943), 'pathlib.PosixPath', 'pathlib.PosixPath', (['image_path'], {}), '(image_path)\n', (5931, 5943), False, 'import pathlib\n'), ((5968, 5995), 'pathlib.PosixPath', 'pathlib.PosixPath', (['ann_path'], {}), '(ann_path)\n', (5985, 5995), False, 'import pathlib\n'), ((6899, 6961), 'functools.partial', 'functools.partial', (['self._is_in_folder'], {'name': 'self._split_folder'}), '(self._is_in_folder, name=self._split_folder)\n', (6916, 6961), False, 'import functools\n'), ((6999, 7044), 'Dataset4EO.datasets.utils._internal.path_comparator', 
'path_comparator', (['"""name"""', 'f"""{self._split}.txt"""'], {}), "('name', f'{self._split}.txt')\n", (7014, 7044), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5530, 5560), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (5552, 5560), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((6441, 6477), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['image_buffer'], {}), '(image_buffer)\n', (6463, 6477), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((7372, 7398), 'Dataset4EO.datasets.utils._internal.getitem', 'getitem', (['*([0] * level)', '(1)'], {}), '(*([0] * level), 1)\n', (7379, 7398), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7425, 7446), 'Dataset4EO.datasets.utils._internal.path_accessor', 'path_accessor', (['"""stem"""'], {}), "('stem')\n", (7438, 7446), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((4644, 4669), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['buffer'], {}), '(buffer)\n', (4661, 4669), False, 'from xml.etree import ElementTree\n')] |
from torchtext._internal.module_utils import is_module_available
from typing import Union, Tuple
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
import os
URL = {
'train': "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv",
'test': "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv",
}
MD5 = {
'train': "b1a00f826fdfbd249f79597b59e1dc12",
'test': "d52ea96a97a2d943681189a97654912d",
}
NUM_LINES = {
'train': 120000,
'test': 7600,
}
DATASET_NAME = "AG_NEWS"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=4)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL[split]])
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, split + ".csv"),
hash_dict={os.path.join(root, split + ".csv"): MD5[split]},
hash_type="md5"
)
cache_dp = HttpReader(cache_dp)
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="r")
return cache_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader"
] | [((101, 133), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (120, 133), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((775, 832), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES', 'num_classes': '(4)'}), '(num_lines=NUM_LINES, num_classes=4)\n', (796, 832), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((834, 886), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (859, 886), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((888, 927), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'test')"], {}), "(('train', 'test'))\n", (908, 927), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((1185, 1214), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1200, 1214), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1431, 1451), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (1441, 1451), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1537, 1567), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'mode': '"""r"""'}), "(cache_dp, mode='r')\n", (1547, 1567), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((994, 1026), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1013, 1026), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1282, 1316), 'os.path.join', 'os.path.join', (['root', "(split + '.csv')"], {}), "(root, split + '.csv')\n", (1294, 1316), False, 'import os\n'), ((1337, 1371), 'os.path.join', 'os.path.join', (['root', "(split + '.csv')"], {}), "(root, split + '.csv')\n", (1349, 1371), False, 'import os\n')] |
import time
import torch
import torchdata
import torchfunc
from .datasets import ExampleDataset, ExampleIterable
from .utils import artificial_slowdown, enumerate_step, index_is_sample
def test_basic_iterable():
dataset = ExampleIterable(0, 100).map(lambda value: value + 12)
for index, item in enumerate(dataset):
assert index + 12 == item
def test_iterable_filter():
dataset = (
ExampleIterable(0, 100)
.map(lambda value: value + 12)
.filter(lambda elem: elem % 2 == 0)
)
for index, item in enumerate_step(dataset, start=12, step=2):
assert index == item
def test_basic_dataset():
dataset = ExampleDataset(0, 25).map(lambda sample: sample * sample).cache()
for index, value in enumerate(dataset):
assert index ** 2 == value
def test_dataset_multiple_cache():
# Range-like Dataset mapped to item ** 3
dataset = (
ExampleDataset(0, 25)
.cache()
.map(lambda sample: (sample + sample, sample))
.cache()
.map(lambda sample: sample[0] - sample[-1])
.cache()
.map(lambda sample: sample ** 3)
.cache()
)
# Iterate through dataset
for _ in dataset:
pass
for index, value in enumerate(dataset):
assert index ** 3 == value
def test_dataset_cache_speedup():
dataset = ExampleDataset(0, 5).map(artificial_slowdown).cache()
with torchfunc.Timer() as timer:
index_is_sample(dataset)
assert timer.checkpoint() > 5
index_is_sample(dataset)
assert timer.checkpoint() < 0.2
def test_dataset_complicated_cache():
dataset = (
(
(
ExampleDataset(0, 25)
| ExampleDataset(0, 25).map(lambda value: value * -1)
)
.cache()
.map(lambda sample: sample[0] + sample[1] + sample[0])
.cache()
.map(lambda sample: sample + sample)
| ExampleDataset(0, 25)
)
.cache()
.map(lambda values: ((values, values), values))
.map(torchdata.maps.Flatten())
.cache()
.map(lambda values: values[1])
.map(lambda value: value ** 2)
)
for index, value in enumerate(dataset):
assert index ** 2 == value
def test_apply():
def summation(generator):
return sum(value for value in generator)
assert ExampleDataset(0, 101).apply(summation) == 5050 # Returns 5050
def test_reduce():
assert ExampleDataset(0, 10).reduce(lambda x, y: x + y) == 45
def test_reduce_initializer():
assert ExampleDataset(0, 10).reduce(lambda x, y: x + y, 10) == 55
def test_repr():
assert (
repr(ExampleDataset(0, 5))
== "tests.datasets.ExampleDataset(values=[0, 1, 2, 3, 4])"
)
def test_dataset_dataloader():
# Range-like Dataset mapped to item ** 3
dataset = (
ExampleDataset(0, 25)
.cache()
.map(lambda sample: (sample + sample, sample))
.cache()
.map(lambda sample: sample[0] - sample[-1])
.cache()
.map(lambda sample: sample ** 3)
.cache()
)
# Iterate through dataset
for element in torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=3):
print(element)
| [
"torchdata.maps.Flatten"
] | [((3190, 3254), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': '(3)'}), '(dataset, shuffle=True, batch_size=3)\n', (3217, 3254), False, 'import torch\n'), ((1419, 1436), 'torchfunc.Timer', 'torchfunc.Timer', ([], {}), '()\n', (1434, 1436), False, 'import torchfunc\n'), ((2083, 2107), 'torchdata.maps.Flatten', 'torchdata.maps.Flatten', ([], {}), '()\n', (2105, 2107), False, 'import torchdata\n')] |
from typing import (
Iterator,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
)
import io
import torch
import torch.utils.data.datapipes as dp
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchdata.datapipes.utils import StreamWrapper
from torchrec.datasets.utils import (
LoadFiles,
ReadLinesFromCSV)
from torch.utils.data import IterDataPipe
from torchrec.datasets.criteo import _default_row_mapper
s3_prefixes = ['s3://criteo-dataset/day_0']
dp_s3_urls = S3FileLister(s3_prefixes)
dp_s3_files = S3FileLoader(dp_s3_urls) # outputs in (url, BytesIO)
# more datapipes to convert loaded bytes, e.g.
class LoadWithTextIOWrapper(IterDataPipe):
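    # Wraps each (url, BytesIO) pair so downstream parsing receives text rather than raw bytes.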
def __init__(self, paths, **open_kw):
self.paths = paths
self.open_kw: Any = open_kw # pyre-ignore[4]
def __iter__(self) -> Iterator[Any]:
for url, buffer in self.paths:
yield url, io.TextIOWrapper(buffer, encoding='utf-8')
class S3CriteoIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example:
>>> datapipe = CriteoIterDataPipe(
>>> ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
>>> )
>>> datapipe = dp.iter.Batcher(datapipe, 100)
>>> datapipe = dp.iter.Collator(datapipe)
>>> batch = next(iter(datapipe))
"""
def __init__(
self,
paths: S3FileLoader,
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> None:
self.paths = paths
self.row_mapper = row_mapper
self.open_kw: Any = open_kw # pyre-ignore[4]
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
worker_info = torch.utils.data.get_worker_info()
paths = self.paths
if worker_info is not None:
paths = (
path
for (idx, path) in enumerate(paths)
if idx % worker_info.num_workers == worker_info.id
)
# datapipe = LoadFiles(paths, mode="r", **self.open_kw)
datapipe = LoadWithTextIOWrapper(paths)
datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
if self.row_mapper:
datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
yield from datapipe
#print(dp_s3_files)
#datapipe = StreamWrapper(dp_s3_files).parse_csv_files(delimiter=' ')
#for d in datapipe: # Start loading data
datapipe = S3CriteoIterDataPipe(dp_s3_files)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
print(batch.keys())
| [
"torchdata.datapipes.iter.S3FileLoader",
"torchdata.datapipes.iter.S3FileLister"
] | [((520, 545), 'torchdata.datapipes.iter.S3FileLister', 'S3FileLister', (['s3_prefixes'], {}), '(s3_prefixes)\n', (532, 545), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((560, 584), 'torchdata.datapipes.iter.S3FileLoader', 'S3FileLoader', (['dp_s3_urls'], {}), '(dp_s3_urls)\n', (572, 584), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((3173, 3203), 'torch.utils.data.datapipes.iter.Batcher', 'dp.iter.Batcher', (['datapipe', '(100)'], {}), '(datapipe, 100)\n', (3188, 3203), True, 'import torch.utils.data.datapipes as dp\n'), ((3215, 3241), 'torch.utils.data.datapipes.iter.Collator', 'dp.iter.Collator', (['datapipe'], {}), '(datapipe)\n', (3231, 3241), True, 'import torch.utils.data.datapipes as dp\n'), ((2416, 2450), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2448, 2450), False, 'import torch\n'), ((2821, 2863), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (2837, 2863), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV\n'), ((2915, 2956), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (2929, 2956), True, 'import torch.utils.data.datapipes as dp\n'), ((932, 974), 'io.TextIOWrapper', 'io.TextIOWrapper', (['buffer'], {'encoding': '"""utf-8"""'}), "(buffer, encoding='utf-8')\n", (948, 974), False, 'import io\n')] |
import os
from functools import partial
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import HttpReader
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
}
MD5 = {
"train": "981b29407e0affa3b1b156f72073b945",
"dev": "3e85deb501d4e538b6bc56f786231552",
}
NUM_LINES = {
"train": 87599,
"dev": 10570,
}
DATASET_NAME = "SQuAD1"
def _filepath_fn(root, split, _=None):
return os.path.join(root, os.path.basename(URL[split]))
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root: str, split: Union[Tuple[str], str]):
"""SQuAD1 Dataset
For additional details refer to https://rajpurkar.github.io/SQuAD-explorer/
Number of lines per split:
- train: 87599
- dev: 10570
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
    :returns: DataPipe that yields data points from the SQuAD1 dataset, each consisting of context, question, a list of answers and the corresponding indices in the context
:rtype: (str, str, list(str), list(int))
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root, split),
hash_dict={_filepath_fn(root, split): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
return cache_dp.parse_json_files().read_squad().shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((245, 277), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (264, 277), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((866, 918), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (891, 918), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((920, 958), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev')"], {}), "(('train', 'dev'))\n", (940, 958), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((1850, 1879), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1865, 1879), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((2203, 2241), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'encoding': '"""utf-8"""'}), "(cache_dp, encoding='utf-8')\n", (2213, 2241), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((833, 861), 'os.path.basename', 'os.path.basename', (['URL[split]'], {}), '(URL[split])\n', (849, 861), False, 'import os\n'), ((1637, 1669), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1656, 1669), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1980, 2014), 'functools.partial', 'partial', (['_filepath_fn', 'root', 'split'], {}), '(_filepath_fn, root, split)\n', (1987, 2014), False, 'from functools import partial\n'), ((2121, 2141), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2131, 2141), False, 'from torchtext._download_hooks import HttpReader\n')] |
import progressbar
import torch
from tele.meter import SumMeter
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchdata.mpii import MpiiData
from dsnt.data import MPIIDataset
from dsnt.util import timer, type_as_index, reverse_tensor
def generate_predictions(model, dataset, use_flipped=True, batch_size=1, time_meter=None):
"""Generate predictions with the model"""
if use_flipped:
        assert batch_size == 1, 'test-time flip augmentation only works with batch_size=1'
sum_meter = SumMeter()
model.cuda()
model.eval()
loader = DataLoader(dataset, batch_size, num_workers=4, pin_memory=True)
preds = torch.DoubleTensor(len(dataset), 16, 2).zero_()
completed = 0
with progressbar.ProgressBar(max_value=len(dataset)) as bar:
for i, batch in enumerate(loader):
batch_size = batch['input'].size(0)
sum_meter.reset()
with timer(sum_meter):
if use_flipped:
sample = batch['input']
rev_sample = reverse_tensor(batch['input'], -1)
in_var = Variable(torch.cat([sample, rev_sample], 0).cuda(), volatile=True)
hm_var = model.forward_part1(in_var)
if isinstance(hm_var, list):
# Just use the last heatmap from stacked hourglass
hm_var = hm_var[-1]
hm1, hm2 = hm_var.split(1)
hm2 = reverse_tensor(hm2, -1)
hm2 = hm2.index_select(-3, type_as_index(MPIIDataset.HFLIP_INDICES, hm2))
hm = (hm1 + hm2) / 2
out_var = model.forward_part2(hm)
coords = model.compute_coords(out_var)
else:
in_var = Variable(batch['input'].cuda(), volatile=True)
out_var = model(in_var)
coords = model.compute_coords(out_var)
orig_preds = torch.baddbmm(
batch['transform_b'],
coords.double(),
batch['transform_m'])
pos = i * batch_size
preds[pos:(pos + batch_size)] = orig_preds
if time_meter is not None:
time_meter.add(sum_meter.value())
completed += batch_size
bar.update(completed)
return preds
def evaluate_mpii_predictions(preds, subset, evaluator):
mpii_data = MpiiData('/datasets/mpii')
subset_indices = mpii_data.subset_indices(subset)
actual = torch.from_numpy(mpii_data.keypoints[subset_indices])
head_lengths = torch.from_numpy(mpii_data.head_lengths[subset_indices])
joint_mask = torch.from_numpy(mpii_data.keypoint_masks[subset_indices])
# Calculate PCKh accuracies
evaluator.add(preds, actual, joint_mask, head_lengths)
return evaluator
| [
"torchdata.mpii.MpiiData"
] | [((537, 547), 'tele.meter.SumMeter', 'SumMeter', ([], {}), '()\n', (545, 547), False, 'from tele.meter import SumMeter\n'), ((597, 660), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset, batch_size, num_workers=4, pin_memory=True)\n', (607, 660), False, 'from torch.utils.data import DataLoader\n'), ((2484, 2510), 'torchdata.mpii.MpiiData', 'MpiiData', (['"""/datasets/mpii"""'], {}), "('/datasets/mpii')\n", (2492, 2510), False, 'from torchdata.mpii import MpiiData\n'), ((2579, 2632), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.keypoints[subset_indices]'], {}), '(mpii_data.keypoints[subset_indices])\n', (2595, 2632), False, 'import torch\n'), ((2652, 2708), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.head_lengths[subset_indices]'], {}), '(mpii_data.head_lengths[subset_indices])\n', (2668, 2708), False, 'import torch\n'), ((2726, 2784), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.keypoint_masks[subset_indices]'], {}), '(mpii_data.keypoint_masks[subset_indices])\n', (2742, 2784), False, 'import torch\n'), ((944, 960), 'dsnt.util.timer', 'timer', (['sum_meter'], {}), '(sum_meter)\n', (949, 960), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1071, 1105), 'dsnt.util.reverse_tensor', 'reverse_tensor', (["batch['input']", '(-1)'], {}), "(batch['input'], -1)\n", (1085, 1105), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1501, 1524), 'dsnt.util.reverse_tensor', 'reverse_tensor', (['hm2', '(-1)'], {}), '(hm2, -1)\n', (1515, 1524), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1572, 1617), 'dsnt.util.type_as_index', 'type_as_index', (['MPIIDataset.HFLIP_INDICES', 'hm2'], {}), '(MPIIDataset.HFLIP_INDICES, hm2)\n', (1585, 1617), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1144, 1178), 'torch.cat', 'torch.cat', (['[sample, rev_sample]', '(0)'], {}), '([sample, rev_sample], 0)\n', (1153, 1178), False, 'import torch\n')] |
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json",
}
MD5 = {
"train": "62108c273c268d70893182d5cf8df740",
"dev": "246adae8b7002f8679c027697b0b7cf8",
}
NUM_LINES = {
"train": 130319,
"dev": 11873,
}
DATASET_NAME = "SQuAD2"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD2(root: str, split: Union[Tuple[str], str]):
"""SQuAD2 Dataset
For additional details refer to https://rajpurkar.github.io/SQuAD-explorer/
Number of lines per split:
- train: 130319
- dev: 11873
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
    :returns: DataPipe that yields data points from the SQuAD2 dataset, each consisting of context, question, a list of answers and the corresponding indices in the context
:rtype: (str, str, list(str), list(int))
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
return cache_dp.parse_json_files().read_squad()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.FileOpener"
] | [((215, 247), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (234, 247), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((696, 748), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (721, 748), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((750, 788), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev')"], {}), "(('train', 'dev'))\n", (770, 788), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((1682, 1711), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1697, 1711), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((2073, 2103), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'mode': '"""b"""'}), "(cache_dp, mode='b')\n", (2083, 2103), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1469, 1501), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1488, 1501), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1991, 2011), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2001, 2011), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1841, 1860), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1857, 1860), False, 'import os\n'), ((1901, 1929), 'os.path.basename', 'os.path.basename', (['URL[split]'], {}), '(URL[split])\n', (1917, 1929), False, 'import os\n')] |
from torchtext._internal.module_utils import is_module_available
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
import os
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
from typing import Union, Tuple
URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
MD5 = '542ccefacc6c27f945fb54453812b3cd'
NUM_LINES = {
'train': 36718,
'valid': 3760,
'test': 4358,
}
DATASET_NAME = "WikiText2"
_EXTRACTED_FILES = {
'train': os.path.join('wikitext-2', 'wiki.train.tokens'),
'test': os.path.join('wikitext-2', 'wiki.test.tokens'),
'valid': os.path.join('wikitext-2', 'wiki.valid.tokens'),
}
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'valid', 'test'))
def WikiText2(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
# cache data on-disk
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL)): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split]))
# Extract zip and filter the appropriate split file
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").read_from_zip().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode='b')
return data_dp.readlines(strip_newline=False, decode=True, return_path=False)
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.FileOpener"
] | [((69, 101), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (88, 101), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((794, 836), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES'}), '(num_lines=NUM_LINES)\n', (815, 836), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((838, 890), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (863, 890), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((892, 940), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'valid', 'test')"], {}), "(('train', 'valid', 'test'))\n", (912, 940), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((618, 665), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.train.tokens"""'], {}), "('wikitext-2', 'wiki.train.tokens')\n", (630, 665), False, 'import os\n'), ((679, 725), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.test.tokens"""'], {}), "('wikitext-2', 'wiki.test.tokens')\n", (691, 725), False, 'import os\n'), ((740, 787), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.valid.tokens"""'], {}), "('wikitext-2', 'wiki.valid.tokens')\n", (752, 787), False, 'import os\n'), ((1199, 1221), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1214, 1221), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((2002, 2045), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (2012, 2045), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1009, 1041), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1028, 1041), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1491, 1522), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (1501, 1522), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1653, 1696), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1665, 1696), False, 'import os\n'), ((1344, 1363), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1360, 1363), False, 'import os\n'), ((1404, 1425), 'os.path.basename', 'os.path.basename', (['URL'], {}), '(URL)\n', (1420, 1425), False, 'import os\n'), ((1782, 1825), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1792, 1825), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n')] |
import io
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Iterator
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
GDriveResource,
)
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
class PCAM(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"pcam",
homepage="https://github.com/basveeling/pcam",
categories=2,
valid_options=dict(split=("train", "test", "val")),
dependencies=["h5py"],
)
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[config.split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": features.Image(image.transpose(2, 0, 1)),
"label": Label(target.item()),
}
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Zipper"
] | [((1123, 1184), 'collections.namedtuple', 'namedtuple', (['"""_Resource"""', "('file_name', 'gdrive_id', 'sha256')"], {}), "('_Resource', ('file_name', 'gdrive_id', 'sha256'))\n", (1133, 1184), False, 'from collections import namedtuple\n'), ((4149, 4178), 'torchdata.datapipes.iter.Zipper', 'Zipper', (['images_dp', 'targets_dp'], {}), '(images_dp, targets_dp)\n', (4155, 4178), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper\n'), ((4192, 4210), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (4206, 4210), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((4224, 4241), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (4237, 4241), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((4257, 4289), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (4263, 4289), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper\n'), ((3386, 3480), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', ([], {'file_name': 'file_name', 'id': 'gdrive_id', 'sha256': 'sha256', 'preprocess': '"""decompress"""'}), "(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess\n ='decompress')\n", (3400, 3480), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, GDriveResource\n'), ((967, 984), 'h5py.File', 'h5py.File', (['handle'], {}), '(handle)\n', (976, 984), False, 'import h5py\n')] |
import os
from functools import partial
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import _create_dataset_directory
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import HttpReader
URL = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
MD5 = "b6d5672bd9dc1e66ab2bb020ebeafb8d"
_PATH = "quora_duplicate_questions.tsv"
NUM_LINES = {"train": 404290}
DATASET_NAME = "QQP"
def _filepath_fn(root, _=None):
return os.path.join(root, _PATH)
def _modify_res(x):
return (int(x[-1]), x[3], x[4])
@_create_dataset_directory(dataset_name=DATASET_NAME)
def QQP(root: str):
"""QQP dataset
For additional details refer to https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
:returns: DataPipe that yields rows from QQP dataset (label (int), question1 (str), question2 (str))
:rtype: (int, str, str)
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root): MD5},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
# some context stored at top of the file needs to be removed
parsed_data = cache_dp.parse_csv(skip_lines=1, delimiter="\t").map(_modify_res)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((178, 210), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (197, 210), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((668, 720), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (693, 720), False, 'from torchtext.data.datasets_utils import _create_dataset_directory\n'), ((581, 606), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (593, 606), False, 'import os\n'), ((1348, 1370), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1363, 1370), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1630, 1668), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'encoding': '"""utf-8"""'}), "(cache_dp, encoding='utf-8')\n", (1640, 1668), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1135, 1167), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1154, 1167), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1428, 1455), 'functools.partial', 'partial', (['_filepath_fn', 'root'], {}), '(_filepath_fn, root)\n', (1435, 1455), False, 'from functools import partial\n'), ((1548, 1568), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (1558, 1568), False, 'from torchtext._download_hooks import HttpReader\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torch.utils.data.dataset import IterableDataset
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_add_docstring_header,
_create_dataset_directory,
_wrap_split_argument,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import IterableWrapper, FileLoader
# we import HttpReader from _download_hooks so we can swap out public URLs
    # with internal URLs when the dataset is used within Facebook
from torchtext._download_hooks import HttpReader
NUM_LINES = {
"train": 67349,
"dev": 872,
"test": 1821,
}
MD5 = "9f81648d4199384278b86e315dac217c"
URL = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
_PATH = "SST-2.zip"
_EXTRACTED_FILES = {
"train": f"{os.sep}".join([_PATH, "SST-2", "train.tsv"]),
"dev": f"{os.sep}".join([_PATH, "SST-2", "dev.tsv"]),
"test": f"{os.sep}".join([_PATH, "SST-2", "test.tsv"]),
}
_EXTRACTED_FILES_MD5 = {
"train": "da409a0a939379ed32a470bc0f7fe99a",
"dev": "268856b487b2a31a28c0a93daaff7288",
"test": "3230e4efec76488b87877a56ae49675a",
}
_FIRST_LINE_MD5 = {
"train": "2552b8cecd57b2e022ef23411c688fa8",
"dev": "1b0ffd6aa5f2bf0fd9840a5f6f1a9f07",
"test": "f838c81fe40bfcd7e42e9ffc4dd004f7",
}
DATASET_NAME = "SST2"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev", "test"))
def SST2(root, split, validate_hash=True):
return SST2Dataset(root, split, validate_hash=validate_hash)
class SST2Dataset(IterableDataset):
"""The SST2 dataset uses torchdata datapipes end-2-end.
To avoid download at every epoch, we cache the data on-disk
We do sanity checks on downloaded and extracted data
"""
def __init__(self, root, split, validate_hash=True):
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` is required to be installed to use this dataset."
"Please refer to https://github.com/pytorch/data for instructions on "
"how to install the package."
)
self._dp = self._get_datapipe(root, split, validate_hash)
def __iter__(self):
for data in self._dp:
yield data
def _get_datapipe(self, root, split, validate_hash):
# Validate integrity of dataset using md5 checksum
hash_dict = {os.path.join(root, "SST-2.zip"): MD5} if validate_hash else None
hash_type = "md5" if validate_hash else None
# cache data on-disk
cache_dp = IterableWrapper([URL]).on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict=hash_dict,
hash_type=hash_type,
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
# Load from cached file
cache_dp = FileLoader(cache_dp, mode="rb")
# extract data from zip
extracted_files = cache_dp.read_from_zip().filter(
lambda x: f"{split}.tsv" in x[0]
)
# Parse CSV file and yield data samples
return extracted_files.parse_csv(skip_lines=1, delimiter="\t").map(
lambda x: (x[0], x[1])
)
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileLoader"
] | [((314, 346), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (333, 346), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1378, 1435), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES', 'num_classes': '(2)'}), '(num_lines=NUM_LINES, num_classes=2)\n', (1399, 1435), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((1437, 1489), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (1462, 1489), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((1491, 1537), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev', 'test')"], {}), "(('train', 'dev', 'test'))\n", (1511, 1537), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((3028, 3059), 'torchdata.datapipes.iter.FileLoader', 'FileLoader', (['cache_dp'], {'mode': '"""rb"""'}), "(cache_dp, mode='rb')\n", (3038, 3059), False, 'from torchdata.datapipes.iter import IterableWrapper, FileLoader\n'), ((1944, 1976), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1963, 1976), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((2534, 2565), 'os.path.join', 'os.path.join', (['root', '"""SST-2.zip"""'], {}), "(root, 'SST-2.zip')\n", (2546, 2565), False, 'import os\n'), ((2701, 2723), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (2716, 2723), False, 'from torchdata.datapipes.iter import IterableWrapper, FileLoader\n'), ((2909, 2929), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2919, 2929), False, 'from torchtext._download_hooks import HttpReader\n'), ((2792, 2811), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2808, 2811), False, 'import os\n')] |
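Since `SST2Dataset` above is an `IterableDataset`, it can be consumed through a standard `DataLoader`. A hedged sketch, assuming torchdata is installed and the download and hash check succeed; the root path and batch size are illustrative.

```python
# Hedged usage sketch for the SST2 dataset in the record above.
from torch.utils.data import DataLoader

train_ds = SST2(root=".data", split="train")  # lazily builds the cached/extracted datapipe
loader = DataLoader(train_ds, batch_size=32)
sentences, labels = next(iter(loader))       # default collation yields lists of strings
```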
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
_PATH = "amazon_review_polarity_csv.tar.gz"
_EXTRACTED_FILES = {
"train": os.path.join("amazon_review_polarity_csv", "train.csv"),
"test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}
DATASET_NAME = "AmazonReviewPolarity"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root, split):
"""Demonstrating caching, extraction and sanity check pipelines."""
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _PATH), hash_dict={os.path.join(root, _PATH): MD5}, hash_type="md5"
)
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
)
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode="b")
return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| [
"torchdata.datapipes.iter.GDriveReader",
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper"
] | [((499, 554), 'os.path.join', 'os.path.join', (['"""amazon_review_polarity_csv"""', '"""train.csv"""'], {}), "('amazon_review_polarity_csv', 'train.csv')\n", (511, 554), False, 'import os\n'), ((568, 622), 'os.path.join', 'os.path.join', (['"""amazon_review_polarity_csv"""', '"""test.csv"""'], {}), "('amazon_review_polarity_csv', 'test.csv')\n", (580, 622), False, 'import os\n'), ((946, 968), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (961, 968), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1655, 1698), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1665, 1698), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1166, 1199), 'torchdata.datapipes.iter.GDriveReader', 'GDriveReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (1178, 1199), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1047, 1072), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1059, 1072), False, 'import os\n'), ((1085, 1110), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1097, 1110), False, 'import os\n'), ((1340, 1383), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1352, 1383), False, 'import os\n'), ((1428, 1471), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1438, 1471), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n')] |
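The record above applies the `on_disk_cache`/`end_caching` idiom twice: once for the downloaded archive and once for the extracted CSV. A stripped-down, hedged sketch of that idiom with a hypothetical URL, using `HttpReader` instead of `GDriveReader` purely for illustration:

```python
# Generic on_disk_cache / end_caching sketch; URL and paths are hypothetical.
import os
from torchdata.datapipes.iter import HttpReader, IterableWrapper

root, url = ".data", "https://example.com/archive.tar.gz"

dp = IterableWrapper([url]).on_disk_cache(
    filepath_fn=lambda u: os.path.join(root, os.path.basename(u)),
)
# The download only runs when the cache file is missing; end_caching writes it to
# disk and yields the local file path(s).
dp = HttpReader(dp).end_caching(mode="wb", same_filepath_fn=True)
for path in dp:
    print(path)
```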
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
from typing import Iterator, Tuple
import torchdata
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
@functional_datapipe("list_file_by_s3")
class S3FileListerIterDataPipe(IterDataPipe[str]):
r"""
Iterable DataPipe that lists Amazon S3 file URLs with the given prefixes (functional name: ``list_file_by_s3``).
Acceptable prefixes include ``s3://bucket-name``, ``s3://bucket-name/``, ``s3://bucket-name/folder``,
``s3://bucket-name/folder/``, and ``s3://bucket-name/prefix``. You may also set ``length``, ``request_timeout_ms``
(default 3000 ms in aws-sdk-cpp), and ``region``.
Note:
1. Input **must** be a list and direct S3 URLs are skipped.
2. ``length`` is `-1` by default, and any call to ``__len__()`` is invalid, because the length is unknown
until all files are iterated.
3. ``request_timeout_ms`` and ``region`` will overwrite settings in the configuration file or
environment variables.
4. AWS_CPP_SDK is necessary to use the S3 DataPipe(s).
Args:
source_datapipe: a DataPipe that contains URLs/URL prefixes to s3 files
length: Nominal length of the datapipe
request_timeout_ms: optional, overwrite the default timeout setting for this datapipe
region: optional, overwrite the default region inferred from credentials for this datapipe
Example:
>>> from torchdata.datapipes.iter import S3FileLister, S3FileLoader
>>> s3_prefixes = ['s3://bucket-name/folder/', ...]
>>> dp_s3_urls = S3FileLister(s3_prefixes)
>>> dp_s3_files = S3FileLoader(s3_urls) # outputs in (url, StreamWrapper(BytesIO))
>>> # more datapipes to convert loaded bytes, e.g.
>>> datapipe = dp_s3_files.parse_csv(delimiter=' ')
>>> for d in datapipe: # Start loading data
... pass
"""
def __init__(self, source_datapipe: IterDataPipe[str], length: int = -1, request_timeout_ms=-1, region="") -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("Torchdata must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length: int = length
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
def __iter__(self) -> Iterator[str]:
for prefix in self.source_datapipe:
while True:
urls = self.handler.list_files(prefix)
yield from urls
if not urls:
break
self.handler.clear_marker()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
@functional_datapipe("load_file_by_s3")
class S3FileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Iterable DataPipe that loads Amazon S3 files from the given S3 URLs (functional name: ``load_file_by_s3``).
``S3FileLoader`` iterates all given S3 URLs in ``BytesIO`` format with ``(url, BytesIO)`` tuples.
You may also set ``request_timeout_ms`` (default 3000 ms in aws-sdk-cpp), ``region``,
``buffer_size`` (default 120 MB), and ``multi_part_download`` (defaults to multi-part downloading).
Note:
1. Input **must** be a list and S3 URLs must be valid.
2. ``request_timeout_ms`` and ``region`` will overwrite settings in the
configuration file or environment variables.
3. AWS_CPP_SDK is necessary to use the S3 DataPipe(s).
Args:
source_datapipe: a DataPipe that contains URLs to s3 files
request_timeout_ms: optional, overwrite the default timeout setting for this datapipe
region: optional, overwrite the default region inferred from credentials for this datapipe
Example:
>>> from torchdata.datapipes.iter import S3FileLister, S3FileLoader
>>> s3_prefixes = ['s3://bucket-name/folder/', ...]
>>> dp_s3_urls = S3FileLister(s3_prefixes)
>>> dp_s3_files = S3FileLoader(s3_urls) # outputs in (url, StreamWrapper(BytesIO))
>>> # more datapipes to convert loaded bytes, e.g.
>>> datapipe = dp_s3_files.parse_csv(delimiter=' ')
>>> for d in datapipe: # Start loading data
... pass
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
request_timeout_ms=-1,
region="",
buffer_size=None,
multi_part_download=None,
) -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("Torchdata must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
if buffer_size:
self.handler.set_buffer_size(buffer_size)
if multi_part_download:
self.handler.set_multi_part_download(multi_part_download)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
yield url, StreamWrapper(BytesIO(self.handler.s3_read(url)))
def __len__(self) -> int:
return len(self.source_datapipe)
| [
"torchdata._torchdata.S3Handler",
"torchdata.datapipes.functional_datapipe"
] | [((442, 480), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""list_file_by_s3"""'], {}), "('list_file_by_s3')\n", (461, 480), False, 'from torchdata.datapipes import functional_datapipe\n'), ((3172, 3210), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""load_file_by_s3"""'], {}), "('load_file_by_s3')\n", (3191, 3210), False, 'from torchdata.datapipes import functional_datapipe\n'), ((2641, 2699), 'torchdata._torchdata.S3Handler', 'torchdata._torchdata.S3Handler', (['request_timeout_ms', 'region'], {}), '(request_timeout_ms, region)\n', (2671, 2699), False, 'import torchdata\n'), ((5233, 5291), 'torchdata._torchdata.S3Handler', 'torchdata._torchdata.S3Handler', (['request_timeout_ms', 'region'], {}), '(request_timeout_ms, region)\n', (5263, 5291), False, 'import torchdata\n')] |
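Because both classes are registered with `functional_datapipe`, they can be chained in functional form. A hedged sketch, assuming torchdata was built with `BUILD_S3=1` and AWS credentials/region are configured; the bucket prefix is hypothetical.

```python
# Functional-form chaining enabled by the registrations above.
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(["s3://my-bucket/dataset/"])  # hypothetical prefix
dp = dp.list_file_by_s3()   # S3FileListerIterDataPipe: yields full S3 file URLs
dp = dp.load_file_by_s3()   # S3FileLoaderIterDataPipe: yields (url, StreamWrapper)
```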
import enum
import functools
import io
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Shuffler,
Filter,
IterKeyZipper,
Demultiplexer,
LineReader,
CSVParser,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
hint_sharding,
path_comparator,
getitem,
)
from torchvision.prototype.features import Label
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
class DTD(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"dtd",
type=DatasetType.IMAGE,
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
valid_options=dict(
split=("train", "test", "val"),
fold=tuple(str(fold) for fold in range(1, 11)),
),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
decompress=True,
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, List[str]], Tuple[str, io.IOBase]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label(self.info.categories.index(category), category=category),
path=path,
image=decoder(buffer) if decoder else buffer,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{config.split}{config.fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| [
"torchdata.datapipes.iter.Filter",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.CSVParser",
"torchdata.datapipes.iter.Demultiplexer"
] | [((1203, 1397), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"""'], {'sha256': '"""e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205"""', 'decompress': '(True)'}), "(\n 'https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz',\n sha256=\n 'e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205',\n decompress=True)\n", (1215, 1397), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1541, 1562), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1553, 1562), False, 'import pathlib\n'), ((1936, 1957), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1948, 1957), False, 'import pathlib\n'), ((3130, 3236), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 3, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (3143, 3236), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3375, 3428), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['splits_dp'], {'decode': '(True)', 'return_path': '(False)'}), '(splits_dp, decode=True, return_path=False)\n', (3385, 3428), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3449, 3502), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['splits_dp'], {'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)\n', (3457, 3502), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3523, 3547), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['splits_dp'], {}), '(splits_dp)\n', (3536, 3547), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3579, 3624), 'torchdata.datapipes.iter.CSVParser', 'CSVParser', (['joint_categories_dp'], {'delimiter': '""" """'}), "(joint_categories_dp, delimiter=' ')\n", (3588, 3624), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((4425, 4456), 'torchdata.datapipes.iter.Filter', 'Filter', (['dp', 'self._filter_images'], {}), '(dp, self._filter_images)\n', (4431, 4456), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3294, 3353), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""{config.split}{config.fold}.txt"""'], {}), "('name', f'{config.split}{config.fold}.txt')\n", (3309, 3353), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((4054, 4121), 'functools.partial', 'functools.partial', (['self._collate_and_decode_sample'], {'decoder': 'decoder'}), '(self._collate_and_decode_sample, decoder=decoder)\n', (4071, 4121), False, 'import functools\n'), ((2502, 2520), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2514, 2520), False, 'import pathlib\n'), 
((3729, 3738), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', ([], {}), '()\n', (3736, 3738), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3763, 3773), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (3770, 3773), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3917, 3927), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (3924, 3927), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((4481, 4499), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (4493, 4499), False, 'import pathlib\n')] |
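The record above leans on `Demultiplexer` to route archive members into split/annotation/image streams. A minimal, hedged sketch of the same primitive on toy data (functional form `demux`), assuming torchdata is installed:

```python
# Minimal Demultiplexer sketch: one source datapipe split into two by a classifier.
from torchdata.datapipes.iter import IterableWrapper

source = IterableWrapper(range(10))
evens, odds = source.demux(num_instances=2, classifier_fn=lambda x: x % 2)
print(list(evens))  # [0, 2, 4, 6, 8]
print(list(odds))   # [1, 3, 5, 7, 9]
```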
import functools
import io
import os
import os.path
import pathlib
from typing import Callable, Optional, Collection
from typing import Union, Tuple, List, Dict, Any
import torch
from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter
from torchvision.prototype.datasets.decoder import pil
from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _collate_and_decode_data(
data: Tuple[str, io.IOBase],
*,
root: pathlib.Path,
categories: List[str],
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
path, buffer = data
data = decoder(buffer) if decoder else buffer
category = pathlib.Path(path).relative_to(root).parts[0]
label = torch.tensor(categories.index(category))
return dict(
path=path,
data=data,
label=label,
category=category,
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]] = None,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = FileOpener(dp, mode="rb")
return (
Mapper(dp, functools.partial(_collate_and_decode_data, root=root, categories=categories, decoder=decoder)),
categories,
)
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = sample.pop("data")
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]] = pil,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, decoder=decoder, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.FileOpener"
] | [((1848, 1865), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1861, 1865), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding\n'), ((1875, 1921), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['dp'], {'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(dp, buffer_size=INFINITE_BUFFER_SIZE)\n', (1883, 1921), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((1931, 1956), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['dp'], {'mode': '"""rb"""'}), "(dp, mode='rb')\n", (1941, 1956), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((1785, 1837), 'functools.partial', 'functools.partial', (['_is_not_top_level_file'], {'root': 'root'}), '(_is_not_top_level_file, root=root)\n', (1802, 1837), False, 'import functools\n'), ((2773, 2803), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', '_data_to_image_key'], {}), '(dp, _data_to_image_key)\n', (2779, 2803), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((571, 589), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (583, 589), False, 'import pathlib\n'), ((659, 676), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (671, 676), False, 'import pathlib\n'), ((1989, 2088), 'functools.partial', 'functools.partial', (['_collate_and_decode_data'], {'root': 'root', 'categories': 'categories', 'decoder': 'decoder'}), '(_collate_and_decode_data, root=root, categories=\n categories, decoder=decoder)\n', (2006, 2088), False, 'import functools\n'), ((1534, 1550), 'os.scandir', 'os.scandir', (['root'], {}), '(root)\n', (1544, 1550), False, 'import os\n'), ((970, 988), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (982, 988), False, 'import pathlib\n'), ((1444, 1462), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (1456, 1462), False, 'import pathlib\n')] |
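A hedged usage sketch for the folder helpers above; the directory path is hypothetical and is expected to contain one sub-directory per category.

```python
# Hedged usage sketch for from_image_folder defined above.
dp, categories = from_image_folder("path/to/images")  # hypothetical root directory
sample = next(iter(dp))
print(categories)
print(sorted(sample.keys()))  # ['category', 'image', 'label', 'path']
```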
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
from functools import partial
import expecttest
import numpy as np
import torch
from _utils._common_utils_for_test import reset_after_n_next_calls
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
FSSpecFileLister,
FSSpecFileOpener,
FSSpecSaver,
IterableWrapper,
TFRecordLoader,
)
class TestDataPipeTFRecord(expecttest.TestCase):
def setUp(self):
self.temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_fakedata", "tfrecord")
def assertArrayEqual(self, arr1, arr2):
np.testing.assert_array_equal(arr1, arr2)
def _ground_truth_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
yield {
"x_float": x,
"x_int": (x * 10).long(),
"x_byte": [b"test str"],
}
def _ground_truth_seq_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
rep = 2 * i + 3
yield {"x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"]}, {
"x_float_seq": [x] * rep,
"x_int_seq": [(x * 10).long()] * rep,
"x_byte_seq": [[b"test str"]] * rep,
}
@torch.no_grad()
def test_tfrecord_loader_example_iterdatapipe(self):
filename = f"{self.temp_dir}/example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_data())
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(loaded_data["x_byte"]), 1)
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
self.assertArrayEqual(true_data["x_int"].numpy(), loaded_data["x_int"].long().numpy())
self.assertEqual(loaded_data["x_float"].dtype, torch.float64)
self.assertEqual(loaded_data["x_int"].dtype, torch.int32)
self.assertEqual(true_data["x_byte"], loaded_data["x_byte"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float_unknown": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
self.assertEqual(len(expected_res), len(res_after_reset))
for true_data, loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
@torch.no_grad()
def test_tfrecord_loader_sequence_example_iterdatapipe(self):
filename = f"{self.temp_dir}/sequence_example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_seq_data())
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
"x_float_seq": ((-1, 5, 2), torch.float64),
"x_int_seq": ((-1, 5, 2), torch.int32),
"x_byte_seq": ((-1,), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
(
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
},
{
"x_float_seq": [y.reshape(5, 2).numpy() for y in z["x_float_seq"]],
"x_int_seq": [y.reshape(5, 2).numpy() for y in z["x_int_seq"]],
"x_byte_seq": [y[0] for y in z["x_byte_seq"]],
},
)
for x, z in self._ground_truth_seq_data()
]
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
l_loaded_data = loaded_data[key]
if key == "x_float":
l_loaded_data = l_loaded_data.float()
else:
l_loaded_data = l_loaded_data.int()
self.assertArrayEqual(true_data_ctx[key].numpy(), l_loaded_data.numpy())
self.assertArrayEqual(true_data_seq[key + "_seq"], loaded_data[key + "_seq"])
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x, z in self._ground_truth_seq_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{"x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": None}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(
expected_res[:n_elements_before_reset], res_before_reset
):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
self.assertEqual(len(expected_res), len(res_after_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.TFRecordLoader",
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((1515, 1530), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1528, 1530), False, 'import torch\n'), ((6173, 6188), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6186, 6188), False, 'import torch\n'), ((12867, 12882), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12880, 12882), False, 'import unittest\n'), ((806, 847), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (835, 847), True, 'import numpy as np\n'), ((1663, 1690), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[filename]'], {}), '([filename])\n', (1678, 1690), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((1711, 1742), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (1721, 1742), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((4835, 4860), 'torchdata.datapipes.iter.TFRecordLoader', 'TFRecordLoader', (['datapipe2'], {}), '(datapipe2)\n', (4849, 4860), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((4983, 5049), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tfrecord_parser', 'n_elements_before_reset'], {}), '(tfrecord_parser, n_elements_before_reset)\n', (5007, 5049), False, 'from _utils._common_utils_for_test import reset_after_n_next_calls\n'), ((6339, 6366), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[filename]'], {}), '([filename])\n', (6354, 6366), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((6387, 6418), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (6397, 6418), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((10570, 10595), 'torchdata.datapipes.iter.TFRecordLoader', 'TFRecordLoader', (['datapipe2'], {}), '(datapipe2)\n', (10584, 10595), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((10718, 10784), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tfrecord_parser', 'n_elements_before_reset'], {}), '(tfrecord_parser, n_elements_before_reset)\n', (10742, 10784), False, 'from _utils._common_utils_for_test import reset_after_n_next_calls\n'), ((926, 963), 'torch.range', 'torch.range', (['(i * 10)', '((i + 1) * 10 - 1)'], {}), '(i * 10, (i + 1) * 10 - 1)\n', (937, 963), False, 'import torch\n'), ((1193, 1230), 'torch.range', 'torch.range', (['(i * 10)', '((i + 1) * 10 - 1)'], {}), '(i * 10, (i + 1) * 10 - 1)\n', (1204, 1230), False, 'import torch\n'), ((700, 725), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (715, 725), False, 'import os\n')] |
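A hedged sketch of the TFRecord loading pattern the tests above exercise, outside the test harness; the file path is hypothetical and must point at an existing TFRecord file.

```python
# Hedged TFRecord loading sketch mirroring the tests above.
from torchdata.datapipes.iter import FileOpener, IterableWrapper

dp = IterableWrapper(["data.tfrecord"])  # hypothetical path
dp = FileOpener(dp, mode="b")
dp = dp.load_from_tfrecord()             # equivalently: TFRecordLoader(dp)
for example in dp:                       # each example is a dict of decoded features
    print(example.keys())
    break
```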
import functools
import io
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
hint_sharding,
hint_shuffling,
path_comparator,
path_accessor,
getitem,
)
from torchvision.prototype.features import Label
class CLEVR(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"clevr",
type=DatasetType.IMAGE,
homepage="https://cs.stanford.edu/people/jcjohns/clevr/",
valid_options=dict(split=("train", "val", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, io.IOBase]) -> Tuple[Tuple[str, io.IOBase], None]:
return data, None
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, io.IOBase], Optional[Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=decoder(buffer) if decoder else buffer,
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", config.split))
images_dp = hint_sharding(images_dp)
images_dp = hint_shuffling(images_dp)
if config.split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{config.split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
| [
"torchdata.datapipes.iter.JsonParser",
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.UnBatcher"
] | [((1009, 1156), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"""'], {'sha256': '"""5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1"""'}), "('https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip', sha256=\n '5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1')\n", (1021, 1156), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1301, 1322), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1313, 1322), False, 'import pathlib\n'), ((2515, 2621), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(2)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 2, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (2528, 2621), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((2794, 2818), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['images_dp'], {}), '(images_dp)\n', (2807, 2818), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((2839, 2864), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['images_dp'], {}), '(images_dp)\n', (2853, 2864), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((2728, 2772), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""parent.name"""', 'config.split'], {}), "('parent.name', config.split)\n", (2743, 2772), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3029, 3050), 'torchdata.datapipes.iter.JsonParser', 'JsonParser', (['scenes_dp'], {}), '(scenes_dp)\n', (3039, 3050), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3139, 3159), 'torchdata.datapipes.iter.UnBatcher', 'UnBatcher', (['scenes_dp'], {}), '(scenes_dp)\n', (3148, 3159), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3442, 3481), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['images_dp', 'self._add_empty_anns'], {}), '(images_dp, self._add_empty_anns)\n', (3448, 3481), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3509, 3576), 'functools.partial', 'functools.partial', (['self._collate_and_decode_sample'], {'decoder': 'decoder'}), '(self._collate_and_decode_sample, decoder=decoder)\n', (3526, 3576), False, 'import functools\n'), ((2943, 3003), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""CLEVR_{config.split}_scenes.json"""'], {}), "('name', f'CLEVR_{config.split}_scenes.json')\n", (2958, 3003), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3093, 3113), 
'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(1)', '"""scenes"""'], {}), "(1, 'scenes')\n", (3100, 3113), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3270, 3291), 'torchvision.prototype.datasets.utils._internal.path_accessor', 'path_accessor', (['"""name"""'], {}), "('name')\n", (3283, 3291), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3320, 3345), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['"""image_filename"""'], {}), "('image_filename')\n", (3327, 3345), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n')] |
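The record above joins images with their scene annotations via `IterKeyZipper`. A minimal, hedged sketch of that join on toy data, assuming torchdata is installed:

```python
# Minimal IterKeyZipper sketch: join two datapipes on a shared key.
from torchdata.datapipes.iter import IterableWrapper, IterKeyZipper

images = IterableWrapper([("img_1.png", b"bytes-1"), ("img_2.png", b"bytes-2")])
scenes = IterableWrapper([("img_2.png", {"objects": 3}), ("img_1.png", {"objects": 5})])

dp = IterKeyZipper(
    images,
    scenes,
    key_fn=lambda sample: sample[0],
    ref_key_fn=lambda ann: ann[0],
    buffer_size=10,
)
for (name, _data), (_, ann) in dp:
    print(name, ann["objects"])
```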
# Copyright (c) Facebook, Inc. and its affiliates.
import hashlib
import itertools
import lzma
import os
import subprocess
import tarfile
import unittest
import warnings
import zipfile
from json.decoder import JSONDecodeError
import expecttest
from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls
from torchdata.datapipes.iter import (
CSVDictParser,
CSVParser,
Decompressor,
FileLister,
FileOpener,
HashChecker,
IoPathFileLister,
IoPathFileOpener,
IoPathSaver,
IterableWrapper,
JsonParser,
RarArchiveLoader,
Saver,
TarArchiveLoader,
XzFileLoader,
ZipArchiveLoader,
)
try:
import iopath
HAS_IOPATH = True
except ImportError:
HAS_IOPATH = False
skipIfNoIoPath = unittest.skipIf(not HAS_IOPATH, "no iopath")
try:
import rarfile
HAS_RAR_TOOLS = True
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
HAS_RAR_TOOLS = False
except (ModuleNotFoundError, FileNotFoundError):
HAS_RAR_TOOLS = False
skipIfNoRarTools = unittest.skipIf(not HAS_RAR_TOOLS, "no rar tools")
class TestDataPipeLocalIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeLocalIO was not able to cleanup temp dir due to {e}")
def _custom_files_set_up(self, files):
for fname, content in files.items():
temp_file_path = os.path.join(self.temp_dir.name, fname)
with open(temp_file_path, "w") as f:
f.write(content)
def _compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
if check_length:
self.assertEqual(len(expected_files), len(result))
for res, expected_file in itertools.zip_longest(result, expected_files):
self.assertTrue(res is not None and expected_file is not None)
self.assertEqual(os.path.basename(res[0]), os.path.basename(expected_file))
with open(expected_file, "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def _unordered_compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
expected_names_to_files = {os.path.basename(f): f for f in expected_files}
if check_length:
self.assertEqual(len(expected_files), len(result))
for res in result:
fname = os.path.basename(res[0])
self.assertTrue(fname is not None)
self.assertTrue(fname in expected_names_to_files)
with open(expected_names_to_files[fname], "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def test_csv_parser_iterdatapipe(self):
def make_path(fname):
return f"{self.temp_dir.name}/{fname}"
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = IterableWrapper([make_path(fname) for fname in ["1.csv", "empty.csv", "empty2.csv"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at time from each file, skipping over empty content
csv_parser_dp = datapipe3.parse_csv()
expected_res = [["key", "item"], ["a", "1"], ["b", "2"], []]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file, skipping over empty content and header
csv_parser_dp = datapipe3.parse_csv(skip_lines=1)
expected_res = [["a", "1"], ["b", "2"]]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file with file name, skipping over empty content
csv_parser_dp = datapipe3.parse_csv(return_path=True)
expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])]
self.assertEqual(expected_res, list(csv_parser_dp))
# Reset Test:
csv_parser_dp = CSVParser(datapipe3, return_path=True)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_parser_dp)
def test_csv_dict_parser_iterdatapipe(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at a time as dict, with the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
expected_res1 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
self.assertEqual(expected_res1, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict, skip over first row, with the second row being the header
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(skip_lines=1)
expected_res2 = [{"a": "b", "1": "2"}]
self.assertEqual(expected_res2, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict with file name, and the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(return_path=True)
expected_res3 = [("1.csv", {"key": "a", "item": "1"}), ("1.csv", {"key": "b", "item": "2"})]
self.assertEqual(expected_res3, list(csv_dict_parser_dp))
# Reset Test
csv_dict_parser_dp = CSVDictParser(datapipe3)
expected_res4 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_dict_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res4[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res4, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_dict_parser_dp)
def test_hash_checker_iterdatapipe(self):
hash_dict = {}
def fill_hash_dict():
for path in self.temp_files:
with open(path) as f:
hash_func = hashlib.sha256()
content = f.read().encode("utf-8")
hash_func.update(content)
hash_dict[path] = hash_func.hexdigest()
fill_hash_dict()
datapipe1 = FileLister(self.temp_dir.name, "*")
datapipe2 = FileOpener(datapipe1, mode="b")
hash_check_dp = HashChecker(datapipe2, hash_dict)
# Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# Functional Test: Ensure the rewind option works, and the stream is empty when there is no rewind
hash_check_dp_no_reset = HashChecker(datapipe2, hash_dict, rewind=False)
for (expected_path, _), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp_no_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(b"", actual_stream.read())
# Functional Test: Error when file/path is not in hash_dict
hash_check_dp = HashChecker(datapipe2, {})
it = iter(hash_check_dp)
with self.assertRaisesRegex(RuntimeError, "Unspecified hash for file"):
next(it)
# Functional Test: Error when the hash is different
hash_dict[self.temp_files[0]] = "WRONG HASH"
hash_check_dp = HashChecker(datapipe2, hash_dict)
with self.assertRaisesRegex(RuntimeError, "does not match"):
list(hash_check_dp)
# Reset Test:
fill_hash_dict() # Reset the dict with correct values because we changed it in the last test case
hash_check_dp = datapipe2.check_hash(hash_dict)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(hash_check_dp, n_elements_before_reset)
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_before_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_after_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# __len__ Test: returns the length of source DataPipe
with self.assertRaisesRegex(TypeError, "FileOpenerIterDataPipe instance doesn't have valid length"):
len(hash_check_dp)
def test_json_parser_iterdatapipe(self):
def is_empty_json(path_and_stream):
return path_and_stream[0] == "empty.json"
def is_nonempty_json(path_and_stream):
return path_and_stream[0] != "empty.json"
json_files = {
"1.json": '["foo", {"bar":["baz", null, 1.0, 2]}]',
"empty.json": "",
"2.json": '{"__complex__": true, "real": 1, "imag": 2}',
}
self._custom_files_set_up(json_files)
datapipe1 = IterableWrapper([f"{self.temp_dir.name}/{fname}" for fname in ["empty.json", "1.json", "2.json"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
datapipe_empty = datapipe3.filter(is_empty_json)
datapipe_nonempty = datapipe3.filter(is_nonempty_json)
empty_json_dp = datapipe_empty.parse_json_files()
it = iter(empty_json_dp)
        # Functional Test: dp fails when an empty JSON file is given
with self.assertRaisesRegex(JSONDecodeError, "Expecting value"):
next(it)
# Functional Test: dp yields one json file at a time
json_dp = datapipe_nonempty.parse_json_files()
expected_res = [
("1.json", ["foo", {"bar": ["baz", None, 1.0, 2]}]),
("2.json", {"__complex__": True, "real": 1, "imag": 2}),
]
self.assertEqual(expected_res, list(json_dp))
# Reset Test:
json_dp = JsonParser(datapipe_nonempty)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(json_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "len"):
len(json_dp)
def test_saver_iterdatapipe(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = Saver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
def _write_test_tar_files(self):
path = os.path.join(self.temp_dir.name, "test_tar.tar")
with tarfile.open(path, "w:tar") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def _write_test_tar_gz_files(self):
path = os.path.join(self.temp_dir.name, "test_gz.tar.gz")
with tarfile.open(path, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def test_tar_archive_reader_iterdatapipe(self):
self._write_test_tar_files()
datapipe1 = FileLister(self.temp_dir.name, "*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
self._write_test_tar_gz_files()
datapipe_gz_1 = FileLister(self.temp_dir.name, "*.tar.gz")
datapipe_gz_2 = FileOpener(datapipe_gz_1, mode="b")
gz_reader_dp = TarArchiveLoader(datapipe_gz_2)
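        # Both loaders yield one (member path, stream) pair per file stored inside the archive.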
# Functional Test: Read extracted files before reaching the end of the tarfile
self._compressed_files_comparison_helper(self.temp_files, tar_loader_dp, check_length=False)
self._compressed_files_comparison_helper(self.temp_files, gz_reader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the tarfile
data_refs = list(tar_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
data_refs_gz = list(gz_reader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs_gz)
# Reset Test: reset the DataPipe after reading part of it
tar_loader_dp = datapipe2.load_from_tar()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(tar_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check result accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(tar_loader_dp)
def _write_test_zip_files(self):
path = os.path.join(self.temp_dir.name, "test_zip.zip")
with zipfile.ZipFile(path, "w") as myzip:
myzip.write(self.temp_files[0], arcname=os.path.basename(self.temp_files[0]))
myzip.write(self.temp_files[1], arcname=os.path.basename(self.temp_files[1]))
myzip.write(self.temp_files[2], arcname=os.path.basename(self.temp_files[2]))
def test_zip_archive_reader_iterdatapipe(self):
self._write_test_zip_files()
datapipe1 = FileLister(self.temp_dir.name, "*.zip")
datapipe2 = FileOpener(datapipe1, mode="b")
zip_loader_dp = ZipArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the zipfile
self._compressed_files_comparison_helper(self.temp_files, zip_loader_dp, check_length=False)
        # Functional Test: read extracted files after reaching the end of the zipfile
data_refs = list(zip_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
zip_loader_dp = datapipe2.load_from_zip()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(zip_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(zip_loader_dp)
def _write_test_xz_files(self):
for path in self.temp_files:
fname = os.path.basename(path)
temp_xzfile_pathname = os.path.join(self.temp_dir.name, f"{fname}.xz")
with open(path) as f:
with lzma.open(temp_xzfile_pathname, "w") as xz:
xz.write(f.read().encode("utf-8"))
def test_xz_archive_reader_iterdatapipe(self):
        # Note that the .tar and .zip tests write multiple files into a single compressed archive,
        # whereas here we create multiple individual .xz files in the same directory.
self._write_test_xz_files()
datapipe1 = FileLister(self.temp_dir.name, "*.xz")
datapipe2 = FileOpener(datapipe1, mode="b")
xz_loader_dp = XzFileLoader(datapipe2)
# Functional Test: Read extracted files before reaching the end of the xzfile
self._unordered_compressed_files_comparison_helper(self.temp_files, xz_loader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the xzfile
data_refs = list(xz_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
xz_loader_dp = datapipe2.load_from_xz()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False)
# Check result accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# Reset Test: Ensure the order is consistent between iterations
for r1, r2 in zip(xz_loader_dp, xz_loader_dp):
self.assertEqual(r1[0], r2[0])
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(xz_loader_dp)
def _decompressor_tar_test_helper(self, expected_files, tar_decompress_dp):
for _file, child_obj in tar_decompress_dp:
for expected_file, tarinfo in zip(expected_files, child_obj):
if not tarinfo.isfile():
continue
extracted_fobj = child_obj.extractfile(tarinfo)
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), extracted_fobj.read())
def _decompressor_xz_test_helper(self, xz_decompress_dp):
for xz_file_name, xz_stream in xz_decompress_dp:
expected_file = xz_file_name[:-3]
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), xz_stream.read())
def _write_single_gz_file(self):
import gzip
with gzip.open(f"{self.temp_dir.name}/temp.gz", "wb") as k:
with open(self.temp_files[0], "rb") as f:
k.write(f.read())
def test_decompressor_iterdatapipe(self):
self._write_test_tar_files()
self._write_test_tar_gz_files()
self._write_single_gz_file()
self._write_test_zip_files()
self._write_test_xz_files()
# Functional Test: work with .tar files
tar_file_dp = FileLister(self.temp_dir.name, "*.tar")
tar_load_dp = FileOpener(tar_file_dp, mode="b")
tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
        # Functional Test: work with .tar.gz files
tar_gz_file_dp = FileLister(self.temp_dir.name, "*.tar.gz")
tar_gz_load_dp = FileOpener(tar_gz_file_dp, mode="b")
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: work with .gz files
gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"])
gz_load_dp = FileOpener(gz_file_dp, mode="b")
gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip")
for _, gz_stream in gz_decompress_dp:
with open(self.temp_files[0], "rb") as f:
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar files
tar_decompress_dp = Decompressor(tar_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
        # Functional Test: Compression Type works for both upper- and lower-case strings
tar_decompress_dp = Decompressor(tar_load_dp, file_type="TAr")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: Compression Type throws error for invalid file type
with self.assertRaisesRegex(ValueError, "not a valid CompressionType"):
Decompressor(tar_load_dp, file_type="ABC")
# Reset Test: Ensure the order is consistent between iterations
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_decompress_dp, n_elements_before_reset)
self._decompressor_xz_test_helper(res_before_reset)
self._decompressor_xz_test_helper(res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "has no len"):
len(tar_decompress_dp)
def _write_text_files(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
list(saver_dp)
# TODO(120): this test currently only covers reading from local
# filesystem. It needs to be modified once test data can be stored on
# gdrive/s3/onedrive
@skipIfNoIoPath
def test_io_path_file_lister_iterdatapipe(self):
datapipe = IoPathFileLister(root=self.temp_sub_dir.name)
# check all file paths within sub_folder are listed
for path in datapipe:
self.assertTrue(path in self.temp_sub_files)
@skipIfNoIoPath
def test_io_path_file_loader_iterdatapipe(self):
datapipe1 = IoPathFileLister(root=self.temp_sub_dir.name)
datapipe2 = IoPathFileOpener(datapipe1)
# check contents of file match
for _, f in datapipe2:
self.assertEqual(f.read(), "0123456789abcdef")
# Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted
self._write_text_files()
lister_dp = FileLister(self.temp_dir.name, "*.text")
iopath_file_loader_dp = IoPathFileOpener(lister_dp, mode="rb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(iopath_file_loader_dp, n_elements_before_reset)
self.assertEqual(2, len(res_before_reset))
self.assertEqual(3, len(res_after_reset))
for _name, stream in res_before_reset:
self.assertEqual(b"DATA", stream.read())
for _name, stream in res_after_reset:
self.assertEqual(b"DATA", stream.read())
@skipIfNoIoPath
def test_io_path_saver_iterdatapipe(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_by_iopath(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = IoPathSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
def _write_test_rar_files(self):
        # `rarfile` can only read, not write, .rar archives, so we use the system `rar` utility instead
rar_archive_name = os.path.join(self.temp_dir.name, "test_rar")
subprocess.run(("rar", "a", rar_archive_name + ".rar", *self.temp_files), check=True)
# Nested RAR
subprocess.run(("rar", "a", rar_archive_name + "1.rar", self.temp_files[0]), check=True)
subprocess.run(("rar", "a", rar_archive_name + "2.rar", *self.temp_files[1:]), check=True)
subprocess.run(
("rar", "a", rar_archive_name + "_nested.rar", rar_archive_name + "1.rar", rar_archive_name + "2.rar"),
check=True,
)
# Nested RAR in TAR
with tarfile.open(rar_archive_name + "_nested.tar", "w:tar") as tar:
tar.add(rar_archive_name + "1.rar")
tar.add(rar_archive_name + "2.rar")
@skipIfNoRarTools
def test_rar_archive_loader(self):
self._write_test_rar_files()
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp = RarArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
rar_loader_dp = datapipe2.load_from_rar()
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(rar_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._unordered_compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(rar_loader_dp)
# Nested RAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp_1 = RarArchiveLoader(datapipe2)
rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1)
with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"):
list(rar_loader_dp_2)
# Nested RAR in TAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")])
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
rar_loader_dp = RarArchiveLoader(tar_loader_dp)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.IoPathFileOpener",
"torchdata.datapipes.iter.Decompressor",
"torchdata.datapipes.iter.TarArchiveLoader",
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.RarArchiveLoader",
"torchdata.datapipes.iter.FileLister",
"torchdata.datapipes.iter.Saver",
"torchdata.dat... | [((802, 846), 'unittest.skipIf', 'unittest.skipIf', (['(not HAS_IOPATH)', '"""no iopath"""'], {}), "(not HAS_IOPATH, 'no iopath')\n", (817, 846), False, 'import unittest\n'), ((1177, 1227), 'unittest.skipIf', 'unittest.skipIf', (['(not HAS_RAR_TOOLS)', '"""no rar tools"""'], {}), "(not HAS_RAR_TOOLS, 'no rar tools')\n", (1192, 1227), False, 'import unittest\n'), ((31198, 31213), 'unittest.main', 'unittest.main', ([], {}), '()\n', (31211, 31213), False, 'import unittest\n'), ((915, 935), 'rarfile.tool_setup', 'rarfile.tool_setup', ([], {}), '()\n', (933, 935), False, 'import rarfile\n'), ((944, 985), 'subprocess.run', 'subprocess.run', (["('rar', '-?')"], {'check': '(True)'}), "(('rar', '-?'), check=True)\n", (958, 985), False, 'import subprocess\n'), ((1323, 1340), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', ([], {}), '()\n', (1338, 1340), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1367, 1399), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_dir'], {}), '(self.temp_dir)\n', (1384, 1399), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1428, 1463), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', (['self.temp_dir.name'], {}), '(self.temp_dir.name)\n', (1443, 1463), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1494, 1540), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_sub_dir', '(4)', '(False)'], {}), '(self.temp_sub_dir, 4, False)\n', (1511, 1540), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((2245, 2290), 'itertools.zip_longest', 'itertools.zip_longest', (['result', 'expected_files'], {}), '(result, expected_files)\n', (2266, 2290), False, 'import itertools\n'), ((3592, 3623), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (3602, 3623), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((4604, 4642), 'torchdata.datapipes.iter.CSVParser', 'CSVParser', (['datapipe3'], {'return_path': '(True)'}), '(datapipe3, return_path=True)\n', (4613, 4642), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((4723, 4787), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['csv_parser_dp', 'n_elements_before_reset'], {}), '(csv_parser_dp, n_elements_before_reset)\n', (4747, 4787), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((5429, 5468), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.csv"""'], {}), "(self.temp_dir.name, '*.csv')\n", (5439, 5468), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, 
IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((5489, 5520), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (5499, 5520), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((6585, 6609), 'torchdata.datapipes.iter.CSVDictParser', 'CSVDictParser', (['datapipe3'], {}), '(datapipe3)\n', (6598, 6609), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((6769, 6838), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['csv_dict_parser_dp', 'n_elements_before_reset'], {}), '(csv_dict_parser_dp, n_elements_before_reset)\n', (6793, 6838), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((7604, 7639), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*"""'], {}), "(self.temp_dir.name, '*')\n", (7614, 7639), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((7660, 7691), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (7670, 7691), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((7716, 7749), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', 'hash_dict'], {}), '(datapipe2, hash_dict)\n', (7727, 7749), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8228, 8275), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', 'hash_dict'], {'rewind': '(False)'}), '(datapipe2, hash_dict, rewind=False)\n', (8239, 8275), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8586, 8612), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', '{}'], {}), '(datapipe2, {})\n', (8597, 8612), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8885, 8918), 'torchdata.datapipes.iter.HashChecker', 
'HashChecker', (['datapipe2', 'hash_dict'], {}), '(datapipe2, hash_dict)\n', (8896, 8918), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((9286, 9350), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['hash_check_dp', 'n_elements_before_reset'], {}), '(hash_check_dp, n_elements_before_reset)\n', (9310, 9350), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((10550, 10652), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[f'{self.temp_dir.name}/{fname}' for fname in ['empty.json', '1.json',\n '2.json']]"], {}), "([f'{self.temp_dir.name}/{fname}' for fname in ['empty.json',\n '1.json', '2.json']])\n", (10565, 10652), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((10669, 10700), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (10679, 10700), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((11498, 11527), 'torchdata.datapipes.iter.JsonParser', 'JsonParser', (['datapipe_nonempty'], {}), '(datapipe_nonempty)\n', (11508, 11527), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((11608, 11666), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['json_dp', 'n_elements_before_reset'], {}), '(json_dp, n_elements_before_reset)\n', (11632, 11666), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((12798, 12850), 'torchdata.datapipes.iter.Saver', 'Saver', (['source_dp'], {'filepath_fn': 'filepath_fn', 'mode': '"""wb"""'}), "(source_dp, filepath_fn=filepath_fn, mode='wb')\n", (12803, 12850), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((12931, 12990), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['saver_dp', 'n_elements_before_reset'], {}), '(saver_dp, n_elements_before_reset)\n', (12955, 12990), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((13475, 13523), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_tar.tar"""'], {}), "(self.temp_dir.name, 'test_tar.tar')\n", (13487, 13523), False, 'import os\n'), ((13749, 13799), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_gz.tar.gz"""'], {}), 
"(self.temp_dir.name, 'test_gz.tar.gz')\n", (13761, 13799), False, 'import os\n'), ((14078, 14117), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar"""'], {}), "(self.temp_dir.name, '*.tar')\n", (14088, 14117), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14138, 14169), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (14148, 14169), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14194, 14221), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (14210, 14221), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14287, 14329), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar.gz"""'], {}), "(self.temp_dir.name, '*.tar.gz')\n", (14297, 14329), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14354, 14389), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe_gz_1'], {'mode': '"""b"""'}), "(datapipe_gz_1, mode='b')\n", (14364, 14389), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14413, 14444), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe_gz_2'], {}), '(datapipe_gz_2)\n', (14429, 14444), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((15257, 15321), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tar_loader_dp', 'n_elements_before_reset'], {}), '(tar_loader_dp, n_elements_before_reset)\n', (15281, 15321), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((15831, 15879), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_zip.zip"""'], {}), "(self.temp_dir.name, 'test_zip.zip')\n", (15843, 15879), False, 'import os\n'), ((16310, 16349), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.zip"""'], {}), "(self.temp_dir.name, '*.zip')\n", (16320, 16349), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, 
IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((16370, 16401), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (16380, 16401), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((16426, 16453), 'torchdata.datapipes.iter.ZipArchiveLoader', 'ZipArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (16442, 16453), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((17043, 17107), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['zip_loader_dp', 'n_elements_before_reset'], {}), '(zip_loader_dp, n_elements_before_reset)\n', (17067, 17107), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((18217, 18255), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.xz"""'], {}), "(self.temp_dir.name, '*.xz')\n", (18227, 18255), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18276, 18307), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (18286, 18307), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18331, 18354), 'torchdata.datapipes.iter.XzFileLoader', 'XzFileLoader', (['datapipe2'], {}), '(datapipe2)\n', (18343, 18354), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18959, 19022), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['xz_loader_dp', 'n_elements_before_reset'], {}), '(xz_loader_dp, n_elements_before_reset)\n', (18983, 19022), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((20995, 21034), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar"""'], {}), "(self.temp_dir.name, '*.tar')\n", (21005, 21034), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21057, 21090), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['tar_file_dp'], {'mode': '"""b"""'}), "(tar_file_dp, mode='b')\n", (21067, 21090), False, 'from torchdata.datapipes.iter 
import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21119, 21161), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""tar"""'}), "(tar_load_dp, file_type='tar')\n", (21131, 21161), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21318, 21360), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar.gz"""'], {}), "(self.temp_dir.name, '*.tar.gz')\n", (21328, 21360), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21386, 21422), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['tar_gz_file_dp'], {'mode': '"""b"""'}), "(tar_gz_file_dp, mode='b')\n", (21396, 21422), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21454, 21499), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_gz_load_dp'], {'file_type': '"""tar"""'}), "(tar_gz_load_dp, file_type='tar')\n", (21466, 21499), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21651, 21701), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[f'{self.temp_dir.name}/temp.gz']"], {}), "([f'{self.temp_dir.name}/temp.gz'])\n", (21666, 21701), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21723, 21755), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['gz_file_dp'], {'mode': '"""b"""'}), "(gz_file_dp, mode='b')\n", (21733, 21755), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21783, 21825), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['gz_load_dp'], {'file_type': '"""gzip"""'}), "(gz_load_dp, file_type='gzip')\n", (21795, 21825), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22058, 22097), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.zip"""'], {}), "(self.temp_dir.name, '*.zip')\n", (22068, 22097), 
False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22120, 22153), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['zip_file_dp'], {'mode': '"""b"""'}), "(zip_file_dp, mode='b')\n", (22130, 22153), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22520, 22558), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.xz"""'], {}), "(self.temp_dir.name, '*.xz')\n", (22530, 22558), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22580, 22612), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['xz_file_dp'], {'mode': '"""b"""'}), "(xz_file_dp, mode='b')\n", (22590, 22612), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22640, 22682), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['xz_load_dp'], {'file_type': '"""lzma"""'}), "(xz_load_dp, file_type='lzma')\n", (22652, 22682), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22846, 22887), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': 'None'}), '(tar_load_dp, file_type=None)\n', (22858, 22887), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23068, 23092), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['xz_load_dp'], {}), '(xz_load_dp)\n', (23080, 23092), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23262, 23306), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_gz_load_dp'], {'file_type': 'None'}), '(tar_gz_load_dp, file_type=None)\n', (23274, 23306), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23509, 23551), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""TAr"""'}), "(tar_load_dp, file_type='TAr')\n", (23521, 23551), False, 'from 
torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23999, 24066), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['xz_decompress_dp', 'n_elements_before_reset'], {}), '(xz_decompress_dp, n_elements_before_reset)\n', (24023, 24066), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((24996, 25041), 'torchdata.datapipes.iter.IoPathFileLister', 'IoPathFileLister', ([], {'root': 'self.temp_sub_dir.name'}), '(root=self.temp_sub_dir.name)\n', (25012, 25041), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25284, 25329), 'torchdata.datapipes.iter.IoPathFileLister', 'IoPathFileLister', ([], {'root': 'self.temp_sub_dir.name'}), '(root=self.temp_sub_dir.name)\n', (25300, 25329), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25350, 25377), 'torchdata.datapipes.iter.IoPathFileOpener', 'IoPathFileOpener', (['datapipe1'], {}), '(datapipe1)\n', (25366, 25377), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25670, 25710), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.text"""'], {}), "(self.temp_dir.name, '*.text')\n", (25680, 25710), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25743, 25781), 'torchdata.datapipes.iter.IoPathFileOpener', 'IoPathFileOpener', (['lister_dp'], {'mode': '"""rb"""'}), "(lister_dp, mode='rb')\n", (25759, 25781), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25863, 25935), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['iopath_file_loader_dp', 'n_elements_before_reset'], {}), '(iopath_file_loader_dp, n_elements_before_reset)\n', (25887, 25935), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((27089, 27147), 'torchdata.datapipes.iter.IoPathSaver', 'IoPathSaver', (['source_dp'], {'filepath_fn': 'filepath_fn', 'mode': '"""wb"""'}), "(source_dp, filepath_fn=filepath_fn, mode='wb')\n", (27100, 27147), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, 
IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((27228, 27287), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['saver_dp', 'n_elements_before_reset'], {}), '(saver_dp, n_elements_before_reset)\n', (27252, 27287), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((27876, 27920), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar"""'], {}), "(self.temp_dir.name, 'test_rar')\n", (27888, 27920), False, 'import os\n'), ((27929, 28018), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '.rar', *self.temp_files)"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '.rar', *self.temp_files),\n check=True)\n", (27943, 28018), False, 'import subprocess\n'), ((28045, 28137), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '1.rar', self.temp_files[0])"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '1.rar', self.temp_files[0]),\n check=True)\n", (28059, 28137), False, 'import subprocess\n'), ((28142, 28237), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '2.rar', *self.temp_files[1:])"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '2.rar', *self.temp_files[1:\n ]), check=True)\n", (28156, 28237), False, 'import subprocess\n'), ((28241, 28376), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '_nested.rar', rar_archive_name + '1.rar', \n rar_archive_name + '2.rar')"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '_nested.rar', \n rar_archive_name + '1.rar', rar_archive_name + '2.rar'), check=True)\n", (28255, 28376), False, 'import subprocess\n'), ((28817, 28848), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (28827, 28848), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((28873, 28900), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (28889, 28900), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((29511, 29575), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['rar_loader_dp', 'n_elements_before_reset'], {}), '(rar_loader_dp, n_elements_before_reset)\n', (29535, 29575), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((30199, 30230), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (30209, 30230), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30257, 30284), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', 
(30273, 30284), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30311, 30344), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['rar_loader_dp_1'], {}), '(rar_loader_dp_1)\n', (30327, 30344), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30612, 30643), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (30622, 30643), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30668, 30695), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (30684, 30695), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30720, 30751), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['tar_loader_dp'], {}), '(tar_loader_dp)\n', (30736, 30751), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((1898, 1937), 'os.path.join', 'os.path.join', (['self.temp_dir.name', 'fname'], {}), '(self.temp_dir.name, fname)\n', (1910, 1937), False, 'import os\n'), ((2737, 2756), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2753, 2756), False, 'import os\n'), ((2920, 2944), 'os.path.basename', 'os.path.basename', (['res[0]'], {}), '(res[0])\n', (2936, 2944), False, 'import os\n'), ((13537, 13564), 'tarfile.open', 'tarfile.open', (['path', '"""w:tar"""'], {}), "(path, 'w:tar')\n", (13549, 13564), False, 'import tarfile\n'), ((13813, 13839), 'tarfile.open', 'tarfile.open', (['path', '"""w:gz"""'], {}), "(path, 'w:gz')\n", (13825, 13839), False, 'import tarfile\n'), ((15893, 15919), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path', '"""w"""'], {}), "(path, 'w')\n", (15908, 15919), False, 'import zipfile\n'), ((17668, 17690), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (17684, 17690), False, 'import os\n'), ((17726, 17773), 'os.path.join', 'os.path.join', (['self.temp_dir.name', 'f"""{fname}.xz"""'], {}), "(self.temp_dir.name, f'{fname}.xz')\n", (17738, 17773), False, 'import os\n'), ((20547, 20595), 'gzip.open', 'gzip.open', (['f"""{self.temp_dir.name}/temp.gz"""', '"""wb"""'], {}), "(f'{self.temp_dir.name}/temp.gz', 'wb')\n", (20556, 20595), False, 'import gzip\n'), ((23803, 23845), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""ABC"""'}), "(tar_load_dp, file_type='ABC')\n", (23815, 23845), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, 
Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((28449, 28504), 'tarfile.open', 'tarfile.open', (["(rar_archive_name + '_nested.tar')", '"""w:tar"""'], {}), "(rar_archive_name + '_nested.tar', 'w:tar')\n", (28461, 28504), False, 'import tarfile\n'), ((1698, 1784), 'warnings.warn', 'warnings.warn', (['f"""TestDataPipeLocalIO was not able to cleanup temp dir due to {e}"""'], {}), "(\n f'TestDataPipeLocalIO was not able to cleanup temp dir due to {e}')\n", (1711, 1784), False, 'import warnings\n'), ((2396, 2420), 'os.path.basename', 'os.path.basename', (['res[0]'], {}), '(res[0])\n', (2412, 2420), False, 'import os\n'), ((2422, 2453), 'os.path.basename', 'os.path.basename', (['expected_file'], {}), '(expected_file)\n', (2438, 2453), False, 'import os\n'), ((5217, 5253), 'os.path.basename', 'os.path.basename', (['path_and_stream[0]'], {}), '(path_and_stream[0])\n', (5233, 5253), False, 'import os\n'), ((12110, 12132), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (12126, 12132), False, 'import os\n'), ((24463, 24485), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (24479, 24485), False, 'import os\n'), ((26399, 26421), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (26415, 26421), False, 'import os\n'), ((28746, 28794), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar.rar"""'], {}), "(self.temp_dir.name, 'test_rar.rar')\n", (28758, 28794), False, 'import os\n'), ((30121, 30176), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar_nested.rar"""'], {}), "(self.temp_dir.name, 'test_rar_nested.rar')\n", (30133, 30176), False, 'import os\n'), ((30534, 30589), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar_nested.tar"""'], {}), "(self.temp_dir.name, 'test_rar_nested.tar')\n", (30546, 30589), False, 'import os\n'), ((7379, 7395), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (7393, 7395), False, 'import hashlib\n'), ((15982, 16018), 'os.path.basename', 'os.path.basename', (['self.temp_files[0]'], {}), '(self.temp_files[0])\n', (15998, 16018), False, 'import os\n'), ((16072, 16108), 'os.path.basename', 'os.path.basename', (['self.temp_files[1]'], {}), '(self.temp_files[1])\n', (16088, 16108), False, 'import os\n'), ((16162, 16198), 'os.path.basename', 'os.path.basename', (['self.temp_files[2]'], {}), '(self.temp_files[2])\n', (16178, 16198), False, 'import os\n'), ((17829, 17865), 'lzma.open', 'lzma.open', (['temp_xzfile_pathname', '"""w"""'], {}), "(temp_xzfile_pathname, 'w')\n", (17838, 17865), False, 'import lzma\n'), ((22425, 22448), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (22441, 22448), False, 'import os\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torcharrow as ta
import torcharrow.dtypes as dt
import torcharrow.pytorch as tap
from torch.utils.data import DataLoader
from torcharrow import functional
from torchdata.datapipes.iter import FileLister
from torchrec.datasets.criteo import (
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
DEFAULT_LABEL_NAME,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class _JaggedTensorConversion(tap.TensorConversion):
# pyre-fixme[14]: `to_tensor` overrides method defined in `TensorConversion`
# inconsistently.
def to_tensor(self, df: ta.DataFrame):
kjt_keys = df.columns
kjt_values = []
kjt_lengths = []
for row in df:
for idx, _column in enumerate(df.columns):
value = row[idx]
kjt_values.extend(value)
kjt_lengths.append(len(value))
kjt = KeyedJaggedTensor.from_lengths_sync(
keys=kjt_keys,
values=torch.tensor(kjt_values),
lengths=torch.tensor(kjt_lengths),
)
return kjt
class _Scalar(tap.TensorConversion):
def to_tensor(self, df: ta.DataFrame):
labels = torch.tensor(df)
return labels
def get_dataloader(
parquet_directory, world_size, rank, num_embeddings=4096, salt=0, batch_size=16
):
source_dp = FileLister(parquet_directory, masks="*.parquet")
# TODO support batch_size for load_parquet_as_df.
# TODO use OSSArrowDataPipe once it is ready
parquet_df_dp = source_dp.load_parquet_as_df()
def preproc(df, max_idx=num_embeddings, salt=salt):
for feature_name in DEFAULT_INT_NAMES:
df[feature_name] = df[feature_name].fill_null(0)
for feature_name in DEFAULT_CAT_NAMES:
df[feature_name] = df[feature_name].fill_null(0)
df[feature_name] = df[feature_name].cast(dt.int64)
        # construct a sparse index from a dense one
df["bucketize_int_0"] = functional.bucketize(df["int_0"], [0.5, 1.0, 1.5]).cast(
dt.int64
)
# flatten several columns into one
df["dense_features"] = ta.dataframe(
{int_name: df[int_name] for int_name in DEFAULT_INT_NAMES}
)
df["dense_features"] = (df["dense_features"] + 3).log()
for cat_name in DEFAULT_CAT_NAMES + ["bucketize_int_0"]:
# hash our embedding index into our embedding tables
df[cat_name] = functional.sigrid_hash(df[cat_name], salt, max_idx)
df[cat_name] = functional.array_constructor(df[cat_name])
df[cat_name] = functional.firstx(df[cat_name], 1)
df["sparse_features"] = ta.dataframe(
{
cat_name: df[cat_name]
for cat_name in DEFAULT_CAT_NAMES + ["bucketize_int_0"]
}
)
df = df[["dense_features", "sparse_features", DEFAULT_LABEL_NAME]]
return df
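    # Sharding note: ``sharding_filter`` marks the point in the pipeline where elements are
    # split across workers/ranks, and ``apply_sharding`` then fixes the total number of shards
    # and this process's shard index (``world_size`` and ``rank`` here).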
parquet_df_dp = parquet_df_dp.map(preproc).sharding_filter()
parquet_df_dp.apply_sharding(world_size, rank)
def criteo_collate(df):
dense_features, kjt, labels = df.to_tensor(
{
"dense_features": tap.rec.Dense(batch_first=True),
"sparse_features": _JaggedTensorConversion(),
"label": _Scalar(),
}
)
return dense_features, kjt, labels
return DataLoader(
parquet_df_dp,
batch_size=None,
collate_fn=criteo_collate,
drop_last=False,
pin_memory=True,
)
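# Hedged usage sketch (the parquet directory and the process-group values below are
# hypothetical and only illustrate how the function above would typically be called):
#
#   dataloader = get_dataloader("/data/criteo_parquet", world_size=1, rank=0)
#   for dense_features, kjt, labels in dataloader:
#       ...  # dense_features: Tensor, kjt: KeyedJaggedTensor, labels: Tensor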
| [
"torchdata.datapipes.iter.FileLister"
] | [((1570, 1618), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['parquet_directory'], {'masks': '"""*.parquet"""'}), "(parquet_directory, masks='*.parquet')\n", (1580, 1618), False, 'from torchdata.datapipes.iter import FileLister\n'), ((3608, 3715), 'torch.utils.data.DataLoader', 'DataLoader', (['parquet_df_dp'], {'batch_size': 'None', 'collate_fn': 'criteo_collate', 'drop_last': '(False)', 'pin_memory': '(True)'}), '(parquet_df_dp, batch_size=None, collate_fn=criteo_collate,\n drop_last=False, pin_memory=True)\n', (3618, 3715), False, 'from torch.utils.data import DataLoader\n'), ((1406, 1422), 'torch.tensor', 'torch.tensor', (['df'], {}), '(df)\n', (1418, 1422), False, 'import torch\n'), ((2358, 2430), 'torcharrow.dataframe', 'ta.dataframe', (['{int_name: df[int_name] for int_name in DEFAULT_INT_NAMES}'], {}), '({int_name: df[int_name] for int_name in DEFAULT_INT_NAMES})\n', (2370, 2430), True, 'import torcharrow as ta\n'), ((2893, 2992), 'torcharrow.dataframe', 'ta.dataframe', (["{cat_name: df[cat_name] for cat_name in DEFAULT_CAT_NAMES + ['bucketize_int_0']\n }"], {}), "({cat_name: df[cat_name] for cat_name in DEFAULT_CAT_NAMES + [\n 'bucketize_int_0']})\n", (2905, 2992), True, 'import torcharrow as ta\n'), ((2676, 2727), 'torcharrow.functional.sigrid_hash', 'functional.sigrid_hash', (['df[cat_name]', 'salt', 'max_idx'], {}), '(df[cat_name], salt, max_idx)\n', (2698, 2727), False, 'from torcharrow import functional\n'), ((2755, 2797), 'torcharrow.functional.array_constructor', 'functional.array_constructor', (['df[cat_name]'], {}), '(df[cat_name])\n', (2783, 2797), False, 'from torcharrow import functional\n'), ((2825, 2859), 'torcharrow.functional.firstx', 'functional.firstx', (['df[cat_name]', '(1)'], {}), '(df[cat_name], 1)\n', (2842, 2859), False, 'from torcharrow import functional\n'), ((1205, 1229), 'torch.tensor', 'torch.tensor', (['kjt_values'], {}), '(kjt_values)\n', (1217, 1229), False, 'import torch\n'), ((1251, 1276), 'torch.tensor', 'torch.tensor', (['kjt_lengths'], {}), '(kjt_lengths)\n', (1263, 1276), False, 'import torch\n'), ((2195, 2245), 'torcharrow.functional.bucketize', 'functional.bucketize', (["df['int_0']", '[0.5, 1.0, 1.5]'], {}), "(df['int_0'], [0.5, 1.0, 1.5])\n", (2215, 2245), False, 'from torcharrow import functional\n'), ((3397, 3428), 'torcharrow.pytorch.rec.Dense', 'tap.rec.Dense', ([], {'batch_first': '(True)'}), '(batch_first=True)\n', (3410, 3428), True, 'import torcharrow.pytorch as tap\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Callable, Iterator, List, Tuple, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
def _default_line_join(lines: List[str]) -> str:
return "\n".join(lines)
@functional_datapipe("lines_to_paragraphs")
class ParagraphAggregatorIterDataPipe(IterDataPipe[Tuple[str, str]]):
r"""
Aggregates lines of text from the same file into a single paragraph (functional name: ``lines_to_paragraphs``).
Specifically, this accepts a DataPipe consisting of tuples of a file name and a line. For each tuple,
    it checks if the file name matches the file name from the previous tuple. If it does, the current line is
    joined with the existing paragraph. If the file names do not match, the existing paragraph is yielded and a new
paragraph starts.
Args:
source_datapipe: a DataPipe with tuples of a file name and a line
joiner: a function that joins a list of lines together
"""
def __init__(self, source_datapipe: IterDataPipe[Tuple[str, T_co]], joiner: Callable = _default_line_join) -> None:
self.source_datapipe: IterDataPipe[Tuple[str, T_co]] = source_datapipe
self.joiner: Callable = joiner
def __iter__(self) -> Iterator[Tuple[str, str]]:
buffer = []
prev_filename = None
for filename, line in self.source_datapipe:
if prev_filename is None:
prev_filename = filename
if line and prev_filename == filename:
buffer.append(line)
else:
if buffer:
yield prev_filename, self.joiner(buffer) # type: ignore[misc]
if line:
buffer = [line]
else:
buffer = []
prev_filename = filename
if buffer:
yield prev_filename, self.joiner(buffer) # type: ignore[misc]
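# Hedged usage sketch (toy in-memory input; ``IterableWrapper`` lives in
# torchdata.datapipes.iter and is not imported in this file):
#
#   dp = IterableWrapper([("a.txt", "Hello"), ("a.txt", "World"), ("b.txt", "Bye")])
#   list(dp.lines_to_paragraphs())
#   # -> [("a.txt", "Hello\nWorld"), ("b.txt", "Bye")]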
| [
"torchdata.datapipes.functional_datapipe"
] | [((223, 254), 'typing.TypeVar', 'TypeVar', (['"""T_co"""'], {'covariant': '(True)'}), "('T_co', covariant=True)\n", (230, 254), False, 'from typing import Callable, Iterator, List, Tuple, TypeVar\n'), ((337, 379), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""lines_to_paragraphs"""'], {}), "('lines_to_paragraphs')\n", (356, 379), False, 'from torchdata.datapipes import functional_datapipe\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from typing import Iterator, Tuple
class IoPathFileListerIterDataPipe(IterDataPipe[str]):
r""":class:`IoPathFileListerIterDataPipe`.
Iterable DataPipe to list the contents of the directory at the provided
    `root` URI. This yields the full URI for each file within the directory.
Args:
root: The base URI directory to list files from
"""
def __init__(self, *, root: str) -> None:
try:
from iopath.common.file_io import g_pathmgr
except ImportError:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this "
"datapipe. Please use `pip install iopath` or `conda install "
"iopath`"
"to install the package"
)
self.root: str = root
self.pathmgr = g_pathmgr
def __iter__(self) -> Iterator[str]:
if self.pathmgr.isfile(self.root):
yield self.root
else:
for file_name in self.pathmgr.ls(self.root):
yield os.path.join(self.root, file_name)
@functional_datapipe("load_file_by_iopath")
class IoPathFileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r""":class:`IoPathFileLoaderIterDataPipe`.
Iterable DataPipe to load files from input datapipe which contains
URIs. This yields a tuple of pathname and an opened filestream.
Args:
source_datapipe: Iterable DataPipe that provides the pathname
mode: Specifies the mode in which the file is opened. This arg will be
passed into `iopath.common.file_io.g_pathmgr.open` (internal only).
Check each subclass of `PathHandler` to determine which modes are
supported.
"""
def __init__(self, source_datapipe: IterDataPipe[str], mode: str = "r") -> None:
try:
from iopath.common.file_io import g_pathmgr
except ImportError:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this "
"datapipe. Please use `pip install iopath` or `conda install "
"iopath`"
"to install the package"
)
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.pathmgr = g_pathmgr
self.mode: str = mode
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_uri in self.source_datapipe:
with self.pathmgr.open(file_uri, self.mode) as file:
yield file_uri, StreamWrapper(file)
def __len__(self) -> int:
return len(self.source_datapipe)
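# Hedged usage sketch (the URI below is hypothetical and assumes an iopath ``PathHandler``
# for that scheme is registered):
#
#   lister_dp = IoPathFileListerIterDataPipe(root="s3://bucket/prefix")
#   loader_dp = IoPathFileLoaderIterDataPipe(lister_dp, mode="r")
#   for uri, stream in loader_dp:
#       ...  # stream is a StreamWrapper around the opened file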
| [
"torchdata.datapipes.utils.StreamWrapper",
"torchdata.datapipes.functional_datapipe"
] | [((1325, 1367), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""load_file_by_iopath"""'], {}), "('load_file_by_iopath')\n", (1344, 1367), False, 'from torchdata.datapipes import functional_datapipe\n'), ((1287, 1321), 'os.path.join', 'os.path.join', (['self.root', 'file_name'], {}), '(self.root, file_name)\n', (1299, 1321), False, 'import os\n'), ((2775, 2794), 'torchdata.datapipes.utils.StreamWrapper', 'StreamWrapper', (['file'], {}), '(file)\n', (2788, 2794), False, 'from torchdata.datapipes.utils import StreamWrapper\n')] |
import functools
import io
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
UnBatcher,
)
from torchvision.prototype.datasets.decoder import raw
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
read_mat,
hint_sharding,
hint_shuffling,
image_buffer_from_array,
)
from torchvision.prototype.features import Label, Image
class SVHN(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"svhn",
type=DatasetType.RAW,
dependencies=("scipy",),
categories=10,
homepage="http://ufldl.stanford.edu/housenumbers/",
valid_options=dict(split=("train", "test", "extra")),
)
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{config.split}_32x32.mat",
sha256=self._CHECKSUMS[config.split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, io.IOBase]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _collate_and_decode_sample(
self,
data: Tuple[np.ndarray, np.ndarray],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
image_array, label_array = data
if decoder is raw:
image = Image(image_array.transpose((2, 0, 1)))
else:
image_buffer = image_buffer_from_array(image_array)
image = decoder(image_buffer) if decoder else image_buffer # type: ignore[assignment]
return dict(
image=image,
label=Label(int(label_array) % 10),
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
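# Note on the label handling above: in the original SVHN .mat files the digit "0" is stored
# with label 10, so ``int(label_array) % 10`` maps it back to 0 while leaving 1-9 unchanged.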
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.UnBatcher"
] | [((1339, 1462), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['f"""http://ufldl.stanford.edu/housenumbers/{config.split}_32x32.mat"""'], {'sha256': 'self._CHECKSUMS[config.split]'}), "(f'http://ufldl.stanford.edu/housenumbers/{config.split}_32x32.mat'\n , sha256=self._CHECKSUMS[config.split])\n", (1351, 1462), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1667, 1683), 'torchvision.prototype.datasets.utils._internal.read_mat', 'read_mat', (['buffer'], {}), '(buffer)\n', (1675, 1683), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling, image_buffer_from_array\n'), ((2714, 2754), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._read_images_and_labels'], {}), '(dp, self._read_images_and_labels)\n', (2720, 2754), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher\n'), ((2768, 2781), 'torchdata.datapipes.iter.UnBatcher', 'UnBatcher', (['dp'], {}), '(dp)\n', (2777, 2781), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher\n'), ((2795, 2812), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (2808, 2812), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling, image_buffer_from_array\n'), ((2826, 2844), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (2840, 2844), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling, image_buffer_from_array\n'), ((2205, 2241), 'torchvision.prototype.datasets.utils._internal.image_buffer_from_array', 'image_buffer_from_array', (['image_array'], {}), '(image_array)\n', (2228, 2241), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling, image_buffer_from_array\n'), ((2871, 2938), 'functools.partial', 'functools.partial', (['self._collate_and_decode_sample'], {'decoder': 'decoder'}), '(self._collate_and_decode_sample, decoder=decoder)\n', (2888, 2938), False, 'import functools\n')] |
import enum
import functools
import pathlib
import re
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, Match, cast, Union
from torchdata.datapipes.iter import (
IterDataPipe,
LineReader,
IterKeyZipper,
Mapper,
Filter,
Demultiplexer,
TarArchiveLoader,
Enumerator,
)
from torchvision.prototype.datasets.utils import (
OnlineResource,
ManualDownloadResource,
Dataset,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
getitem,
read_mat,
hint_sharding,
hint_shuffling,
read_categories_file,
path_accessor,
)
from torchvision.prototype.features import Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "imagenet"
@register_info(NAME)
def _info() -> Dict[str, Any]:
categories, wnids = zip(*read_categories_file(NAME))
return dict(categories=categories, wnids=wnids)
class ImageNetResource(ManualDownloadResource):
def __init__(self, **kwargs: Any) -> None:
super().__init__("Register on https://image-net.org/ and follow the instructions there.", **kwargs)
class ImageNetDemux(enum.IntEnum):
META = 0
LABEL = 1
@register_dataset(NAME)
class ImageNet(Dataset):
"""
- **homepage**: https://www.image-net.org/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
info = _info()
categories, wnids = info["categories"], info["wnids"]
self._categories = categories
self._wnids = wnids
self._wnid_to_category = dict(zip(wnids, categories))
super().__init__(root, skip_integrity_check=skip_integrity_check)
_IMAGES_CHECKSUMS = {
"train": "b08200a27a8e34218a0e58fde36b0fe8f73bc377f4acea2d91602057c3ca45bb",
"val": "c7e06a6c0baccf06d8dbeb6577d71efff84673a5dbdd50633ab44f8ea0456ae0",
"test_v10102019": "9cf7f8249639510f17d3d8a0deb47cd22a435886ba8e29e2b3223e65a4079eb4",
}
def _resources(self) -> List[OnlineResource]:
name = "test_v10102019" if self._split == "test" else self._split
images = ImageNetResource(
file_name=f"ILSVRC2012_img_{name}.tar",
sha256=self._IMAGES_CHECKSUMS[name],
)
resources: List[OnlineResource] = [images]
if self._split == "val":
devkit = ImageNetResource(
file_name="ILSVRC2012_devkit_t12.tar.gz",
sha256="b59243268c0d266621fd587d2018f69e906fb22875aca0e295b48cafaa927953",
)
resources.append(devkit)
return resources
_TRAIN_IMAGE_NAME_PATTERN = re.compile(r"(?P<wnid>n\d{8})_\d+[.]JPEG")
def _prepare_train_data(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
path = pathlib.Path(data[0])
wnid = cast(Match[str], self._TRAIN_IMAGE_NAME_PATTERN.match(path.name))["wnid"]
label = Label.from_category(self._wnid_to_category[wnid], categories=self._categories)
return (label, wnid), data
def _prepare_test_data(self, data: Tuple[str, BinaryIO]) -> Tuple[None, Tuple[str, BinaryIO]]:
return None, data
def _classifiy_devkit(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
return {
"meta.mat": ImageNetDemux.META,
"ILSVRC2012_validation_ground_truth.txt": ImageNetDemux.LABEL,
}.get(pathlib.Path(data[0]).name)
# Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
# and n03126707 are labeled 'crane' while the first means the bird and the latter means the construction equipment
_WNID_MAP = {
"n03126707": "construction crane",
"n03710721": "tank suit",
}
def _extract_categories_and_wnids(self, data: Tuple[str, BinaryIO]) -> List[Tuple[str, str]]:
synsets = read_mat(data[1], squeeze_me=True)["synsets"]
return [
(self._WNID_MAP.get(wnid, category.split(",", 1)[0]), wnid)
for _, wnid, category, _, num_children, *_ in synsets
# if num_children > 0, we are looking at a superclass that has no direct instance
if num_children == 0
]
def _imagenet_label_to_wnid(self, imagenet_label: str, *, wnids: Tuple[str, ...]) -> str:
return wnids[int(imagenet_label) - 1]
_VAL_TEST_IMAGE_NAME_PATTERN = re.compile(r"ILSVRC2012_(val|test)_(?P<id>\d{8})[.]JPEG")
def _val_test_image_key(self, path: pathlib.Path) -> int:
return int(self._VAL_TEST_IMAGE_NAME_PATTERN.match(path.name)["id"]) # type: ignore[index]
def _prepare_val_data(
self, data: Tuple[Tuple[int, str], Tuple[str, BinaryIO]]
) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
label_data, image_data = data
_, wnid = label_data
label = Label.from_category(self._wnid_to_category[wnid], categories=self._categories)
return (label, wnid), image_data
def _prepare_sample(
self,
data: Tuple[Optional[Tuple[Label, str]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
label_data, (path, buffer) = data
return dict(
dict(zip(("label", "wnid"), label_data if label_data else (None, None))),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split in {"train", "test"}:
dp = resource_dps[0]
# the train archive is a tar of tars
if self._split == "train":
dp = TarArchiveLoader(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if self._split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
_, wnids = zip(*next(iter(meta_dp)))
label_dp = LineReader(label_dp, decode=True, return_path=False)
# We cannot use self._wnids here, since we use a different order than the dataset
label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
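            # ``Enumerator(label_dp, 1)`` produced (1-based index, wnid) pairs; the zipper below
            # matches that index against the numeric id parsed from the
            # "ILSVRC2012_val_XXXXXXXX.JPEG" file name of each image.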
dp = IterKeyZipper(
label_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor(self._val_test_image_key),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._prepare_val_data)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 1_281_167,
"val": 50_000,
"test": 100_000,
}[self._split]
def _filter_meta(self, data: Tuple[str, Any]) -> bool:
return self._classifiy_devkit(data) == ImageNetDemux.META
def _generate_categories(self) -> List[Tuple[str, ...]]:
self._split = "val"
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, self._filter_meta)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
categories_and_wnids = cast(List[Tuple[str, ...]], next(iter(meta_dp)))
categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
return categories_and_wnids
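# Hedged usage sketch (the root path is hypothetical; the ImageNet archives must be placed
# there manually, as stated by ``ImageNetResource``):
#
#   dataset = ImageNet("/datasets/imagenet", split="val")
#   sample = next(iter(dataset))  # dict with "label", "wnid", "path" and "image" entries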
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.TarArchiveLoader",
"torchdata.datapipes.iter.Enumerator",
"torchdata.datapipes.iter.Filter"
] | [((2802, 2845), 're.compile', 're.compile', (['"""(?P<wnid>n\\\\d{8})_\\\\d+[.]JPEG"""'], {}), "('(?P<wnid>n\\\\d{8})_\\\\d+[.]JPEG')\n", (2812, 2845), False, 'import re\n'), ((4568, 4625), 're.compile', 're.compile', (['"""ILSVRC2012_(val|test)_(?P<id>\\\\d{8})[.]JPEG"""'], {}), "('ILSVRC2012_(val|test)_(?P<id>\\\\d{8})[.]JPEG')\n", (4578, 4625), False, 'import re\n'), ((2974, 2995), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (2986, 2995), False, 'import pathlib\n'), ((3101, 3179), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['self._wnid_to_category[wnid]'], {'categories': 'self._categories'}), '(self._wnid_to_category[wnid], categories=self._categories)\n', (3120, 3179), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((5022, 5100), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['self._wnid_to_category[wnid]'], {'categories': 'self._categories'}), '(self._wnid_to_category[wnid], categories=self._categories)\n', (5041, 5100), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((7138, 7170), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (7144, 7170), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((7653, 7689), 'torchdata.datapipes.iter.Filter', 'Filter', (['devkit_dp', 'self._filter_meta'], {}), '(devkit_dp, self._filter_meta)\n', (7659, 7689), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((7708, 7759), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['meta_dp', 'self._extract_categories_and_wnids'], {}), '(meta_dp, self._extract_categories_and_wnids)\n', (7714, 7759), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((843, 869), 'torchvision.prototype.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (863, 869), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((4053, 4087), 'torchvision.prototype.datasets.utils._internal.read_mat', 'read_mat', (['data[1]'], {'squeeze_me': '(True)'}), '(data[1], squeeze_me=True)\n', (4061, 4087), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((5831, 5849), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (5845, 5849), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((5867, 5884), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (5880, 5884), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((5902, 5998), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', "(self._prepare_train_data if self._split == 'train' else self.\n _prepare_test_data)"], {}), "(dp, 
self._prepare_train_data if self._split == 'train' else self.\n _prepare_test_data)\n", (5908, 5998), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6115, 6220), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['devkit_dp', '(2)', 'self._classifiy_devkit'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(devkit_dp, 2, self._classifiy_devkit, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (6128, 6220), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6270, 6321), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['meta_dp', 'self._extract_categories_and_wnids'], {}), '(meta_dp, self._extract_categories_and_wnids)\n', (6276, 6321), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6395, 6447), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['label_dp'], {'decode': '(True)', 'return_path': '(False)'}), '(label_dp, decode=True, return_path=False)\n', (6405, 6447), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6698, 6721), 'torchdata.datapipes.iter.Enumerator', 'Enumerator', (['label_dp', '(1)'], {}), '(label_dp, 1)\n', (6708, 6721), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6745, 6769), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['label_dp'], {}), '(label_dp)\n', (6759, 6769), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((6793, 6816), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['label_dp'], {}), '(label_dp)\n', (6806, 6816), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((7087, 7121), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_val_data'], {}), '(dp, self._prepare_val_data)\n', (7093, 7121), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((3570, 3591), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3582, 3591), False, 'import pathlib\n'), ((5470, 5500), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (5492, 5500), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((5792, 5812), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['dp'], {}), '(dp)\n', (5808, 5812), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6582, 6642), 'functools.partial', 'functools.partial', (['self._imagenet_label_to_wnid'], {'wnids': 'wnids'}), '(self._imagenet_label_to_wnid, wnids=wnids)\n', (6599, 6642), False, 'import functools\n'), ((6926, 6936), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (6933, 6936), 
False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n'), ((6965, 7004), 'torchvision.prototype.datasets.utils._internal.path_accessor', 'path_accessor', (['self._val_test_image_key'], {}), '(self._val_test_image_key)\n', (6978, 7004), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, read_mat, hint_sharding, hint_shuffling, read_categories_file, path_accessor\n')] |
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial," "Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
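# Hedged usage sketch (the root path is hypothetical; the archive is fetched on first use
# unless it is already present under root):
#
#   dataset = EuroSAT("/datasets/eurosat")
#   sample = next(iter(dataset))  # dict with "label", "path" and "image" (EncodedImage)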
| [
"torchdata.datapipes.iter.Mapper"
] | [((1887, 1905), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (1901, 1905), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1919, 1936), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1932, 1936), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1952, 1984), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (1958, 1984), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper\n'), ((1224, 1367), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://madm.dfki.de/files/sentinel/EuroSAT.zip"""'], {'sha256': '"""8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd"""'}), "('https://madm.dfki.de/files/sentinel/EuroSAT.zip', sha256=\n '8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd')\n", (1236, 1367), False, 'from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource\n'), ((1540, 1558), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1552, 1558), False, 'import pathlib\n'), ((1610, 1668), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['category'], {'categories': 'self._categories'}), '(category, categories=self._categories)\n', (1629, 1668), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1711, 1741), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (1733, 1741), False, 'from torchvision.prototype.features import EncodedImage, Label\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
from functools import partial
from typing import List, Optional, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
try: # TODO: Create dependency on TorchArrow?
import pyarrow.parquet as parquet
import torcharrow
except ImportError:
torcharrow = None
parquet = None
T_co = TypeVar("T_co")
@functional_datapipe("dataframe")
class DataFrameMakerIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]]
r"""
Takes rows of data, batches a number of them together and creates `TorchArrow`
DataFrames (functional name: ``dataframe``).
Note:
There is a trade-off between having a large number of rows within a DataFrame and usage of memory. Please
choose a value carefully.
Args:
source_dp: IterDataPipe containing rows of data
dataframe_size: number of rows of data within each DataFrame
dtype: specify the `TorchArrow` dtype for the DataFrame
columns: List of str that specifies the column names of the DataFrame
device: specify the device on which the DataFrame will be stored
"""
def __new__(
cls,
source_dp: IterDataPipe[T_co],
dataframe_size: int = 1000, # or Page Size
dtype=None, # Optional[torcharrow.dtypes.DType]
columns: Optional[List[str]] = None,
device: str = "",
):
if torcharrow is None:
raise ImportError(
"The library 'torcharrow' is necessary for this DataPipe but it is not available."
"Please visit https://github.com/facebookresearch/torcharrow/ to install it."
)
# In this version, DF tracing is not available, which would allow DataPipe to run DataFrame operations
batch_dp = source_dp.batch(dataframe_size)
df_dp = batch_dp.map(partial(torcharrow.DataFrame, dtype=dtype, columns=columns, device=device))
return df_dp
@functional_datapipe("load_parquet_as_df")
class ParquetDFLoaderIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]]
r"""
Takes in paths to Parquet files and return a `TorchArrow` DataFrame for each row group
within a Parquet file (functional name: ``load_parquet_as_df``).
Args:
source_dp: source DataPipe containing paths to the Parquet files
columns: List of `str` that specifies the column names of the DataFrame
use_threads: if ``True``, Parquet reader will perform multi-threaded column reads
dtype: specify the `TorchArrow` dtype for the DataFrame
device: specify the device on which the DataFrame will be stored
"""
def __init__(
self,
source_dp: IterDataPipe[str],
dtype=None, # Optional[torcharrow.dtypes.DType]
columns: Optional[List[str]] = None,
device: str = "",
use_threads: bool = False,
):
if torcharrow is None:
raise ImportError(
"The library 'torcharrow' is necessary for this DataPipe but it is not available."
"Please visit https://github.com/facebookresearch/torcharrow/ to install it."
)
if parquet is None:
raise ImportError("The library 'parquet' is necessary for this DataPipe but it is not available.")
self.source_dp = source_dp
self.columns = columns
self.use_threads = use_threads
self.dtype = dtype
self.device = device
def __iter__(self):
for path in self.source_dp:
parquet_file = parquet.ParquetFile(path)
num_row_groups = parquet_file.num_row_groups
for i in range(num_row_groups):
# TODO: More fine-grain control over the number of rows or row group per DataFrame
row_group = parquet_file.read_row_group(i, columns=self.columns, use_threads=self.use_threads)
yield torcharrow.from_arrow(row_group, dtype=self.dtype)
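# Hedged usage sketch (the file path is hypothetical; ``IterableWrapper`` comes from
# torchdata.datapipes.iter, and pyarrow/torcharrow must be installed):
#
#   source_dp = IterableWrapper(["/data/part-00000.parquet"])
#   for df in source_dp.load_parquet_as_df():
#       ...  # one torcharrow DataFrame per Parquet row group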
| [
"torchdata.datapipes.functional_datapipe"
] | [((404, 419), 'typing.TypeVar', 'TypeVar', (['"""T_co"""'], {}), "('T_co')\n", (411, 419), False, 'from typing import List, Optional, TypeVar\n'), ((423, 455), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""dataframe"""'], {}), "('dataframe')\n", (442, 455), False, 'from torchdata.datapipes import functional_datapipe\n'), ((2025, 2066), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""load_parquet_as_df"""'], {}), "('load_parquet_as_df')\n", (2044, 2066), False, 'from torchdata.datapipes import functional_datapipe\n'), ((1925, 1999), 'functools.partial', 'partial', (['torcharrow.DataFrame'], {'dtype': 'dtype', 'columns': 'columns', 'device': 'device'}), '(torcharrow.DataFrame, dtype=dtype, columns=columns, device=device)\n', (1932, 1999), False, 'from functools import partial\n'), ((3627, 3652), 'pyarrow.parquet.ParquetFile', 'parquet.ParquetFile', (['path'], {}), '(path)\n', (3646, 3652), True, 'import pyarrow.parquet as parquet\n'), ((3986, 4036), 'torcharrow.from_arrow', 'torcharrow.from_arrow', (['row_group'], {'dtype': 'self.dtype'}), '(row_group, dtype=self.dtype)\n', (4007, 4036), False, 'import torcharrow\n')] |
import abc
import functools
import io
import pathlib
import pickle
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Iterator, cast
import numpy as np
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Filter,
Mapper,
)
from torchvision.prototype.datasets.decoder import raw
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
hint_shuffling,
image_buffer_from_array,
path_comparator,
hint_sharding,
)
from torchvision.prototype.features import Label, Image
__all__ = ["Cifar10", "Cifar100"]
class CifarFileReader(IterDataPipe[Tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
@abc.abstractmethod
def _is_data_file(self, data: Tuple[str, io.IOBase], *, split: str) -> Optional[int]:
pass
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
type(self).__name__.lower(),
type=DatasetType.RAW,
homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
valid_options=dict(split=("train", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
_, file = data
return cast(Dict[str, Any], pickle.load(file, encoding="latin1"))
def _collate_and_decode(
self,
data: Tuple[np.ndarray, int],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
image_array, category_idx = data
image: Union[Image, io.BytesIO]
if decoder is raw:
image = Image(image_array)
else:
image_buffer = image_buffer_from_array(image_array.transpose((1, 2, 0)))
image = decoder(image_buffer) if decoder else image_buffer # type: ignore[assignment]
label = Label(category_idx, category=self.categories[category_idx])
return dict(image=image, label=label)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, functools.partial(self._is_data_file, split=config.split))
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, functools.partial(self._collate_and_decode, decoder=decoder))
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(List[str], next(iter(dp))[self._CATEGORIES_KEY])
class Cifar10(_CifarBase):
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
def _is_data_file(self, data: Tuple[str, Any], *, split: str) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if split == "train" else "test")
class Cifar100(_CifarBase):
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
def _is_data_file(self, data: Tuple[str, Any], *, split: str) -> bool:
path = pathlib.Path(data[0])
return path.name == split
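# Note on the two ``_is_data_file`` overrides above: the CIFAR-10 archive ships several
# "data_batch_*" members for the train split plus a single "test_batch", whereas the
# CIFAR-100 archive simply contains members named "train" and "test".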
| [
"torchdata.datapipes.iter.Mapper"
] | [((2739, 2798), 'torchvision.prototype.features.Label', 'Label', (['category_idx'], {'category': 'self.categories[category_idx]'}), '(category_idx, category=self.categories[category_idx])\n', (2744, 2798), False, 'from torchvision.prototype.features import Label, Image\n'), ((3197, 3223), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._unpickle'], {}), '(dp, self._unpickle)\n', (3203, 3223), False, 'from torchdata.datapipes.iter import IterDataPipe, Filter, Mapper\n'), ((3299, 3316), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (3312, 3316), False, 'from torchvision.prototype.datasets.utils._internal import hint_shuffling, image_buffer_from_array, path_comparator, hint_sharding\n'), ((3330, 3348), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (3344, 3348), False, 'from torchvision.prototype.datasets.utils._internal import hint_shuffling, image_buffer_from_array, path_comparator, hint_sharding\n'), ((3685, 3711), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._unpickle'], {}), '(dp, self._unpickle)\n', (3691, 3711), False, 'from torchdata.datapipes.iter import IterDataPipe, Filter, Mapper\n'), ((4125, 4146), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (4137, 4146), False, 'import pathlib\n'), ((4570, 4591), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (4582, 4591), False, 'import pathlib\n'), ((1877, 1970), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['f"""https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}"""'], {'sha256': 'self._SHA256'}), "(f'https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}', sha256=\n self._SHA256)\n", (1889, 1970), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((2156, 2192), 'pickle.load', 'pickle.load', (['file'], {'encoding': '"""latin1"""'}), "(file, encoding='latin1')\n", (2167, 2192), False, 'import pickle\n'), ((2505, 2523), 'torchvision.prototype.features.Image', 'Image', (['image_array'], {}), '(image_array)\n', (2510, 2523), False, 'from torchvision.prototype.features import Label, Image\n'), ((3125, 3182), 'functools.partial', 'functools.partial', (['self._is_data_file'], {'split': 'config.split'}), '(self._is_data_file, split=config.split)\n', (3142, 3182), False, 'import functools\n'), ((3375, 3435), 'functools.partial', 'functools.partial', (['self._collate_and_decode'], {'decoder': 'decoder'}), '(self._collate_and_decode, decoder=decoder)\n', (3392, 3435), False, 'import functools\n'), ((3625, 3670), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'self._META_FILE_NAME'], {}), "('name', self._META_FILE_NAME)\n", (3640, 3670), False, 'from torchvision.prototype.datasets.utils._internal import hint_shuffling, image_buffer_from_array, path_comparator, hint_sharding\n')] |
import os
import tarfile
import enum
import functools
import pathlib
from tqdm import tqdm
import h5py
import torch
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torch.utils.data import DataLoader2
from Dataset4EO import transforms
import pdb
import numpy as np
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Demultiplexer,
IterKeyZipper,
LineReader,
)
from torchdata.datapipes.map import SequenceWrapper
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "landslide4sense"
_TRAIN_LEN = 3799
_VAL_LEN = 245
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Landslide4Sense(Dataset):
"""
- **homepage**: https://www.iarai.ac.at/landslide4sense/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
data_info: bool = True,
skip_integrity_check: bool = False,
) -> None:
# There is currently no test split available
assert split != 'test'
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self.root = root
self.decom_dir = os.path.join(self.root, 'landslide4sense')
self._categories = _info()["categories"]
self.CLASSES = ('background', 'landslide')
self.PALETTE = [[128, 0, 0], [0, 128, 0]]
self.data_info = data_info
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"trainval": ("landslide4sense.tar", "c7f6678d50c7003eba47b3cace8053c9bfa6b4692cd1630fe2d6b7bec11ccc77"),
}
def decompress_integrity_check(self, decom_dir):
train_img_dir = os.path.join(decom_dir, 'train', 'img')
train_mask_dir = os.path.join(decom_dir, 'train', 'mask')
val_img_dir = os.path.join(decom_dir, 'val', 'img')
if not os.path.exists(train_img_dir) or not os.path.exists(train_mask_dir) or not os.path.exists(val_img_dir):
return False
num_train_img = len(os.listdir(train_img_dir))
num_train_mask = len(os.listdir(train_mask_dir))
num_val_img = len(os.listdir(val_img_dir))
return (num_train_img == _TRAIN_LEN) and \
(num_train_mask == _TRAIN_LEN) and \
(num_val_img == _VAL_LEN)
def _decompress_dir(self):
file_name, sha256 = self._TRAIN_VAL_ARCHIVES['trainval']
if not self.decompress_integrity_check(self.decom_dir):
print('Decompressing the tar file...')
with tarfile.open(os.path.join(self.root, file_name), 'r:gz') as tar:
tar.extractall(self.decom_dir)
tar.close()
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = self._TRAIN_VAL_ARCHIVES['trainval']
archive = HttpResource("https://syncandshare.lrz.de/getlink/fiLurHQ9Cy4NwvmPGYQe7RWM/{}".format(file_name), sha256=sha256)
return [archive]
def _prepare_sample_dp(self, idx):
iname = "{}/img/image_{}.h5".format(self._split, idx)
image_path = os.path.join(self.decom_dir, iname)
label_path = None
if self._split == 'train':
mname = "{}/mask/mask_{}.h5".format(self._split, idx)
label_path = os.path.join(self.decom_dir, mname)
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':label_path})})
return img_info
def _prepare_sample(self, idx):
iname = "{}/img/image_{}.h5".format(self._split, idx)
image_path = os.path.join(self.decom_dir, iname)
img = h5py.File(os.path.join(self.decom_dir, iname), 'r')['img'][()]
img = torch.tensor(img).permute(2, 0, 1)
label_path = None
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':label_path})})
if self._split == 'train':
mname = "{}/mask/mask_{}.h5".format(self._split, idx)
label_path = os.path.join(self.decom_dir, mname)
mask = h5py.File(os.path.join(self.decom_dir, mname), 'r')['mask'][()]
mask = torch.tensor(mask)
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':label_path})})
return (img_info, img, mask)
return (img_info, img)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
self._decompress_dir()
dp = SequenceWrapper(range(1, self.__len__()+1))
if not self.data_info:
ndp = Mapper(dp, self._prepare_sample)
ndp = hint_shuffling(ndp)
ndp = hint_sharding(ndp)
tfs = transforms.Compose(transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomResizedCrop((128, 128), scale=[0.5, 1]))
ndp = ndp.map(tfs)
else:
ndp = Mapper(dp, self._prepare_sample_dp)
ndp = hint_shuffling(ndp)
ndp = hint_sharding(ndp)
return ndp
def __len__(self) -> int:
return {
'train': _TRAIN_LEN,
'val': _VAL_LEN
}[self._split]
if __name__ == '__main__':
dp = Landslide4Sense('./')
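    # With the default ``data_info=True`` each element is a dict holding the .h5 image path and,
    # for the train split, the mask path; ``data_info=False`` instead yields decoded
    # (img_info, img[, mask]) tuples with the random flip/crop transforms applied.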
| [
"torchdata.datapipes.iter.Mapper"
] | [((1640, 1682), 'os.path.join', 'os.path.join', (['self.root', '"""landslide4sense"""'], {}), "(self.root, 'landslide4sense')\n", (1652, 1682), False, 'import os\n'), ((2169, 2208), 'os.path.join', 'os.path.join', (['decom_dir', '"""train"""', '"""img"""'], {}), "(decom_dir, 'train', 'img')\n", (2181, 2208), False, 'import os\n'), ((2234, 2274), 'os.path.join', 'os.path.join', (['decom_dir', '"""train"""', '"""mask"""'], {}), "(decom_dir, 'train', 'mask')\n", (2246, 2274), False, 'import os\n'), ((2297, 2334), 'os.path.join', 'os.path.join', (['decom_dir', '"""val"""', '"""img"""'], {}), "(decom_dir, 'val', 'img')\n", (2309, 2334), False, 'import os\n'), ((3559, 3594), 'os.path.join', 'os.path.join', (['self.decom_dir', 'iname'], {}), '(self.decom_dir, iname)\n', (3571, 3594), False, 'import os\n'), ((4023, 4058), 'os.path.join', 'os.path.join', (['self.decom_dir', 'iname'], {}), '(self.decom_dir, iname)\n', (4035, 4058), False, 'import os\n'), ((1051, 1077), 'Dataset4EO.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (1071, 1077), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((2509, 2534), 'os.listdir', 'os.listdir', (['train_img_dir'], {}), '(train_img_dir)\n', (2519, 2534), False, 'import os\n'), ((2565, 2591), 'os.listdir', 'os.listdir', (['train_mask_dir'], {}), '(train_mask_dir)\n', (2575, 2591), False, 'import os\n'), ((2619, 2642), 'os.listdir', 'os.listdir', (['val_img_dir'], {}), '(val_img_dir)\n', (2629, 2642), False, 'import os\n'), ((3748, 3783), 'os.path.join', 'os.path.join', (['self.decom_dir', 'mname'], {}), '(self.decom_dir, mname)\n', (3760, 3783), False, 'import os\n'), ((4423, 4458), 'os.path.join', 'os.path.join', (['self.decom_dir', 'mname'], {}), '(self.decom_dir, mname)\n', (4435, 4458), False, 'import os\n'), ((4561, 4579), 'torch.tensor', 'torch.tensor', (['mask'], {}), '(mask)\n', (4573, 4579), False, 'import torch\n'), ((4971, 5003), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (4977, 5003), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((5022, 5041), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['ndp'], {}), '(ndp)\n', (5036, 5041), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5060, 5078), 'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['ndp'], {}), '(ndp)\n', (5073, 5078), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5371, 5406), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample_dp'], {}), '(dp, self._prepare_sample_dp)\n', (5377, 5406), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((5425, 5444), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['ndp'], {}), '(ndp)\n', (5439, 5444), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5463, 5481), 
'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['ndp'], {}), '(ndp)\n', (5476, 5481), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((2351, 2380), 'os.path.exists', 'os.path.exists', (['train_img_dir'], {}), '(train_img_dir)\n', (2365, 2380), False, 'import os\n'), ((2388, 2418), 'os.path.exists', 'os.path.exists', (['train_mask_dir'], {}), '(train_mask_dir)\n', (2402, 2418), False, 'import os\n'), ((2426, 2453), 'os.path.exists', 'os.path.exists', (['val_img_dir'], {}), '(val_img_dir)\n', (2440, 2453), False, 'import os\n'), ((4150, 4167), 'torch.tensor', 'torch.tensor', (['img'], {}), '(img)\n', (4162, 4167), False, 'import torch\n'), ((5116, 5149), 'Dataset4EO.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (5147, 5149), False, 'from Dataset4EO import transforms\n'), ((5184, 5215), 'Dataset4EO.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (5213, 5215), False, 'from Dataset4EO import transforms\n'), ((5250, 5306), 'Dataset4EO.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(128, 128)'], {'scale': '[0.5, 1]'}), '((128, 128), scale=[0.5, 1])\n', (5278, 5306), False, 'from Dataset4EO import transforms\n'), ((3033, 3067), 'os.path.join', 'os.path.join', (['self.root', 'file_name'], {}), '(self.root, file_name)\n', (3045, 3067), False, 'import os\n'), ((4083, 4118), 'os.path.join', 'os.path.join', (['self.decom_dir', 'iname'], {}), '(self.decom_dir, iname)\n', (4095, 4118), False, 'import os\n'), ((4488, 4523), 'os.path.join', 'os.path.join', (['self.decom_dir', 'mname'], {}), '(self.decom_dir, mname)\n', (4500, 4523), False, 'import os\n')] |
import csv
import io
from typing import Any, Callable, Dict, List, Optional, Tuple, Iterator, Sequence
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Shuffler,
Filter,
ZipArchiveReader,
Zipper,
IterKeyZipper,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
GDriveResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[Any, io.IOBase]],
*,
fieldnames: Optional[Sequence[str]] = None,
) -> None:
self.datapipe = datapipe
self.fieldnames = fieldnames
def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
for _, file in self.datapipe:
file = (line.decode() for line in file)
if self.fieldnames:
fieldnames = self.fieldnames
else:
# The first row is skipped, because it only contains the number of samples
next(file)
# Empty field names are filtered out, because some files have an extra white space after the header
# line, which is recognized as extra column
fieldnames = [name for name in next(csv.reader([next(file)], dialect="celeba")) if name]
# Some files do not include a label for the image ID column
if fieldnames[0] != "image_id":
fieldnames.insert(0, "image_id")
for line in csv.DictReader(file, fieldnames=fieldnames, dialect="celeba"):
yield line.pop("image_id"), line
class CelebA(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"celeba",
type=DatasetType.IMAGE,
homepage="https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html",
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
splits = GDriveResource(
"0B7EVK8r0v71pY0NSMzRuSXJEVkk",
sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
file_name="list_eval_partition.txt",
)
images = GDriveResource(
"0B7EVK8r0v71pZjFTYXZWM3FlRnM",
sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
file_name="img_align_celeba.zip",
)
identities = GDriveResource(
"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
file_name="identity_CelebA.txt",
)
attributes = GDriveResource(
"0B7EVK8r0v71pblRyaVFSWGxPY0U",
sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
file_name="list_attr_celeba.txt",
)
bboxes = GDriveResource(
"0B7EVK8r0v71pbThiMVRxWXZ4dU0",
sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
file_name="list_bbox_celeba.txt",
)
landmarks = GDriveResource(
"0B7EVK8r0v71pd0FJY3Blby1HUTQ",
sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
file_name="list_landmarks_align_celeba.txt",
)
return [splits, images, identities, attributes, bboxes, landmarks]
_SPLIT_ID_TO_NAME = {
"0": "train",
"1": "valid",
"2": "test",
}
def _filter_split(self, data: Tuple[str, Dict[str, str]], *, split: str) -> bool:
return self._SPLIT_ID_TO_NAME[data[1]["split_id"]] == split
def _collate_anns(self, data: Tuple[Tuple[str, Dict[str, str]], ...]) -> Tuple[str, Dict[str, Dict[str, str]]]:
(image_id, identity), (_, attributes), (_, bbox), (_, landmarks) = data
return image_id, dict(identity=identity, attributes=attributes, bbox=bbox, landmarks=landmarks)
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, Tuple[str, List[str]], Tuple[str, io.IOBase]], Tuple[str, Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, _, image_data = split_and_image_data
path, buffer = image_data
_, ann = ann_data
image = decoder(buffer) if decoder else buffer
identity = int(ann["identity"]["identity"])
attributes = {attr: value == "1" for attr, value in ann["attributes"].items()}
bbox = torch.tensor([int(ann["bbox"][key]) for key in ("x_1", "y_1", "width", "height")])
landmarks = {
landmark: torch.tensor((int(ann["landmarks"][f"{landmark}_x"]), int(ann["landmarks"][f"{landmark}_y"])))
for landmark in {key[:-2] for key in ann["landmarks"].keys()}
}
return dict(
path=path,
image=image,
identity=identity,
attributes=attributes,
bbox=bbox,
landmarks=landmarks,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bboxes_dp, landmarks_dp = resource_dps
splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
splits_dp = Filter(splits_dp, self._filter_split, fn_kwargs=dict(split=config.split))
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
images_dp = ZipArchiveReader(images_dp)
anns_dp = Zipper(
*[
CelebACSVParser(dp, fieldnames=fieldnames)
for dp, fieldnames in (
(identities_dp, ("image_id", "identity")),
(attributes_dp, None),
(bboxes_dp, None),
(landmarks_dp, None),
)
]
)
anns_dp = Mapper(anns_dp, self._collate_anns)
dp = IterKeyZipper(
splits_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
dp = IterKeyZipper(dp, anns_dp, key_fn=getitem(0), buffer_size=INFINITE_BUFFER_SIZE)
return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder))
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.ZipArchiveReader"
] | [((532, 600), 'csv.register_dialect', 'csv.register_dialect', (['"""celeba"""'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "('celeba', delimiter=' ', skipinitialspace=True)\n", (552, 600), False, 'import csv\n'), ((1949, 2061), 'torchvision.prototype.datasets.utils.DatasetInfo', 'DatasetInfo', (['"""celeba"""'], {'type': 'DatasetType.IMAGE', 'homepage': '"""https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html"""'}), "('celeba', type=DatasetType.IMAGE, homepage=\n 'https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html')\n", (1960, 2061), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((2194, 2361), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""0B7EVK8r0v71pY0NSMzRuSXJEVkk"""'], {'sha256': '"""fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7"""', 'file_name': '"""list_eval_partition.txt"""'}), "('0B7EVK8r0v71pY0NSMzRuSXJEVkk', sha256=\n 'fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7',\n file_name='list_eval_partition.txt')\n", (2208, 2361), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((2417, 2581), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""0B7EVK8r0v71pZjFTYXZWM3FlRnM"""'], {'sha256': '"""46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74"""', 'file_name': '"""img_align_celeba.zip"""'}), "('0B7EVK8r0v71pZjFTYXZWM3FlRnM', sha256=\n '46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74',\n file_name='img_align_celeba.zip')\n", (2431, 2581), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((2641, 2809), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS"""'], {'sha256': '"""c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0"""', 'file_name': '"""identity_CelebA.txt"""'}), "('1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS', sha256=\n 'c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0',\n file_name='identity_CelebA.txt')\n", (2655, 2809), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((2869, 3033), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""0B7EVK8r0v71pblRyaVFSWGxPY0U"""'], {'sha256': '"""f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0"""', 'file_name': '"""list_attr_celeba.txt"""'}), "('0B7EVK8r0v71pblRyaVFSWGxPY0U', sha256=\n 'f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0',\n file_name='list_attr_celeba.txt')\n", (2883, 3033), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((3089, 3253), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""0B7EVK8r0v71pbThiMVRxWXZ4dU0"""'], {'sha256': '"""7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b"""', 'file_name': '"""list_bbox_celeba.txt"""'}), "('0B7EVK8r0v71pbThiMVRxWXZ4dU0', sha256=\n '7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b',\n file_name='list_bbox_celeba.txt')\n", (3103, 3253), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), 
((3312, 3487), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', (['"""0B7EVK8r0v71pd0FJY3Blby1HUTQ"""'], {'sha256': '"""6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b"""', 'file_name': '"""list_landmarks_align_celeba.txt"""'}), "('0B7EVK8r0v71pd0FJY3Blby1HUTQ', sha256=\n '6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b',\n file_name='list_landmarks_align_celeba.txt')\n", (3326, 3487), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, GDriveResource, OnlineResource, DatasetType\n'), ((5810, 5863), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['splits_dp'], {'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)\n', (5818, 5863), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, ZipArchiveReader, Zipper, IterKeyZipper\n'), ((5885, 5912), 'torchdata.datapipes.iter.ZipArchiveReader', 'ZipArchiveReader', (['images_dp'], {}), '(images_dp)\n', (5901, 5912), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, ZipArchiveReader, Zipper, IterKeyZipper\n'), ((6301, 6336), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['anns_dp', 'self._collate_anns'], {}), '(anns_dp, self._collate_anns)\n', (6307, 6336), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, ZipArchiveReader, Zipper, IterKeyZipper\n'), ((1756, 1817), 'csv.DictReader', 'csv.DictReader', (['file'], {'fieldnames': 'fieldnames', 'dialect': '"""celeba"""'}), "(file, fieldnames=fieldnames, dialect='celeba')\n", (1770, 1817), False, 'import csv\n'), ((6431, 6441), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (6438, 6441), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor\n'), ((6466, 6487), 'torchvision.prototype.datasets.utils._internal.path_accessor', 'path_accessor', (['"""name"""'], {}), "('name')\n", (6479, 6487), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor\n'), ((6619, 6629), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (6626, 6629), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import csv
import os
from functools import partial
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_create_dataset_directory,
_wrap_split_argument,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
# we import HttpReader from _download_hooks so we can swap out public URLs
# with internal URLs when the dataset is used within Facebook
from torchtext._download_hooks import HttpReader
URL = "https://cims.nyu.edu/~sbowman/multinli/multinli_1.0.zip"
MD5 = "0f70aaf66293b3c088a864891db51353"
NUM_LINES = {
"train": 392702,
"dev_matched": 9815,
"dev_mismatched": 9832,
}
_PATH = "multinli_1.0.zip"
DATASET_NAME = "MNLI"
_EXTRACTED_FILES = {
"train": "multinli_1.0_train.txt",
"dev_matched": "multinli_1.0_dev_matched.txt",
"dev_mismatched": "multinli_1.0_dev_mismatched.txt",
}
LABEL_TO_INT = {"entailment": 0, "neutral": 1, "contradiction": 2}
def _filepath_fn(root, x=None):
return os.path.join(root, os.path.basename(x))
def _extracted_filepath_fn(root, split, _=None):
return os.path.join(root, _EXTRACTED_FILES[split])
def _filter_fn(split, x):
return _EXTRACTED_FILES[split] in x[0]
def _filter_res(x):
return x[0] in LABEL_TO_INT
def _modify_res(x):
return (LABEL_TO_INT[x[0]], x[5], x[6])
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev_matched", "dev_mismatched"))
def MNLI(root, split):
"""MNLI Dataset
For additional details refer to https://cims.nyu.edu/~sbowman/multinli/
Number of lines per split:
- train: 392702
- dev_matched: 9815
- dev_mismatched: 9832
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev_matched`, `dev_mismatched`)
:returns: DataPipe that yields tuple of text and label (0 to 2).
:rtype: Tuple[int, str, str]
"""
# TODO Remove this after removing conditional dependency
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root, URL): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").load_from_zip().filter(partial(_filter_fn, split))
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
parsed_data = (
data_dp.parse_csv(skip_lines=1, delimiter="\t", quoting=csv.QUOTE_NONE).filter(_filter_res).map(_modify_res)
)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
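# Minimal usage sketch (added for illustration, not part of the original file; assumes `torchdata`
# is installed and the archive can be downloaded and cached under the default root):
#
#     train_dp = MNLI(split="train")
#     label, premise, hypothesis = next(iter(train_dp))  # label is an int in {0, 1, 2}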
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper"
] | [((275, 307), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (294, 307), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1450, 1502), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (1475, 1502), False, 'from torchtext.data.datasets_utils import _create_dataset_directory, _wrap_split_argument\n'), ((1504, 1568), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev_matched', 'dev_mismatched')"], {}), "(('train', 'dev_matched', 'dev_mismatched'))\n", (1524, 1568), False, 'from torchtext.data.datasets_utils import _create_dataset_directory, _wrap_split_argument\n'), ((1212, 1255), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1224, 1255), False, 'import os\n'), ((2451, 2473), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (2466, 2473), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((3127, 3178), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'encoding': '"""utf-8"""'}), "(cache_decompressed_dp, encoding='utf-8')\n", (3137, 3178), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1129, 1148), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1145, 1148), False, 'import os\n'), ((2238, 2270), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (2257, 2270), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((2982, 3008), 'functools.partial', 'partial', (['_filter_fn', 'split'], {}), '(_filter_fn, split)\n', (2989, 3008), False, 'from functools import partial\n'), ((2542, 2569), 'functools.partial', 'partial', (['_filepath_fn', 'root'], {}), '(_filepath_fn, root)\n', (2549, 2569), False, 'from functools import partial\n'), ((2678, 2709), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (2688, 2709), False, 'from torchtext._download_hooks import HttpReader\n'), ((2831, 2875), 'functools.partial', 'partial', (['_extracted_filepath_fn', 'root', 'split'], {}), '(_extracted_filepath_fn, root, split)\n', (2838, 2875), False, 'from functools import partial\n'), ((2915, 2958), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (2925, 2958), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n')] |
import functools
import io
import pathlib
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
import numpy as np
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Demultiplexer,
Filter,
IterKeyZipper,
LineReader,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
read_mat,
getitem,
path_accessor,
path_comparator,
hint_sharding,
hint_shuffling,
)
class SBD(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"sbd",
type=DatasetType.IMAGE,
dependencies=("scipy",),
homepage="http://home.bharathh.info/pubs/codes/SBD/download.html",
valid_options=dict(
split=("train", "val", "train_noval"),
boundaries=(True, False),
segmentation=(False, True),
),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = HttpResource(
"https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz",
sha256="6a5a2918d5c73ce032fdeba876574d150d9d04113ab87540a1304cbcc715be53",
)
extra_split = HttpResource(
"http://home.bharathh.info/pubs/codes/SBD/train_noval.txt",
sha256="0b2068f7a359d2907431803e1cd63bf6162da37d7d503b589d3b08c6fd0c2432",
)
return [archive, extra_split]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
parent, grandparent, *_ = path.parents
if parent.name == "dataset":
return 0
elif grandparent.name == "dataset":
if parent.name == "img":
return 1
elif parent.name == "cls":
return 2
else:
return None
else:
return None
def _decode_ann(
self, data: Dict[str, Any], *, decode_boundaries: bool, decode_segmentation: bool
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
raw_anns = data["GTcls"][0]
raw_boundaries = raw_anns["Boundaries"][0]
raw_segmentation = raw_anns["Segmentation"][0]
# the boundaries are stored in sparse CSC format, which is not supported by PyTorch
boundaries = (
torch.as_tensor(np.stack([raw_boundary[0].toarray() for raw_boundary in raw_boundaries]))
if decode_boundaries
else None
)
segmentation = torch.as_tensor(raw_segmentation) if decode_segmentation else None
return boundaries, segmentation
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[Any, Tuple[str, io.IOBase]], Tuple[str, io.IOBase]],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
image = decoder(image_buffer) if decoder else image_buffer
if config.boundaries or config.segmentation:
boundaries, segmentation = self._decode_ann(
read_mat(ann_buffer), decode_boundaries=config.boundaries, decode_segmentation=config.segmentation
)
else:
boundaries = segmentation = None
return dict(
image_path=image_path,
image=image,
ann_path=ann_path,
boundaries=boundaries,
segmentation=segmentation,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp, extra_split_dp = resource_dps
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
buffer_size=INFINITE_BUFFER_SIZE,
drop_none=True,
)
if config.split == "train_noval":
split_dp = extra_split_dp
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, config=config, decoder=decoder))
def _generate_categories(self, root: pathlib.Path) -> Tuple[str, ...]:
dp = self.resources(self.default_config)[0].load(pathlib.Path(root) / self.name)
dp = Filter(dp, path_comparator("name", "category_names.m"))
dp = LineReader(dp)
dp = Mapper(dp, bytes.decode, input_col=1)
lines = tuple(zip(*iter(dp)))[1]
pattern = re.compile(r"\s*'(?P<category>\w+)';\s*%(?P<label>\d+)")
categories_and_labels = cast(
List[Tuple[str, ...]],
[
pattern.match(line).groups() # type: ignore[union-attr]
# the first and last line contain no information
for line in lines[1:-1]
],
)
categories_and_labels.sort(key=lambda category_and_label: int(category_and_label[1]))
categories, _ = zip(*categories_and_labels)
return categories
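# Illustrative note (not part of the original file): the regex above is intended to turn a
# hypothetical category_names.m line such as "'aeroplane'; %1" into the pair ("aeroplane", "1");
# the pairs are then sorted by their numeric label before only the names are returned.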
| [
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.LineReader"
] | [((1193, 1398), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"""'], {'sha256': '"""6a5a2918d5c73ce032fdeba876574d150d9d04113ab87540a1304cbcc715be53"""'}), "(\n 'https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz'\n , sha256='6a5a2918d5c73ce032fdeba876574d150d9d04113ab87540a1304cbcc715be53'\n )\n", (1205, 1398), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1441, 1592), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""http://home.bharathh.info/pubs/codes/SBD/train_noval.txt"""'], {'sha256': '"""0b2068f7a359d2907431803e1cd63bf6162da37d7d503b589d3b08c6fd0c2432"""'}), "('http://home.bharathh.info/pubs/codes/SBD/train_noval.txt',\n sha256='0b2068f7a359d2907431803e1cd63bf6162da37d7d503b589d3b08c6fd0c2432')\n", (1453, 1592), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1751, 1772), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1763, 1772), False, 'import pathlib\n'), ((4220, 4327), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'buffer_size': 'INFINITE_BUFFER_SIZE', 'drop_none': '(True)'}), '(archive_dp, 3, self._classify_archive, buffer_size=\n INFINITE_BUFFER_SIZE, drop_none=True)\n', (4233, 4327), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader\n'), ((4494, 4527), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['split_dp'], {'decode': '(True)'}), '(split_dp, decode=True)\n', (4504, 4527), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader\n'), ((4547, 4570), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['split_dp'], {}), '(split_dp)\n', (4560, 4570), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n'), ((4590, 4614), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['split_dp'], {}), '(split_dp)\n', (4604, 4614), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n'), ((5298, 5312), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['dp'], {}), '(dp)\n', (5308, 5312), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader\n'), ((5326, 5363), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'bytes.decode'], {'input_col': '(1)'}), '(dp, bytes.decode, input_col=1)\n', (5332, 5363), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader\n'), ((5424, 5483), 're.compile', 're.compile', (['"""\\\\s*\'(?P<category>\\\\w+)\';\\\\s*%(?P<label>\\\\d+)"""'], {}), '("\\\\s*\'(?P<category>\\\\w+)\';\\\\s*%(?P<label>\\\\d+)")\n', (5434, 5483), False, 'import re\n'), ((2757, 2790), 'torch.as_tensor', 'torch.as_tensor', (['raw_segmentation'], {}), '(raw_segmentation)\n', (2772, 2790), False, 'import torch\n'), ((4967, 5054), 'functools.partial', 
'functools.partial', (['self._collate_and_decode_sample'], {'config': 'config', 'decoder': 'decoder'}), '(self._collate_and_decode_sample, config=config, decoder=\n decoder)\n', (4984, 5054), False, 'import functools\n'), ((5240, 5283), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', '"""category_names.m"""'], {}), "('name', 'category_names.m')\n", (5255, 5283), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n'), ((3498, 3518), 'torchvision.prototype.datasets.utils._internal.read_mat', 'read_mat', (['ann_buffer'], {}), '(ann_buffer)\n', (3506, 3518), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n'), ((5184, 5202), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (5196, 5202), False, 'import pathlib\n'), ((4801, 4827), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['*([0] * level)', '(1)'], {}), '(*([0] * level), 1)\n', (4808, 4827), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n'), ((4854, 4875), 'torchvision.prototype.datasets.utils._internal.path_accessor', 'path_accessor', (['"""stem"""'], {}), "('stem')\n", (4867, 4875), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, read_mat, getitem, path_accessor, path_comparator, hint_sharding, hint_shuffling\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import unittest
import warnings
from functools import partial
from io import StringIO
from operator import itemgetter
from typing import List
import expecttest
import torchdata.datapipes.iter as iterdp
from _utils._common_utils_for_test import create_temp_dir, create_temp_files
from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE
from torchdata.datapipes.iter import IterableWrapper
from torchdata.datapipes.map import SequenceWrapper
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
try:
import fsspec
except ImportError:
fsspec = None
try:
import iopath
except ImportError:
iopath = None
try:
import subprocess
import rarfile
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
rarfile = None
except (ModuleNotFoundError, FileNotFoundError):
rarfile = None
try:
import torcharrow
import torcharrow.dtypes as dt
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
except ImportError:
torcharrow = None
dt = None
DTYPE = None
def _fake_batch_fn(batch):
return [d + 1 for d in batch]
def _fake_fn_ls(x):
return [x, x]
def _filepath_fn(name: str, dir) -> str:
return os.path.join(dir, os.path.basename(name))
def _filter_by_module_availability(datapipes):
filter_set = set()
if fsspec is None:
filter_set.update([iterdp.FSSpecFileLister, iterdp.FSSpecFileOpener, iterdp.FSSpecSaver])
if iopath is None:
filter_set.update([iterdp.IoPathFileLister, iterdp.IoPathFileOpener, iterdp.IoPathSaver])
if rarfile is None:
filter_set.update([iterdp.RarArchiveLoader])
if torcharrow is None or not DILL_AVAILABLE:
filter_set.update([iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader])
return [dp for dp in datapipes if dp[0] not in filter_set]
class TestIterDataPipeSerialization(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}")
def _serialization_test_helper(self, datapipe):
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_dataframe_test_helper(self, datapipe):
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
for df1, df2 in zip(datapipe, deserialized_dp):
for exp, act in zip(df1, df2):
self.assertEqual(exp, act)
def _serialization_test_for_single_dp(self, dp, is_dataframe=False):
test_helper_fn = self._serialization_dataframe_test_helper if is_dataframe else self._serialization_test_helper
# 1. Testing for serialization before any iteration starts
test_helper_fn(dp)
# 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
test_helper_fn(dp)
# 3. Testing for serialization after DataPipe is fully read
_ = list(it)
test_helper_fn(dp)
def _serialization_test_for_dp_with_children(self, dp1, dp2):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 2. Testing for serialization after DataPipe is partially read
it1, it2 = iter(dp1), iter(dp2)
_, _ = next(it1), next(it2)
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 2.5. Testing for serialization after one child DataPipe is fully read
# (Only for DataPipes with children DataPipes)
_ = list(it1) # fully read one child
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 3. Testing for serialization after DataPipe is fully read
_ = list(it2) # fully read the other child
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
def test_serializable(self):
picklable_datapipes: List = [
(iterdp.BatchMapper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), (_fake_batch_fn, 2, 1), {}),
(iterdp.BucketBatcher, IterableWrapper([0, 0, 0, 0, 0, 0, 0]), (5,), {}),
(iterdp.Bz2FileLoader, None, (), {}),
(
iterdp.CSVDictParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(
iterdp.CSVParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(iterdp.Cycler, None, (2,), {}),
(iterdp.DataFrameMaker, IterableWrapper([(i,) for i in range(3)]), (), {"dtype": DTYPE}),
(iterdp.Decompressor, None, (), {}),
(iterdp.Enumerator, None, (2,), {}),
(iterdp.FlatMapper, None, (_fake_fn_ls,), {}),
(iterdp.FSSpecFileLister, ".", (), {}),
(iterdp.FSSpecFileOpener, None, (), {}),
(
iterdp.FSSpecSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.GDriveReader, None, (), {}),
(iterdp.HashChecker, None, ({},), {}),
(iterdp.Header, None, (3,), {}),
(iterdp.HttpReader, None, (), {}),
# TODO (ejguan): Deterministic serialization is required
# (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}),
(iterdp.InMemoryCacheHolder, None, (), {}),
(iterdp.IndexAdder, IterableWrapper([{"a": 1, "b": 2}, {"c": 3, "a": 1}]), ("label",), {}),
(iterdp.IoPathFileLister, ".", (), {}),
(iterdp.IoPathFileOpener, None, (), {}),
(
iterdp.IoPathSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(
iterdp.IterKeyZipper,
IterableWrapper([("a", 100), ("b", 200), ("c", 300)]),
(IterableWrapper([("a", 1), ("b", 2), ("c", 3)]), itemgetter(0), itemgetter(0)),
{},
),
(
iterdp.JsonParser,
IterableWrapper(
[
("1.json", StringIO('["fo", {"ba":["baz", null, 1.0, 2]}]')),
("2.json", StringIO('{"__cx__": true, "r": 1, "i": 2}')),
]
),
(),
{},
),
(
iterdp.LineReader,
IterableWrapper(
[("file1", StringIO("Line1\nLine2")), ("file2", StringIO("Line2,1\r\nLine2,2\r\nLine2,3"))]
),
(),
{},
),
(
iterdp.MaxTokenBucketizer,
IterableWrapper(["1", "22", "1", "4444", "333", "1", "22", "22", "333"]),
(4,),
{},
),
(
iterdp.MapKeyZipper,
IterableWrapper([("a", 1), ("b", 2), ("c", 3)]),
(SequenceWrapper({"a": 100, "b": 200, "c": 300}), itemgetter(0)),
{},
),
(iterdp.OnDiskCacheHolder, None, (), {}),
(iterdp.OnlineReader, None, (), {}),
(
iterdp.ParagraphAggregator,
IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]),
(),
{},
),
(iterdp.ParquetDataFrameLoader, None, (), {"dtype": DTYPE}),
(iterdp.RarArchiveLoader, None, (), {}),
(
iterdp.Rows2Columnar,
IterableWrapper([[{"a": 1}, {"b": 2, "a": 1}], [{"a": 1, "b": 200}, {"c": 3}]]),
(),
{},
),
(iterdp.SampleMultiplexer, {IterableWrapper([0] * 10): 0.5, IterableWrapper([1] * 10): 0.5}, (), {}),
(
iterdp.Saver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.TarArchiveLoader, None, (), {}),
(iterdp.TFRecordLoader, None, (), {}),
(iterdp.UnZipper, IterableWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}),
(iterdp.XzFileLoader, None, (), {}),
(iterdp.ZipArchiveLoader, None, (), {}),
]
picklable_datapipes = _filter_by_module_availability(picklable_datapipes)
# Skipping value comparison for these DataPipes
# Most of them return streams not comparable by `self.assertEqual`
# Others are similar to caching where the outputs depend on other DataPipes
dp_skip_comparison = {
iterdp.Bz2FileLoader,
iterdp.Decompressor,
iterdp.FileOpener,
iterdp.FSSpecFileOpener,
iterdp.GDriveReader,
iterdp.IoPathFileOpener,
iterdp.HashChecker,
iterdp.HttpReader,
iterdp.OnDiskCacheHolder,
iterdp.OnlineReader,
iterdp.ParquetDataFrameLoader,
iterdp.SampleMultiplexer,
iterdp.RarArchiveLoader,
iterdp.TarArchiveLoader,
iterdp.TFRecordLoader,
iterdp.XzFileLoader,
iterdp.ZipArchiveLoader,
}
# These DataPipes produce multiple DataPipes as outputs and those should be compared
dp_compare_children = {iterdp.UnZipper}
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
try:
# Creating input (usually a DataPipe) for the specific dpipe being tested
if custom_input is None:
custom_input = IterableWrapper(range(10))
if dpipe in dp_skip_comparison:  # Make sure they are picklable and loadable (no value comparison)
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = pickle.dumps(datapipe)
_ = pickle.loads(serialized_dp)
elif dpipe in dp_compare_children: # DataPipes that have children
dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_dp_with_children(dp1, dp2)
else: # Single DataPipe that requires comparison
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
is_dataframe = issubclass(dpipe, (iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader))
self._serialization_test_for_single_dp(datapipe, is_dataframe=is_dataframe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
input_dp = IterableWrapper(range(10))
ref_idp = IterableWrapper(range(10))
ref_mdp = SequenceWrapper(range(10))
unpicklable_datapipes: List = [
(iterdp.BatchMapper, (lambda batch: [d + 1 for d in batch], 2), {}),
(iterdp.FlatMapper, (lambda x: [x, x],), {}),
(iterdp.IterKeyZipper, (ref_idp, lambda x: x, None, True, 100), {}),
(iterdp.MapKeyZipper, (ref_mdp, lambda x: x), {}),
(iterdp.OnDiskCacheHolder, (lambda x: x,), {}),
(iterdp.ParagraphAggregator, (lambda x: x,), {}),
]
# Skipping value comparison for these DataPipes
dp_skip_comparison = {iterdp.OnDiskCacheHolder, iterdp.ParagraphAggregator}
for dpipe, dp_args, dp_kwargs in unpicklable_datapipes:
if DILL_AVAILABLE:
try:
if dpipe in dp_skip_comparison: # Make sure they are picklable/loadable (no value comparison)
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = dill.dumps(datapipe)
_ = dill.loads(serialized_dp)
else:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_single_dp(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
else:
dp_no_attribute_error = (iterdp.OnDiskCacheHolder,)
try:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
if isinstance(datapipe, dp_no_attribute_error):
_ = pickle.dumps(datapipe)
else:
with self.assertRaises(AttributeError):
_ = pickle.dumps(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
class TestMapDataPipeSerialization(expecttest.TestCase):
def test_serializable(self):
pass
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
pass
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.map.SequenceWrapper",
"torchdata.datapipes.iter.IterableWrapper"
] | [((726, 753), 'dill.extend', 'dill.extend', ([], {'use_dill': '(False)'}), '(use_dill=False)\n', (737, 753), False, 'import dill\n'), ((15119, 15134), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15132, 15134), False, 'import unittest\n'), ((944, 964), 'rarfile.tool_setup', 'rarfile.tool_setup', ([], {}), '()\n', (962, 964), False, 'import rarfile\n'), ((973, 1014), 'subprocess.run', 'subprocess.run', (["('rar', '-?')"], {'check': '(True)'}), "(('rar', '-?'), check=True)\n", (987, 1014), False, 'import subprocess\n'), ((1539, 1561), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (1555, 1561), False, 'import os\n'), ((2253, 2270), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', ([], {}), '()\n', (2268, 2270), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files\n'), ((2297, 2329), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_dir'], {}), '(self.temp_dir)\n', (2314, 2329), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files\n'), ((2358, 2393), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', (['self.temp_dir.name'], {}), '(self.temp_dir.name)\n', (2373, 2393), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files\n'), ((2424, 2470), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_sub_dir', '(4)', '(False)'], {}), '(self.temp_sub_dir, 4, False)\n', (2441, 2470), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files\n'), ((2797, 2819), 'pickle.dumps', 'pickle.dumps', (['datapipe'], {}), '(datapipe)\n', (2809, 2819), False, 'import pickle\n'), ((2846, 2873), 'pickle.loads', 'pickle.loads', (['serialized_dp'], {}), '(serialized_dp)\n', (2858, 2873), False, 'import pickle\n'), ((3143, 3165), 'pickle.dumps', 'pickle.dumps', (['datapipe'], {}), '(datapipe)\n', (3155, 3165), False, 'import pickle\n'), ((3192, 3219), 'pickle.loads', 'pickle.loads', (['serialized_dp'], {}), '(serialized_dp)\n', (3204, 3219), False, 'import pickle\n'), ((1260, 1288), 'torcharrow.dtypes.Field', 'dt.Field', (['"""Values"""', 'dt.int32'], {}), "('Values', dt.int32)\n", (1268, 1288), True, 'import torcharrow.dtypes as dt\n'), ((2628, 2729), 'warnings.warn', 'warnings.warn', (['f"""TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}"""'], {}), "(\n f'TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}'\n )\n", (2641, 2729), False, 'import warnings\n'), ((4960, 5009), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[(0, 0), (0, 0), (0, 0), (0, 0)]'], {}), '([(0, 0), (0, 0), (0, 0), (0, 0)])\n', (4975, 5009), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((5075, 5113), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (5090, 5113), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((6203, 6288), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]"], {}), "([('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]\n )\n", (6218, 6288), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((6849, 6902), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[{'a': 1, 'b': 2}, {'c': 3, 'a': 1}]"], {}), "([{'a': 1, 'b': 2}, {'c': 3, 'a': 1}])\n", (6864, 6902), False, 'from 
torchdata.datapipes.iter import IterableWrapper\n'), ((7092, 7177), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]"], {}), "([('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]\n )\n", (7107, 7177), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((7371, 7424), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('a', 100), ('b', 200), ('c', 300)]"], {}), "([('a', 100), ('b', 200), ('c', 300)])\n", (7386, 7424), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((8267, 8339), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["['1', '22', '1', '4444', '333', '1', '22', '22', '333']"], {}), "(['1', '22', '1', '4444', '333', '1', '22', '22', '333'])\n", (8282, 8339), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((8465, 8512), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('a', 1), ('b', 2), ('c', 3)]"], {}), "([('a', 1), ('b', 2), ('c', 3)])\n", (8480, 8512), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((8808, 8881), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('f1', 'L1'), ('f1', 'L2'), ('f2', '21'), ('f2', '22')]"], {}), "([('f1', 'L1'), ('f1', 'L2'), ('f2', '21'), ('f2', '22')])\n", (8823, 8881), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((9132, 9211), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[[{'a': 1}, {'b': 2, 'a': 1}], [{'a': 1, 'b': 200}, {'c': 3}]]"], {}), "([[{'a': 1}, {'b': 2, 'a': 1}], [{'a': 1, 'b': 200}, {'c': 3}]])\n", (9147, 9211), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((9442, 9527), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]"], {}), "([('1.txt', b'DATA1'), ('2.txt', b'DATA2'), ('3.txt', b'DATA3')]\n )\n", (9457, 9527), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((6351, 6396), 'functools.partial', 'partial', (['_filepath_fn'], {'dir': 'self.temp_dir.name'}), '(_filepath_fn, dir=self.temp_dir.name)\n', (6358, 6396), False, 'from functools import partial\n'), ((7240, 7285), 'functools.partial', 'partial', (['_filepath_fn'], {'dir': 'self.temp_dir.name'}), '(_filepath_fn, dir=self.temp_dir.name)\n', (7247, 7285), False, 'from functools import partial\n'), ((7443, 7490), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('a', 1), ('b', 2), ('c', 3)]"], {}), "([('a', 1), ('b', 2), ('c', 3)])\n", (7458, 7490), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((7492, 7505), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (7502, 7505), False, 'from operator import itemgetter\n'), ((7507, 7520), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (7517, 7520), False, 'from operator import itemgetter\n'), ((8531, 8578), 'torchdata.datapipes.map.SequenceWrapper', 'SequenceWrapper', (["{'a': 100, 'b': 200, 'c': 300}"], {}), "({'a': 100, 'b': 200, 'c': 300})\n", (8546, 8578), False, 'from torchdata.datapipes.map import SequenceWrapper\n'), ((8580, 8593), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (8590, 8593), False, 'from operator import itemgetter\n'), ((9308, 9333), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['([0] * 10)'], {}), '([0] * 10)\n', (9323, 9333), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((9340, 9365), 
'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['([1] * 10)'], {}), '([1] * 10)\n', (9355, 9365), False, 'from torchdata.datapipes.iter import IterableWrapper\n'), ((9590, 9635), 'functools.partial', 'partial', (['_filepath_fn'], {'dir': 'self.temp_dir.name'}), '(_filepath_fn, dir=self.temp_dir.name)\n', (9597, 9635), False, 'from functools import partial\n'), ((11597, 11619), 'pickle.dumps', 'pickle.dumps', (['datapipe'], {}), '(datapipe)\n', (11609, 11619), False, 'import pickle\n'), ((11644, 11671), 'pickle.loads', 'pickle.loads', (['serialized_dp'], {}), '(serialized_dp)\n', (11656, 11671), False, 'import pickle\n'), ((13623, 13643), 'dill.dumps', 'dill.dumps', (['datapipe'], {}), '(datapipe)\n', (13633, 13643), False, 'import dill\n'), ((13672, 13697), 'dill.loads', 'dill.loads', (['serialized_dp'], {}), '(serialized_dp)\n', (13682, 13697), False, 'import dill\n'), ((14147, 14183), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (14170, 14183), False, 'import warnings\n'), ((5289, 5334), 'io.StringIO', 'StringIO', (['"""Label,1,1\nLabel,2,2\nLabel,3,3"""'], {}), '("""Label,1,1\nLabel,2,2\nLabel,3,3""")\n', (5297, 5334), False, 'from io import StringIO\n'), ((5342, 5377), 'io.StringIO', 'StringIO', (["'L,1,1\\r\\nL,2,2\\r\\nL,3,3'"], {}), "('L,1,1\\r\\nL,2,2\\r\\nL,3,3')\n", (5350, 5377), False, 'from io import StringIO\n'), ((5563, 5608), 'io.StringIO', 'StringIO', (['"""Label,1,1\nLabel,2,2\nLabel,3,3"""'], {}), '("""Label,1,1\nLabel,2,2\nLabel,3,3""")\n', (5571, 5608), False, 'from io import StringIO\n'), ((5616, 5651), 'io.StringIO', 'StringIO', (["'L,1,1\\r\\nL,2,2\\r\\nL,3,3'"], {}), "('L,1,1\\r\\nL,2,2\\r\\nL,3,3')\n", (5624, 5651), False, 'from io import StringIO\n'), ((7697, 7745), 'io.StringIO', 'StringIO', (['"""["fo", {"ba":["baz", null, 1.0, 2]}]"""'], {}), '(\'["fo", {"ba":["baz", null, 1.0, 2]}]\')\n', (7705, 7745), False, 'from io import StringIO\n'), ((7783, 7827), 'io.StringIO', 'StringIO', (['"""{"__cx__": true, "r": 1, "i": 2}"""'], {}), '(\'{"__cx__": true, "r": 1, "i": 2}\')\n', (7791, 7827), False, 'from io import StringIO\n'), ((8039, 8063), 'io.StringIO', 'StringIO', (['"""Line1\nLine2"""'], {}), "('Line1\\nLine2')\n", (8047, 8063), False, 'from io import StringIO\n'), ((8076, 8117), 'io.StringIO', 'StringIO', (["'Line2,1\\r\\nLine2,2\\r\\nLine2,3'"], {}), "('Line2,1\\r\\nLine2,2\\r\\nLine2,3')\n", (8084, 8117), False, 'from io import StringIO\n'), ((14558, 14580), 'pickle.dumps', 'pickle.dumps', (['datapipe'], {}), '(datapipe)\n', (14570, 14580), False, 'import pickle\n'), ((14715, 14737), 'pickle.dumps', 'pickle.dumps', (['datapipe'], {}), '(datapipe)\n', (14727, 14737), False, 'import pickle\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
from torchdata.datapipes.iter import HttpReader
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = {
"train": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv",
"test": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv",
}
MD5 = {
"train": "b1a00f826fdfbd249f79597b59e1dc12",
"test": "d52ea96a97a2d943681189a97654912d",
}
NUM_LINES = {
"train": 120000,
"test": 7600,
}
DATASET_NAME = "AG_NEWS"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=4)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root, split):
"""Demonstrating streaming use case
This might be useful when we do not want to cache or download the data.
The limitation is that we do not have any checking mechanism or data sanity check.
"""
# Stack CSV Parser directly on top of web-stream
return HttpReader([URL[split]]).parse_csv().map(lambda t: (int(t[0]), " ".join(t[1:])))
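# Minimal usage sketch (added for illustration, not part of the original file; needs network
# access, since the CSV is streamed straight from the URL above):
#
#     train_dp = AG_NEWS(split="train")
#     label, text = next(iter(train_dp))  # label is an int, text is the joined remaining columns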
| [
"torchdata.datapipes.iter.HttpReader"
] | [((1063, 1087), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['[URL[split]]'], {}), '([URL[split]])\n', (1073, 1087), False, 'from torchdata.datapipes.iter import HttpReader\n')] |
import functools
import pathlib
import pickle
from typing import BinaryIO
from typing import (
Sequence,
Callable,
Union,
Any,
Tuple,
TypeVar,
Iterator,
Dict,
IO,
Sized,
)
from typing import cast
import torch
import torch.distributed as dist
import torch.utils.data
from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler
from torchdata.datapipes.utils import StreamWrapper
from torchvision.prototype.utils._internal import fromfile
__all__ = [
"INFINITE_BUFFER_SIZE",
"BUILTIN_DIR",
"read_mat",
"MappingIterator",
"getitem",
"path_accessor",
"path_comparator",
"read_flo",
"hint_sharding",
]
K = TypeVar("K")
D = TypeVar("D")
# pseudo-infinite until a true infinite buffer is supported by all datapipes
INFINITE_BUFFER_SIZE = 1_000_000_000
BUILTIN_DIR = pathlib.Path(__file__).parent.parent / "_builtin"
def read_mat(buffer: BinaryIO, **kwargs: Any) -> Any:
try:
import scipy.io as sio
except ImportError as error:
raise ModuleNotFoundError("Package `scipy` is required to be installed to read .mat files.") from error
if isinstance(buffer, StreamWrapper):
buffer = buffer.file_obj
return sio.loadmat(buffer, **kwargs)
class MappingIterator(IterDataPipe[Union[Tuple[K, D], D]]):
def __init__(self, datapipe: IterDataPipe[Dict[K, D]], *, drop_key: bool = False) -> None:
self.datapipe = datapipe
self.drop_key = drop_key
def __iter__(self) -> Iterator[Union[Tuple[K, D], D]]:
for mapping in self.datapipe:
yield from iter(mapping.values() if self.drop_key else mapping.items())
def _getitem_closure(obj: Any, *, items: Sequence[Any]) -> Any:
for item in items:
obj = obj[item]
return obj
def getitem(*items: Any) -> Callable[[Any], Any]:
return functools.partial(_getitem_closure, items=items)
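# Illustrative note (not part of the original file): getitem(0) behaves like `lambda obj: obj[0]`,
# and getitem("a", 1) would return obj["a"][1]; the builtin datasets use it as a key_fn, e.g.
# key_fn=getitem(0) when zipping split and image datapipes.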
def _getattr_closure(obj: Any, *, attrs: Sequence[str]) -> Any:
for attr in attrs:
obj = getattr(obj, attr)
return obj
def _path_attribute_accessor(path: pathlib.Path, *, name: str) -> D:
return cast(D, _getattr_closure(path, attrs=name.split(".")))
def _path_accessor_closure(data: Tuple[str, Any], *, getter: Callable[[pathlib.Path], D]) -> D:
return getter(pathlib.Path(data[0]))
def path_accessor(getter: Union[str, Callable[[pathlib.Path], D]]) -> Callable[[Tuple[str, Any]], D]:
if isinstance(getter, str):
getter = functools.partial(_path_attribute_accessor, name=getter)
return functools.partial(_path_accessor_closure, getter=getter)
def _path_comparator_closure(data: Tuple[str, Any], *, accessor: Callable[[Tuple[str, Any]], D], value: D) -> bool:
return accessor(data) == value
def path_comparator(getter: Union[str, Callable[[pathlib.Path], D]], value: D) -> Callable[[Tuple[str, Any]], bool]:
return functools.partial(_path_comparator_closure, accessor=path_accessor(getter), value=value)
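# Illustrative note (not part of the original file):
#     is_categories_file = path_comparator("name", "category_names.m")
#     is_categories_file(("/some/dir/category_names.m", None))  # -> True (hypothetical tuple)
# while path_accessor("stem") would map the same (path, ...) tuple to "category_names".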
class PicklerDataPipe(IterDataPipe):
def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO[bytes]]]) -> None:
self.source_datapipe = source_datapipe
def __iter__(self) -> Iterator[Any]:
for _, fobj in self.source_datapipe:
data = pickle.load(fobj)
for _, d in enumerate(data):
yield d
class SharderDataPipe(torch.utils.data.datapipes.iter.grouping.ShardingFilterIterDataPipe):
def __init__(self, source_datapipe: IterDataPipe) -> None:
super().__init__(source_datapipe)
self.rank = 0
self.world_size = 1
if dist.is_available() and dist.is_initialized():
self.rank = dist.get_rank()
self.world_size = dist.get_world_size()
self.apply_sharding(self.world_size, self.rank)
def __iter__(self) -> Iterator[Any]:
num_workers = self.world_size
worker_id = self.rank
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
worker_id = worker_id + worker_info.id * num_workers
num_workers *= worker_info.num_workers
self.apply_sharding(num_workers, worker_id)
yield from super().__iter__()
class TakerDataPipe(IterDataPipe):
def __init__(self, source_datapipe: IterDataPipe, num_take: int) -> None:
super().__init__()
self.source_datapipe = source_datapipe
self.num_take = num_take
self.world_size = 1
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
def __iter__(self) -> Iterator[Any]:
num_workers = self.world_size
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
num_workers *= worker_info.num_workers
# TODO: this is weird as it drops more elements than it should
num_take = self.num_take // num_workers
for i, data in enumerate(self.source_datapipe):
if i < num_take:
yield data
else:
break
def __len__(self) -> int:
num_take = self.num_take // self.world_size
if isinstance(self.source_datapipe, Sized):
if len(self.source_datapipe) < num_take:
num_take = len(self.source_datapipe)
# TODO: might be weird to not take `num_workers` into account
return num_take
def _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe[Dict[str, Any]]:
dp = IoPathFileLister(root=root)
dp = SharderDataPipe(dp)
dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE)
dp = IoPathFileOpener(dp, mode="rb")
dp = PicklerDataPipe(dp)
# dp = dp.cycle(2)
dp = TakerDataPipe(dp, dataset_size)
return dp
def read_flo(file: BinaryIO) -> torch.Tensor:
if file.read(4) != b"PIEH":
raise ValueError("Magic number incorrect. Invalid .flo file")
width, height = fromfile(file, dtype=torch.int32, byte_order="little", count=2)
flow = fromfile(file, dtype=torch.float32, byte_order="little", count=height * width * 2)
return flow.reshape((height, width, 2)).permute((2, 0, 1))
def hint_sharding(datapipe: IterDataPipe) -> ShardingFilter:
return ShardingFilter(datapipe)
def hint_shuffling(datapipe: IterDataPipe[D]) -> Shuffler[D]:
return Shuffler(datapipe, default=False, buffer_size=INFINITE_BUFFER_SIZE)
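# Illustrative note (not part of the original file): datasets typically chain these hints onto a
# datapipe inside _make_datapipe, e.g.
#     dp = hint_sharding(dp)
#     dp = hint_shuffling(dp)
# which inserts a ShardingFilter and a Shuffler that is disabled by default (default=False), so the
# loading machinery can later decide where to shard and whether to shuffle.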
| [
"torchdata.datapipes.iter.IoPathFileLister",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.ShardingFilter",
"torchdata.datapipes.iter.IoPathFileOpener"
] | [((733, 745), 'typing.TypeVar', 'TypeVar', (['"""K"""'], {}), "('K')\n", (740, 745), False, 'from typing import Sequence, Callable, Union, Any, Tuple, TypeVar, Iterator, Dict, IO, Sized\n'), ((750, 762), 'typing.TypeVar', 'TypeVar', (['"""D"""'], {}), "('D')\n", (757, 762), False, 'from typing import Sequence, Callable, Union, Any, Tuple, TypeVar, Iterator, Dict, IO, Sized\n'), ((1272, 1301), 'scipy.io.loadmat', 'sio.loadmat', (['buffer'], {}), '(buffer, **kwargs)\n', (1283, 1301), True, 'import scipy.io as sio\n'), ((1898, 1946), 'functools.partial', 'functools.partial', (['_getitem_closure'], {'items': 'items'}), '(_getitem_closure, items=items)\n', (1915, 1946), False, 'import functools\n'), ((2582, 2638), 'functools.partial', 'functools.partial', (['_path_accessor_closure'], {'getter': 'getter'}), '(_path_accessor_closure, getter=getter)\n', (2599, 2638), False, 'import functools\n'), ((5529, 5556), 'torchdata.datapipes.iter.IoPathFileLister', 'IoPathFileLister', ([], {'root': 'root'}), '(root=root)\n', (5545, 5556), False, 'from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler\n'), ((5649, 5680), 'torchdata.datapipes.iter.IoPathFileOpener', 'IoPathFileOpener', (['dp'], {'mode': '"""rb"""'}), "(dp, mode='rb')\n", (5665, 5680), False, 'from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler\n'), ((5959, 6022), 'torchvision.prototype.utils._internal.fromfile', 'fromfile', (['file'], {'dtype': 'torch.int32', 'byte_order': '"""little"""', 'count': '(2)'}), "(file, dtype=torch.int32, byte_order='little', count=2)\n", (5967, 6022), False, 'from torchvision.prototype.utils._internal import fromfile\n'), ((6034, 6120), 'torchvision.prototype.utils._internal.fromfile', 'fromfile', (['file'], {'dtype': 'torch.float32', 'byte_order': '"""little"""', 'count': '(height * width * 2)'}), "(file, dtype=torch.float32, byte_order='little', count=height *\n width * 2)\n", (6042, 6120), False, 'from torchvision.prototype.utils._internal import fromfile\n'), ((6254, 6278), 'torchdata.datapipes.iter.ShardingFilter', 'ShardingFilter', (['datapipe'], {}), '(datapipe)\n', (6268, 6278), False, 'from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler\n'), ((6354, 6421), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['datapipe'], {'default': '(False)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(datapipe, default=False, buffer_size=INFINITE_BUFFER_SIZE)\n', (6362, 6421), False, 'from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler\n'), ((2337, 2358), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (2349, 2358), False, 'import pathlib\n'), ((2513, 2569), 'functools.partial', 'functools.partial', (['_path_attribute_accessor'], {'name': 'getter'}), '(_path_attribute_accessor, name=getter)\n', (2530, 2569), False, 'import functools\n'), ((3959, 3993), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (3991, 3993), False, 'import torch\n'), ((4698, 4732), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (4730, 4732), False, 'import torch\n'), ((893, 915), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (905, 915), False, 'import pathlib\n'), ((3289, 3306), 'pickle.load', 'pickle.load', (['fobj'], {}), '(fobj)\n', (3300, 3306), False, 'import pickle\n'), ((3632, 3651), 
'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (3649, 3651), True, 'import torch.distributed as dist\n'), ((3656, 3677), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (3675, 3677), True, 'import torch.distributed as dist\n'), ((3703, 3718), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3716, 3718), True, 'import torch.distributed as dist\n'), ((3749, 3770), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3768, 3770), True, 'import torch.distributed as dist\n'), ((4497, 4516), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (4514, 4516), True, 'import torch.distributed as dist\n'), ((4521, 4542), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (4540, 4542), True, 'import torch.distributed as dist\n'), ((4574, 4595), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4593, 4595), True, 'import torch.distributed as dist\n')] |
import pathlib
from typing import Any, Dict, List, Optional, Tuple
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
HttpResource,
)
from torchvision.prototype.datasets.utils._internal import (
path_comparator,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
)
from torchvision.prototype.features import Label, BoundingBox, EncodedImage
class GTSRB(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"gtsrb",
homepage="https://benchmark.ini.rub.de",
categories=[f"{label:05d}" for label in range(43)],
valid_options=dict(split=("train", "test")),
)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[config.split], sha256=self._CHECKSUMS[config.split])]
if config.split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
image_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self.categories),
"bounding_box": bounding_box,
}
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
if config.split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files, so we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, self._prepare_sample)
| [
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Zipper",
"torchdata.datapipes.iter.CSVDictParser"
] | [((1968, 1989), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1980, 1989), False, 'import pathlib\n'), ((3477, 3513), 'torchdata.datapipes.iter.CSVDictParser', 'CSVDictParser', (['ann_dp'], {'delimiter': '""";"""'}), "(ann_dp, delimiter=';')\n", (3490, 3513), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer\n'), ((3527, 3552), 'torchdata.datapipes.iter.Zipper', 'Zipper', (['images_dp', 'ann_dp'], {}), '(images_dp, ann_dp)\n', (3533, 3552), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer\n'), ((3567, 3584), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (3580, 3584), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, INFINITE_BUFFER_SIZE\n'), ((3598, 3616), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (3612, 3616), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, INFINITE_BUFFER_SIZE\n'), ((3633, 3665), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (3639, 3665), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer\n'), ((1531, 1607), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['self._URLS[config.split]'], {'sha256': 'self._CHECKSUMS[config.split]'}), '(self._URLS[config.split], sha256=self._CHECKSUMS[config.split])\n', (1543, 1607), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, HttpResource\n'), ((2612, 2642), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (2634, 2642), False, 'from torchvision.prototype.features import Label, BoundingBox, EncodedImage\n'), ((2665, 2705), 'torchvision.prototype.features.Label', 'Label', (['label'], {'categories': 'self.categories'}), '(label, categories=self.categories)\n', (2670, 2705), False, 'from torchvision.prototype.features import Label, BoundingBox, EncodedImage\n'), ((2964, 3082), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['resource_dps[0]', '(2)', 'self._classify_train_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(resource_dps[0], 2, self._classify_train_archive, drop_none=\n True, buffer_size=INFINITE_BUFFER_SIZE)\n', (2977, 3082), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer\n'), ((1687, 1782), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (["self._URLS['test_ground_truth']"], {'sha256': "self._CHECKSUMS['test_ground_truth']"}), "(self._URLS['test_ground_truth'], sha256=self._CHECKSUMS[\n 'test_ground_truth'])\n", (1699, 1782), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, HttpResource\n'), ((3209, 3242), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""suffix"""', '""".ppm"""'], {}), "('suffix', '.ppm')\n", (3224, 3242), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, INFINITE_BUFFER_SIZE\n')] |
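The GTSRB pipeline above leans on `Demultiplexer` to split the single training archive into an image branch and an annotation branch via a classifier function. Below is a minimal, self-contained sketch of that pattern; the file names and the small buffer size are illustrative only and are not taken from the dataset row.

```python
from torchdata.datapipes.iter import Demultiplexer, IterableWrapper


def classify(path: str):
    # Route .ppm entries to branch 0, .csv entries to branch 1, drop the rest.
    if path.endswith(".ppm"):
        return 0
    if path.endswith(".csv"):
        return 1
    return None


paths = IterableWrapper(["00000.ppm", "GT-00000.csv", "README.txt"])
images_dp, ann_dp = Demultiplexer(paths, 2, classify, drop_none=True, buffer_size=10)

print(list(images_dp))  # ['00000.ppm']
print(list(ann_dp))     # ['GT-00000.csv']
```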
import functools
import pathlib
import re
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, Match, cast
from torchdata.datapipes.iter import (
IterDataPipe,
LineReader,
IterKeyZipper,
Mapper,
Filter,
Demultiplexer,
TarArchiveLoader,
Enumerator,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
ManualDownloadResource,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
BUILTIN_DIR,
path_comparator,
getitem,
read_mat,
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label, EncodedImage
from torchvision.prototype.utils._internal import FrozenMapping
class ImageNetResource(ManualDownloadResource):
def __init__(self, **kwargs: Any) -> None:
super().__init__("Register on https://image-net.org/ and follow the instructions there.", **kwargs)
class ImageNet(Dataset):
def _make_info(self) -> DatasetInfo:
name = "imagenet"
categories, wnids = zip(*DatasetInfo.read_categories_file(BUILTIN_DIR / f"{name}.categories"))
return DatasetInfo(
name,
dependencies=("scipy",),
categories=categories,
homepage="https://www.image-net.org/",
valid_options=dict(split=("train", "val", "test")),
extra=dict(
wnid_to_category=FrozenMapping(zip(wnids, categories)),
category_to_wnid=FrozenMapping(zip(categories, wnids)),
sizes=FrozenMapping(
[
(DatasetConfig(split="train"), 1_281_167),
(DatasetConfig(split="val"), 50_000),
(DatasetConfig(split="test"), 100_000),
]
),
),
)
def supports_sharded(self) -> bool:
return True
_IMAGES_CHECKSUMS = {
"train": "b08200a27a8e34218a0e58fde36b0fe8f73bc377f4acea2d91602057c3ca45bb",
"val": "c7e06a6c0baccf06d8dbeb6577d71efff84673a5dbdd50633ab44f8ea0456ae0",
"test_v10102019": "9cf7f8249639510f17d3d8a0deb47cd22a435886ba8e29e2b3223e65a4079eb4",
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
name = "test_v10102019" if config.split == "test" else config.split
images = ImageNetResource(
file_name=f"ILSVRC2012_img_{name}.tar",
sha256=self._IMAGES_CHECKSUMS[name],
)
resources: List[OnlineResource] = [images]
if config.split == "val":
devkit = ImageNetResource(
file_name="ILSVRC2012_devkit_t12.tar.gz",
sha256="b59243268c0d266621fd587d2018f69e906fb22875aca0e295b48cafaa927953",
)
resources.append(devkit)
return resources
def num_samples(self, config: DatasetConfig) -> int:
return {
"train": 1_281_167,
"val": 50_000,
"test": 100_000,
}[config.split]
_TRAIN_IMAGE_NAME_PATTERN = re.compile(r"(?P<wnid>n\d{8})_\d+[.]JPEG")
def _prepare_train_data(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
path = pathlib.Path(data[0])
wnid = cast(Match[str], self._TRAIN_IMAGE_NAME_PATTERN.match(path.name))["wnid"]
label = Label.from_category(self.info.extra.wnid_to_category[wnid], categories=self.categories)
return (label, wnid), data
def _prepare_test_data(self, data: Tuple[str, BinaryIO]) -> Tuple[None, Tuple[str, BinaryIO]]:
return None, data
def _classifiy_devkit(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
return {
"meta.mat": 0,
"ILSVRC2012_validation_ground_truth.txt": 1,
}.get(pathlib.Path(data[0]).name)
def _extract_categories_and_wnids(self, data: Tuple[str, BinaryIO]) -> List[Tuple[str, str]]:
synsets = read_mat(data[1], squeeze_me=True)["synsets"]
return [
(self._WNID_MAP.get(wnid, category.split(",", 1)[0]), wnid)
for _, wnid, category, _, num_children, *_ in synsets
# if num_children > 0, we are looking at a superclass that has no direct instance
if num_children == 0
]
def _imagenet_label_to_wnid(self, imagenet_label: str, *, wnids: List[str]) -> str:
return wnids[int(imagenet_label) - 1]
_VAL_TEST_IMAGE_NAME_PATTERN = re.compile(r"ILSVRC2012_(val|test)_(?P<id>\d{8})[.]JPEG")
def _val_test_image_key(self, data: Tuple[str, Any]) -> int:
path = pathlib.Path(data[0])
return int(self._VAL_TEST_IMAGE_NAME_PATTERN.match(path.name).group("id")) # type: ignore[union-attr]
def _prepare_val_data(
self, data: Tuple[Tuple[int, str], Tuple[str, BinaryIO]]
) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
label_data, image_data = data
_, wnid = label_data
label = Label.from_category(self.info.extra.wnid_to_category[wnid], categories=self.categories)
return (label, wnid), image_data
def _prepare_sample(
self,
data: Tuple[Optional[Tuple[Label, str]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
label_data, (path, buffer) = data
return dict(
dict(zip(("label", "wnid"), label_data if label_data else (None, None))),
path=path,
image=EncodedImage.from_file(buffer),
)
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
if config.split in {"train", "test"}:
dp = resource_dps[0]
# the train archive is a tar of tars
if config.split == "train":
dp = TarArchiveLoader(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if config.split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
_, wnids = zip(*next(iter(meta_dp)))
label_dp = LineReader(label_dp, decode=True, return_path=False)
label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
dp = IterKeyZipper(
label_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._val_test_image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._prepare_val_data)
return Mapper(dp, self._prepare_sample)
    # Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
    # and n03126707 are labeled 'crane', but the former refers to the bird and the latter to the construction equipment.
_WNID_MAP = {
"n03126707": "construction crane",
"n03710721": "tank suit",
}
def _generate_categories(self, root: pathlib.Path) -> List[Tuple[str, ...]]:
config = self.info.make_config(split="val")
resources = self.resources(config)
devkit_dp = resources[1].load(root)
meta_dp = Filter(devkit_dp, path_comparator("name", "meta.mat"))
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
categories_and_wnids = cast(List[Tuple[str, ...]], next(iter(meta_dp)))
categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
return categories_and_wnids
| [
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Enumerator",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.TarArchiveLoader"
] | [((3107, 3150), 're.compile', 're.compile', (['"""(?P<wnid>n\\\\d{8})_\\\\d+[.]JPEG"""'], {}), "('(?P<wnid>n\\\\d{8})_\\\\d+[.]JPEG')\n", (3117, 3150), False, 'import re\n'), ((4503, 4560), 're.compile', 're.compile', (['"""ILSVRC2012_(val|test)_(?P<id>\\\\d{8})[.]JPEG"""'], {}), "('ILSVRC2012_(val|test)_(?P<id>\\\\d{8})[.]JPEG')\n", (4513, 4560), False, 'import re\n'), ((3279, 3300), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3291, 3300), False, 'import pathlib\n'), ((3406, 3498), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['self.info.extra.wnid_to_category[wnid]'], {'categories': 'self.categories'}), '(self.info.extra.wnid_to_category[wnid], categories=self\n .categories)\n', (3425, 3498), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((4642, 4663), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (4654, 4663), False, 'import pathlib\n'), ((5008, 5100), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['self.info.extra.wnid_to_category[wnid]'], {'categories': 'self.categories'}), '(self.info.extra.wnid_to_category[wnid], categories=self\n .categories)\n', (5027, 5100), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((7072, 7104), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (7078, 7104), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((7756, 7807), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['meta_dp', 'self._extract_categories_and_wnids'], {}), '(meta_dp, self._extract_categories_and_wnids)\n', (7762, 7807), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((3994, 4028), 'torchvision.prototype.datasets.utils._internal.read_mat', 'read_mat', (['data[1]'], {'squeeze_me': '(True)'}), '(data[1], squeeze_me=True)\n', (4002, 4028), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((5873, 5891), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (5887, 5891), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((5909, 5926), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (5922, 5926), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((5944, 6041), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', "(self._prepare_train_data if config.split == 'train' else self.\n _prepare_test_data)"], {}), "(dp, self._prepare_train_data if config.split == 'train' else self.\n _prepare_test_data)\n", (5950, 6041), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6158, 6263), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['devkit_dp', '(2)', 'self._classifiy_devkit'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(devkit_dp, 2, self._classifiy_devkit, 
drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (6171, 6263), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6313, 6364), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['meta_dp', 'self._extract_categories_and_wnids'], {}), '(meta_dp, self._extract_categories_and_wnids)\n', (6319, 6364), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6438, 6490), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['label_dp'], {'decode': '(True)', 'return_path': '(False)'}), '(label_dp, decode=True, return_path=False)\n', (6448, 6490), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6647, 6670), 'torchdata.datapipes.iter.Enumerator', 'Enumerator', (['label_dp', '(1)'], {}), '(label_dp, 1)\n', (6657, 6670), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6694, 6718), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['label_dp'], {}), '(label_dp)\n', (6708, 6718), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((6742, 6765), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['label_dp'], {}), '(label_dp)\n', (6755, 6765), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((7021, 7055), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_val_data'], {}), '(dp, self._prepare_val_data)\n', (7027, 7055), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((7701, 7736), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', '"""meta.mat"""'], {}), "('name', 'meta.mat')\n", (7716, 7736), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((1095, 1163), 'torchvision.prototype.datasets.utils.DatasetInfo.read_categories_file', 'DatasetInfo.read_categories_file', (["(BUILTIN_DIR / f'{name}.categories')"], {}), "(BUILTIN_DIR / f'{name}.categories')\n", (1127, 1163), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, ManualDownloadResource\n'), ((3849, 3870), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3861, 3870), False, 'import pathlib\n'), ((5465, 5495), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (5487, 5495), False, 'from torchvision.prototype.features import Label, EncodedImage\n'), ((5834, 5854), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['dp'], {}), '(dp)\n', (5850, 5854), False, 'from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator\n'), ((6531, 6591), 'functools.partial', 'functools.partial', (['self._imagenet_label_to_wnid'], {'wnids': 
'wnids'}), '(self._imagenet_label_to_wnid, wnids=wnids)\n', (6548, 6591), False, 'import functools\n'), ((6875, 6885), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (6882, 6885), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, BUILTIN_DIR, path_comparator, getitem, read_mat, hint_sharding, hint_shuffling\n'), ((1651, 1679), 'torchvision.prototype.datasets.utils.DatasetConfig', 'DatasetConfig', ([], {'split': '"""train"""'}), "(split='train')\n", (1664, 1679), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, ManualDownloadResource\n'), ((1718, 1744), 'torchvision.prototype.datasets.utils.DatasetConfig', 'DatasetConfig', ([], {'split': '"""val"""'}), "(split='val')\n", (1731, 1744), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, ManualDownloadResource\n'), ((1780, 1807), 'torchvision.prototype.datasets.utils.DatasetConfig', 'DatasetConfig', ([], {'split': '"""test"""'}), "(split='test')\n", (1793, 1807), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, ManualDownloadResource\n')] |
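For the validation split, the ImageNet row above joins enumerated labels with image files through `IterKeyZipper`, matching items by key rather than by position. The sketch below shows that join in isolation; the toy label/image pairs and the key functions are made up for illustration, and without a `merge_fn` the zipper yields `(label_item, image_item)` tuples.

```python
from torchdata.datapipes.iter import IterableWrapper, IterKeyZipper

labels = IterableWrapper([(1, "dog"), (2, "cat")])
images = IterableWrapper([("img_2.jpeg", b"..."), ("img_1.jpeg", b"...")])


def image_key(item):
    # 'img_1.jpeg' -> 1
    return int(item[0].split("_")[1].split(".")[0])


dp = IterKeyZipper(labels, images, key_fn=lambda t: t[0], ref_key_fn=image_key, buffer_size=10)
for label_item, image_item in dp:
    print(label_item, image_item[0])  # pairs each label with the image that has the same numeric id
```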
#!python3
"""training the model in pytorch"""
from torchmodel import PHMModel
from torchdata import load_traindata
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser(description='train model')
# data parameters
parser.add_argument('--traindata', help='path to training data', default='../../../data_set/N-CMAPSS_DS02-006.h5')
parser.add_argument('--units', help='units used for training', action='append', nargs='*', type=int, default=[])
parser.add_argument('--save-dir', help='model directory', default='../../checkpoints')
parser.add_argument('--overwrite', help='overwrite old models', action='store_true', default=False)
parser.add_argument('--load-file', help='model location', default='../../checkpoints/ProgNet.pt')
parser.add_argument('--features-last', help='features in data', default=False, action='store_true')
# training hyperparameters
parser.add_argument('-b', '--batch-size', help='batch size', default=128, type=int)
parser.add_argument('-e', '--num-epochs', help='number of epochs', default=20, type=int)
# model options
parser.add_argument('--sequence', help='use sequence model', action='store_true', default=False)
parser.add_argument('--restore', help='restore pre-trained model', action='store_true', default=False)
# training options
parser.add_argument('--cuda', help='use cuda', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
params = parse_args()
train_loader = load_traindata(params)
model = PHMModel(params, trainable=True)
if params.restore:
model.load_model(params.load_file)
print(f'done loading checkpoint: {params.load_file}')
model.train(train_loader)
| [
"torchdata.load_traindata"
] | [((186, 227), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""train model"""'}), "(description='train model')\n", (200, 227), False, 'from argparse import ArgumentParser\n'), ((1534, 1556), 'torchdata.load_traindata', 'load_traindata', (['params'], {}), '(params)\n', (1548, 1556), False, 'from torchdata import load_traindata\n'), ((1569, 1601), 'torchmodel.PHMModel', 'PHMModel', (['params'], {'trainable': '(True)'}), '(params, trainable=True)\n', (1577, 1601), False, 'from torchmodel import PHMModel\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest import TestCase
from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.dataloader2 import READING_SERVICE_STATE_KEY_NAME, SERIALIZED_DATAPIPE_KEY_NAME
from torchdata.datapipes.iter import IterableWrapper
class DataLoader2Test(TestCase):
def test_dataloader2(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader = DataLoader2(datapipe=test_data_pipe)
expected_batch = 0
for batch in iter(data_loader):
self.assertEqual(batch, expected_batch)
expected_batch += 1
def test_dataloader2_shutdown(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader = DataLoader2(datapipe=test_data_pipe)
data_loader.shutdown()
def test_dataloader2_state_dict(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader = DataLoader2(datapipe=test_data_pipe)
state = data_loader.state_dict()
self.assertIsNotNone(state)
self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME])
self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME])
data_loader.shutdown()
| [
"torchdata.dataloader2.DataLoader2"
] | [((595, 631), 'torchdata.dataloader2.DataLoader2', 'DataLoader2', ([], {'datapipe': 'test_data_pipe'}), '(datapipe=test_data_pipe)\n', (606, 631), False, 'from torchdata.dataloader2 import DataLoader2\n'), ((907, 943), 'torchdata.dataloader2.DataLoader2', 'DataLoader2', ([], {'datapipe': 'test_data_pipe'}), '(datapipe=test_data_pipe)\n', (918, 943), False, 'from torchdata.dataloader2 import DataLoader2\n'), ((1100, 1136), 'torchdata.dataloader2.DataLoader2', 'DataLoader2', ([], {'datapipe': 'test_data_pipe'}), '(datapipe=test_data_pipe)\n', (1111, 1136), False, 'from torchdata.dataloader2 import DataLoader2\n')] |
from torchtext._internal.module_utils import is_module_available
from typing import Union, Tuple
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
import os
URL = {
'train': "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
'dev': "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
}
MD5 = {
'train': "981b29407e0affa3b1b156f72073b945",
'dev': "3e85deb501d4e538b6bc56f786231552",
}
NUM_LINES = {
'train': 87599,
'dev': 10570,
}
DATASET_NAME = "SQuAD1"
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
return cache_dp.parse_json_files().read_squad()
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.IterableWrapper"
] | [((101, 133), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (120, 133), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((722, 764), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES'}), '(num_lines=NUM_LINES)\n', (743, 764), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((766, 818), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (791, 818), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((820, 858), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev')"], {}), "(('train', 'dev'))\n", (840, 858), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((1115, 1144), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1130, 1144), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1506, 1536), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'mode': '"""b"""'}), "(cache_dp, mode='b')\n", (1516, 1536), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((924, 956), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (943, 956), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1424, 1444), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (1434, 1444), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1274, 1293), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1290, 1293), False, 'import os\n'), ((1334, 1362), 'os.path.basename', 'os.path.basename', (['URL[split]'], {}), '(URL[split])\n', (1350, 1362), False, 'import os\n')] |
import os
from pathlib import Path
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import _add_docstring_header
from torchtext.data.datasets_utils import _create_dataset_directory
from torchtext.data.datasets_utils import _wrap_split_argument
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader
URL = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
MD5 = '7c2ac02c03563afcf9b574c7e56c153a'
NUM_LINES = {
'train': 25000,
'test': 25000,
}
_PATH = 'aclImdb_v1.tar.gz'
DATASET_NAME = "IMDB"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def IMDB(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _PATH),
hash_dict={os.path.join(root, _PATH): MD5}, hash_type="md5"
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
labels = {"neg", "pos"}
decompressed_folder = "aclImdb_v1"
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: [os.path.join(root, decompressed_folder, split, label) for label in labels]
)
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b")
cache_decompressed_dp = cache_decompressed_dp.read_from_tar()
def filter_imdb_data(key, fname):
# eg. fname = "aclImdb/train/neg/12416_3.txt"
*_, split, label, file = Path(fname).parts
return key == split and label in labels
cache_decompressed_dp = cache_decompressed_dp.filter(lambda t: filter_imdb_data(split, t[0]))
# eg. "aclImdb/train/neg/12416_3.txt" -> "neg"
cache_decompressed_dp = cache_decompressed_dp.map(lambda t: (Path(t[0]).parts[-2], t[1]))
cache_decompressed_dp = cache_decompressed_dp.readlines(decode=True)
cache_decompressed_dp = cache_decompressed_dp.lines_to_paragraphs() # group by label in cache file
cache_decompressed_dp = cache_decompressed_dp.end_caching(
mode="wt", filepath_fn=lambda x: os.path.join(root, decompressed_folder, split, x)
)
data_dp = FileOpener(cache_decompressed_dp, mode="t")
# get label from cache file, eg. "aclImdb_v1/train/neg" -> "neg"
return data_dp.readlines().map(lambda t: (Path(t[0]).parts[-1], t[1]))
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.IterableWrapper"
] | [((332, 364), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (351, 364), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((673, 730), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES', 'num_classes': '(2)'}), '(num_lines=NUM_LINES, num_classes=2)\n', (694, 730), False, 'from torchtext.data.datasets_utils import _add_docstring_header\n'), ((732, 784), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (757, 784), False, 'from torchtext.data.datasets_utils import _create_dataset_directory\n'), ((786, 825), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'test')"], {}), "(('train', 'test'))\n", (806, 825), False, 'from torchtext.data.datasets_utils import _wrap_split_argument\n'), ((1080, 1102), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1095, 1102), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader\n'), ((1658, 1701), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1668, 1701), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader\n'), ((2557, 2600), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""t"""'}), "(cache_decompressed_dp, mode='t')\n", (2567, 2600), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader\n'), ((889, 921), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (908, 921), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1309, 1340), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (1319, 1340), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader\n'), ((1894, 1905), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (1898, 1905), False, 'from pathlib import Path\n'), ((1182, 1207), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1194, 1207), False, 'import os\n'), ((1228, 1253), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1240, 1253), False, 'import os\n'), ((2486, 2535), 'os.path.join', 'os.path.join', (['root', 'decompressed_folder', 'split', 'x'], {}), '(root, decompressed_folder, split, x)\n', (2498, 2535), False, 'import os\n'), ((1549, 1602), 'os.path.join', 'os.path.join', (['root', 'decompressed_folder', 'split', 'label'], {}), '(root, decompressed_folder, split, label)\n', (1561, 1602), False, 'import os\n'), ((2176, 2186), 'pathlib.Path', 'Path', (['t[0]'], {}), '(t[0])\n', (2180, 2186), False, 'from pathlib import Path\n'), ((2716, 2726), 'pathlib.Path', 'Path', (['t[0]'], {}), '(t[0])\n', (2720, 2726), False, 'from pathlib import Path\n')] |
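Both the SQuAD1 and IMDB builders above follow the same on-disk caching idiom: wrap the URL in an `IterableWrapper`, attach `on_disk_cache` with a `filepath_fn`, download through `HttpReader`, and finish with `end_caching`. Here is that idiom stripped to its core, with a placeholder URL and cache directory and the hash check omitted.

```python
import os

from torchdata.datapipes.iter import HttpReader, IterableWrapper

CACHE_DIR = "/tmp/torchdata_cache"    # placeholder location
URL = "https://example.com/data.csv"  # placeholder URL

url_dp = IterableWrapper([URL])
cache_dp = url_dp.on_disk_cache(
    filepath_fn=lambda url: os.path.join(CACHE_DIR, os.path.basename(url)),
)
# Downloads on the first run, then serves the cached file afterwards.
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
```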
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Callable, Optional
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
class ReadingServiceInterface(ABC):
@abstractmethod
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
"""
        ReadingService traverses the datapipe graph, finds the executable part,
        adapts it into its own datapipe, and replaces it in the datapipe graph.
        Called once, when the DataLoader iterator is created for the first time.
Args:
datapipe: IterDataPipe. Original datapipe.
        Returns:
            Adapted IterDataPipe.
Example:
            MultiProcessingReadingService finds information about sharding,
            separates the graph into multiple pieces, and reconnects them using queues.
Spawns processes/threads.
"""
pass
def finalize(self) -> None:
"""
        ReadingService cleans up internal states.
        Called during DataLoader shutdown and __del__.
Example:
            MultiProcessingReadingService invalidates states & handles persistent workers.
"""
pass
def initialize_iteration(self) -> None:
"""
        ReadingService spins up the service.
        Called at the beginning of every request for a DataLoader iterator.
Example:
MultiProcessingReadingService starts prefetching items from the graph.
"""
pass
def finalize_iteration(self) -> None:
"""
        ReadingService ends the service.
        Example:
            MultiProcessingReadingService cleans up processes.
"""
pass
class CheckpointableReadingServiceInterface(ReadingServiceInterface):
@abstractmethod
def checkpoint(self) -> bytes:
"""
        ReadingService serializes backend states.
Called in DataLoader checkpoint.
"""
pass
@abstractmethod
def restore(self, datapipe: IterDataPipe, serialized_state: bytes) -> IterDataPipe:
"""
        ReadingService adapts the datapipe and consumes the serialized state.
        Called once, when the DataLoader iterator is created for the first time.
        Counterpart of `initialize`, which adapts the datapipe from scratch.
Returns:
Adapted IterDataPipe.
"""
pass
class MultiProcessingReadingService(ReadingServiceInterface):
num_workers: int
pin_memory: bool
timeout: float
worker_init_fn: Optional[Callable[[int], None]]
prefetch_factor: int
persistent_workers: bool
def __init__(
self,
num_workers: int = 0,
pin_memory: bool = False,
timeout: float = 0,
worker_init_fn: Optional[Callable[[int], None]] = None,
multiprocessing_context=None,
prefetch_factor: int = 2,
persistent_workers: bool = False,
) -> None:
self.num_workers = num_workers
self.pin_memory = pin_memory
self.timeout = timeout
self.worker_init_fn = worker_init_fn
self.multiprocessing_context = multiprocessing_context
self.prefetch_factor = prefetch_factor
self.persistent_workers = persistent_workers
self.dl_: Optional[DataLoader] = None
# Wrap the DataLoader with IterableWrapper to respect type annotation
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
self.dl_ = DataLoader(
datapipe,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
timeout=self.timeout,
worker_init_fn=self.worker_init_fn,
multiprocessing_context=self.multiprocessing_context,
prefetch_factor=self.prefetch_factor,
persistent_workers=self.persistent_workers,
)
return IterableWrapper(self.dl_) # type: ignore[return-value]
def finalize(self) -> None:
if self.persistent_workers and self.dl_ is not None and self.dl_._iterator is not None:
self.dl_._iterator._shutdown_workers() # type: ignore[attr-defined]
self.dl_._iterator = None
| [
"torchdata.datapipes.iter.IterableWrapper"
] | [((3588, 3874), 'torch.utils.data.DataLoader', 'DataLoader', (['datapipe'], {'num_workers': 'self.num_workers', 'pin_memory': 'self.pin_memory', 'timeout': 'self.timeout', 'worker_init_fn': 'self.worker_init_fn', 'multiprocessing_context': 'self.multiprocessing_context', 'prefetch_factor': 'self.prefetch_factor', 'persistent_workers': 'self.persistent_workers'}), '(datapipe, num_workers=self.num_workers, pin_memory=self.\n pin_memory, timeout=self.timeout, worker_init_fn=self.worker_init_fn,\n multiprocessing_context=self.multiprocessing_context, prefetch_factor=\n self.prefetch_factor, persistent_workers=self.persistent_workers)\n', (3598, 3874), False, 'from torch.utils.data import DataLoader\n'), ((3983, 4008), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['self.dl_'], {}), '(self.dl_)\n', (3998, 4008), False, 'from torchdata.datapipes.iter import IterableWrapper, IterDataPipe\n')] |
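A reading service only takes effect once it is handed to a `DataLoader2`. The sketch below assumes that the `DataLoader2` constructor accepts a `reading_service` argument and that `MultiProcessingReadingService` is importable from `torchdata.dataloader2` — neither detail is shown in the rows above — and the worker count is arbitrary.

```python
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10))
rs = MultiProcessingReadingService(num_workers=2)
dl = DataLoader2(datapipe=dp, reading_service=rs)

for item in dl:
    print(item)
dl.shutdown()  # same shutdown call exercised by DataLoader2Test above
```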
from typing import Any, Dict, List, cast
import torch
from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
KaggleDownloadResource,
)
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label, Image
class FER2013(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"fer2013",
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"),
valid_options=dict(split=("train", "test")),
)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = KaggleDownloadResource(
cast(str, self.info.homepage),
file_name=f"{config.split}.csv.zip",
sha256=self._CHECKSUMS[config.split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self.categories) if label_id is not None else None,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.CSVDictParser"
] | [((1896, 1913), 'torchdata.datapipes.iter.CSVDictParser', 'CSVDictParser', (['dp'], {}), '(dp)\n', (1909, 1913), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser\n'), ((1927, 1945), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (1941, 1945), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1959, 1976), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1972, 1976), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1992, 2024), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (1998, 2024), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser\n'), ((1162, 1191), 'typing.cast', 'cast', (['str', 'self.info.homepage'], {}), '(str, self.info.homepage)\n', (1166, 1191), False, 'from typing import Any, Dict, List, cast\n')] |
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| [
"torchdata.datapipes.iter.Mapper"
] | [((2199, 2217), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (2213, 2217), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((2231, 2248), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (2244, 2248), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((2264, 2296), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (2270, 2296), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter\n'), ((584, 610), 'torchvision.prototype.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (604, 610), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((1295, 1449), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://openaipublic.azureedge.net/clip/data/country211.tgz"""'], {'sha256': '"""c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c"""'}), "('https://openaipublic.azureedge.net/clip/data/country211.tgz',\n sha256='c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c')\n", (1307, 1449), False, 'from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource\n'), ((2122, 2184), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""parent.parent.name"""', 'self._split_folder_name'], {}), "('parent.parent.name', self._split_folder_name)\n", (2137, 2184), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((1623, 1641), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1635, 1641), False, 'import pathlib\n'), ((1693, 1751), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['category'], {'categories': 'self._categories'}), '(category, categories=self._categories)\n', (1712, 1751), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1794, 1824), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (1816, 1824), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1927, 1948), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1939, 1948), False, 'import pathlib\n'), ((2606, 2624), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2618, 2624), False, 'import pathlib\n')] |
import tkinter as tk
import tkinter.filedialog
import tkinter.font
from functools import lru_cache
import torch
import torchvision.transforms
from PIL import ImageTk, Image
from torch.autograd import Variable
from torchdata.mpii import MPII_Joint_Names, MpiiData
from dsnt.data import MPIIDataset
from dsnt.util import draw_skeleton
@lru_cache(maxsize=32)
def generate_heatmaps(model, mpii_data: MpiiData, index):
img = mpii_data.load_image(index)
orig_width, orig_height = img.size
bb = mpii_data.get_bounding_box(index)
bb = [int(round(x)) for x in bb]
size = bb[2] - bb[0]
img = img.crop(bb)
img = img.resize((model.image_specs.size, model.image_specs.size), Image.BILINEAR)
print('Running model on: {}'.format(mpii_data.image_names[index]))
img_tensor = model.image_specs.convert(img, MPIIDataset)
img_tensor = img_tensor.unsqueeze(0).type(torch.cuda.FloatTensor)
model(Variable(img_tensor, volatile=True))
hms_tensor = model.heatmaps.data.cpu()[0]
heatmaps = []
for hm_tensor in hms_tensor.split(1, 0):
# Scale and clamp pixel values
hm_tensor.div_(hm_tensor.max()).clamp_(0, 1)
# Convert tensor to PIL Image
hm_img = torchvision.transforms.ToPILImage()(hm_tensor)
hm_img = hm_img.resize((size, size), Image.NEAREST)
# "Uncrop" heatmap to match original image size
hm_padded = Image.new('RGB', (orig_width, orig_height), (0, 0, 0))
hm_padded.paste(hm_img, (bb[0], bb[1]))
# Add heatmap to list
heatmaps.append(hm_padded)
return heatmaps
class PoseResultsFrame(tk.Frame):
SKELETON_NONE = 'None'
SKELETON_TRUTH = 'Ground truth'
SKELETON_PREDICTION = 'Prediction'
def __init__(self, mpii_data: MpiiData, subset_indices, preds, model=None):
super().__init__()
self.mpii_data = mpii_data
self.subset_indices = subset_indices
self.preds = preds
self.model = model
self.savable_image = None
self.init_gui()
@property
def cur_sample(self):
return int(self.var_cur_sample.get())
@cur_sample.setter
def cur_sample(self, value):
self.var_cur_sample.set(str(value))
@property
def crop_as_input(self):
return self.var_crop_as_input.get() == 1
@crop_as_input.setter
def crop_as_input(self, value):
self.var_crop_as_input.set(1 if value else 0)
@property
def show_heatmap(self):
return self.var_show_heatmap.get() == 1
def update_image(self):
index = self.subset_indices[self.cur_sample]
self.var_index.set('Index: {:04d}'.format(index))
if self.show_heatmap:
img = self.get_joint_heatmap()
else:
img = self.mpii_data.load_image(index)
if self.var_skeleton.get() == self.SKELETON_TRUTH:
draw_skeleton(img, self.mpii_data.keypoints[index], self.mpii_data.keypoint_masks[index])
elif self.var_skeleton.get() == self.SKELETON_PREDICTION:
draw_skeleton(img, self.preds[self.cur_sample], self.mpii_data.keypoint_masks[index])
if self.crop_as_input:
# Calculate crop used for input
bb = self.mpii_data.get_bounding_box(index)
img = img.crop(bb)
self.savable_image = img.copy()
width = self.image_panel.winfo_width()
height = self.image_panel.winfo_height() - 2
img.thumbnail((width, height), Image.ANTIALIAS)
tkimg = ImageTk.PhotoImage(img)
self.image_panel.configure(image=tkimg)
self.image_panel.image = tkimg
def on_key(self, event):
"""Handle keyboard event."""
cur_sample = self.cur_sample
if event.keysym == 'Escape':
self.master.destroy()
return
if event.keysym == 'Right':
cur_sample += 1
if event.keysym == 'Left':
cur_sample -= 1
if event.keysym == 'Home':
cur_sample = 0
if event.keysym == 'End':
cur_sample = len(self.subset_indices) - 1
self.cur_sample = cur_sample % len(self.subset_indices)
self.update_image()
def on_key_cur_sample(self, event):
if event.keysym == 'Return':
self.update_image()
self.image_panel.focus_set()
if event.keysym == 'Escape':
self.image_panel.focus_set()
def on_press_save_image(self):
if self.savable_image is None:
return
index = self.subset_indices[self.cur_sample]
filename = tk.filedialog.asksaveasfilename(
defaultextension='.png',
initialfile='image_{:04d}.png'.format(index))
if filename:
self.savable_image.save(filename)
def get_joint_heatmap(self):
joint_id = MPII_Joint_Names.index(self.var_joint.get())
index = self.subset_indices[self.cur_sample]
heatmaps = generate_heatmaps(self.model, self.mpii_data, index)
return heatmaps[joint_id].copy()
def init_gui(self):
self.master.title('Pose estimation results explorer')
toolbar = tk.Frame(self.master)
self.var_index = tk.StringVar()
lbl_index = tk.Label(toolbar, width=12, textvariable=self.var_index)
lbl_index.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.var_cur_sample = tk.StringVar()
self.var_cur_sample.set('0')
txt_cur_sample = tk.Spinbox(toolbar,
textvariable=self.var_cur_sample,
command=self.update_image,
wrap=True,
from_=0,
to=len(self.subset_indices) - 1,
font=tk.font.Font(size=12))
txt_cur_sample.bind('<Key>', self.on_key_cur_sample)
txt_cur_sample.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.txt_cur_sample = txt_cur_sample
lbl_skeleton = tk.Label(toolbar, text='Skeleton:')
lbl_skeleton.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.var_skeleton = tk.StringVar()
self.var_skeleton.set(self.SKELETON_PREDICTION)
opt_skeleton = tk.OptionMenu(
toolbar, self.var_skeleton, self.SKELETON_NONE, self.SKELETON_TRUTH,
self.SKELETON_PREDICTION, command=lambda event: self.update_image())
opt_skeleton.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.var_crop_as_input = tk.IntVar()
cb_crop = tk.Checkbutton(toolbar, text='Crop',
variable=self.var_crop_as_input,
command=self.update_image)
cb_crop.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
btn_save = tk.Button(toolbar, text='Save image',
command=self.on_press_save_image)
btn_save.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
lbl_joint = tk.Label(toolbar, text='Heatmap joint:')
lbl_joint.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.var_joint = tk.StringVar()
self.var_joint.set(MPII_Joint_Names[0])
opt_joint = tk.OptionMenu(
toolbar, self.var_joint, *MPII_Joint_Names,
command=lambda event: self.update_image())
opt_joint.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
self.var_show_heatmap = tk.IntVar()
cb_hm = tk.Checkbutton(toolbar, text='Show heatmap',
variable=self.var_show_heatmap,
command=self.update_image)
cb_hm.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)
if self.model is None:
cb_hm['state'] = tk.DISABLED
toolbar.pack(side=tk.TOP, fill=tk.X)
image_panel = tk.Label(self.master)
image_panel.configure(background='#333333')
image_panel.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=tk.YES)
image_panel.bind('<Key>', self.on_key)
image_panel.focus_set()
image_panel.bind('<Button-1>', lambda event: event.widget.focus_set())
image_panel.bind('<Configure>', lambda event: self.update_image())
self.image_panel = image_panel
self.pack()
def run_gui(preds, subset, model=None):
mpii_data = MpiiData('/datasets/mpii')
subset_indices = mpii_data.subset_indices(subset)
root = tk.Tk()
root.geometry("1280x720+0+0")
app = PoseResultsFrame(mpii_data, subset_indices, preds, model)
root.update()
app.update_image()
root.mainloop()
| [
"torchdata.mpii.MpiiData"
] | [((338, 359), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (347, 359), False, 'from functools import lru_cache\n'), ((8328, 8354), 'torchdata.mpii.MpiiData', 'MpiiData', (['"""/datasets/mpii"""'], {}), "('/datasets/mpii')\n", (8336, 8354), False, 'from torchdata.mpii import MPII_Joint_Names, MpiiData\n'), ((8421, 8428), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (8426, 8428), True, 'import tkinter as tk\n'), ((924, 959), 'torch.autograd.Variable', 'Variable', (['img_tensor'], {'volatile': '(True)'}), '(img_tensor, volatile=True)\n', (932, 959), False, 'from torch.autograd import Variable\n'), ((1402, 1456), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(orig_width, orig_height)', '(0, 0, 0)'], {}), "('RGB', (orig_width, orig_height), (0, 0, 0))\n", (1411, 1456), False, 'from PIL import ImageTk, Image\n'), ((3503, 3526), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (3521, 3526), False, 'from PIL import ImageTk, Image\n'), ((5146, 5167), 'tkinter.Frame', 'tk.Frame', (['self.master'], {}), '(self.master)\n', (5154, 5167), True, 'import tkinter as tk\n'), ((5194, 5208), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (5206, 5208), True, 'import tkinter as tk\n'), ((5229, 5285), 'tkinter.Label', 'tk.Label', (['toolbar'], {'width': '(12)', 'textvariable': 'self.var_index'}), '(toolbar, width=12, textvariable=self.var_index)\n', (5237, 5285), True, 'import tkinter as tk\n'), ((5381, 5395), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (5393, 5395), True, 'import tkinter as tk\n'), ((6035, 6070), 'tkinter.Label', 'tk.Label', (['toolbar'], {'text': '"""Skeleton:"""'}), "(toolbar, text='Skeleton:')\n", (6043, 6070), True, 'import tkinter as tk\n'), ((6166, 6180), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (6178, 6180), True, 'import tkinter as tk\n'), ((6538, 6549), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (6547, 6549), True, 'import tkinter as tk\n'), ((6568, 6668), 'tkinter.Checkbutton', 'tk.Checkbutton', (['toolbar'], {'text': '"""Crop"""', 'variable': 'self.var_crop_as_input', 'command': 'self.update_image'}), "(toolbar, text='Crop', variable=self.var_crop_as_input,\n command=self.update_image)\n", (6582, 6668), True, 'import tkinter as tk\n'), ((6813, 6884), 'tkinter.Button', 'tk.Button', (['toolbar'], {'text': '"""Save image"""', 'command': 'self.on_press_save_image'}), "(toolbar, text='Save image', command=self.on_press_save_image)\n", (6822, 6884), True, 'import tkinter as tk\n'), ((6998, 7038), 'tkinter.Label', 'tk.Label', (['toolbar'], {'text': '"""Heatmap joint:"""'}), "(toolbar, text='Heatmap joint:')\n", (7006, 7038), True, 'import tkinter as tk\n'), ((7128, 7142), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (7140, 7142), True, 'import tkinter as tk\n'), ((7434, 7445), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (7443, 7445), True, 'import tkinter as tk\n'), ((7462, 7569), 'tkinter.Checkbutton', 'tk.Checkbutton', (['toolbar'], {'text': '"""Show heatmap"""', 'variable': 'self.var_show_heatmap', 'command': 'self.update_image'}), "(toolbar, text='Show heatmap', variable=self.var_show_heatmap,\n command=self.update_image)\n", (7476, 7569), True, 'import tkinter as tk\n'), ((7833, 7854), 'tkinter.Label', 'tk.Label', (['self.master'], {}), '(self.master)\n', (7841, 7854), True, 'import tkinter as tk\n'), ((2872, 2966), 'dsnt.util.draw_skeleton', 'draw_skeleton', (['img', 'self.mpii_data.keypoints[index]', 'self.mpii_data.keypoint_masks[index]'], {}), '(img, 
self.mpii_data.keypoints[index], self.mpii_data.\n keypoint_masks[index])\n', (2885, 2966), False, 'from dsnt.util import draw_skeleton\n'), ((3040, 3130), 'dsnt.util.draw_skeleton', 'draw_skeleton', (['img', 'self.preds[self.cur_sample]', 'self.mpii_data.keypoint_masks[index]'], {}), '(img, self.preds[self.cur_sample], self.mpii_data.\n keypoint_masks[index])\n', (3053, 3130), False, 'from dsnt.util import draw_skeleton\n'), ((5813, 5834), 'tkinter.font.Font', 'tk.font.Font', ([], {'size': '(12)'}), '(size=12)\n', (5825, 5834), True, 'import tkinter as tk\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torchdata.datapipes.iter import (
GDriveReader,
IterableWrapper,
)
from .utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
_PATH = "amazon_review_polarity_csv.tar.gz"
_EXTRACTED_FILES = {
"train": os.path.join(_PATH, "amazon_review_polarity_csv", "train.csv"),
"test": os.path.join(_PATH, "amazon_review_polarity_csv", "test.csv"),
}
_EXTRACTED_FILES_MD5 = {
"train": "520937107c39a2d1d1f66cd410e9ed9e",
"test": "f4c8bded2ecbde5f996b675db6228f16",
}
DATASET_NAME = "AmazonReviewPolarity"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root, split):
"""Demonstrating caching, extraction and sanity check pipelines."""
# cache data on-disk
cache_dp = IterableWrapper([URL]).on_disk_cache(
GDriveReader,
op_map=lambda x: (x[0], x[1].read()),
filepath_fn=lambda x: os.path.join(root, x),
)
# do sanity check
check_cache_dp = cache_dp.check_hash({os.path.join(root, _PATH): MD5}, "md5")
# stack TAR extractor on top of loader DP
extracted_files = check_cache_dp.read_from_tar()
# filter files as necessary
filter_extracted_files = extracted_files.filter(lambda x: split in x[0])
# stack sanity checker on top of extracted files
check_filter_extracted_files = filter_extracted_files.check_hash(
{os.path.normpath(os.path.join(root, _EXTRACTED_FILES[split])): _EXTRACTED_FILES_MD5[split]},
"md5",
)
# stack CSV reader and do some mapping
return check_filter_extracted_files.parse_csv().map(lambda t: (int(t[0]), t[1]))
| [
"torchdata.datapipes.iter.IterableWrapper"
] | [((516, 578), 'os.path.join', 'os.path.join', (['_PATH', '"""amazon_review_polarity_csv"""', '"""train.csv"""'], {}), "(_PATH, 'amazon_review_polarity_csv', 'train.csv')\n", (528, 578), False, 'import os\n'), ((592, 653), 'os.path.join', 'os.path.join', (['_PATH', '"""amazon_review_polarity_csv"""', '"""test.csv"""'], {}), "(_PATH, 'amazon_review_polarity_csv', 'test.csv')\n", (604, 653), False, 'import os\n'), ((1129, 1151), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1144, 1151), False, 'from torchdata.datapipes.iter import GDriveReader, IterableWrapper\n'), ((1359, 1384), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1371, 1384), False, 'import os\n'), ((1265, 1286), 'os.path.join', 'os.path.join', (['root', 'x'], {}), '(root, x)\n', (1277, 1286), False, 'import os\n'), ((1759, 1802), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1771, 1802), False, 'import os\n')] |
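The pipeline above chains on-disk caching, an MD5 sanity check, TAR extraction, a split filter, and CSV parsing into a stream of (label, text) pairs. A minimal consumption sketch, assuming the decorators resolve `root` and `split` in the usual torchtext fashion and that "./data" is a writable local directory:

# Usage sketch only; "./data" is an assumed local directory.
from torch.utils.data import DataLoader

train_dp = AmazonReviewPolarity(root="./data", split="train")
# Each element is (label, review_text); a plain DataLoader can batch the iterable datapipe.
loader = DataLoader(train_dp, batch_size=8)
labels, texts = next(iter(loader))
print(labels.shape, texts[0])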
# Copyright (c) Facebook, Inc. and its affiliates.
import re
from typing import Iterator, Optional, Tuple
from urllib.parse import urlparse
import requests
from requests.exceptions import HTTPError, RequestException
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
def _get_response_from_http(url: str, *, timeout: Optional[float]) -> Tuple[str, StreamWrapper]:
try:
with requests.Session() as session:
if timeout is None:
r = session.get(url, stream=True)
else:
r = session.get(url, timeout=timeout, stream=True)
return url, StreamWrapper(r.raw)
except HTTPError as e:
raise Exception(f"Could not get the file. [HTTP Error] {e.response}.")
except RequestException as e:
raise Exception(f"Could not get the file at {url}. [RequestException] {e.response}.")
except Exception:
raise
class HTTPReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes file URLs (HTTP URLs pointing to files), and yields tuples of file URL and IO stream.
Args:
source_datapipe: a DataPipe that contains URLs
timeout: timeout in seconds for HTTP request
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, HttpReader
>>> file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
>>> http_reader_dp = HttpReader(IterableWrapper([file_url]))
>>> reader_dp = http_reader_dp.readlines()
>>> it = iter(reader_dp)
>>> path, line = next(it)
>>> path
https://raw.githubusercontent.com/pytorch/data/main/LICENSE
>>> line
b'BSD 3-Clause License'
"""
def __init__(self, source_datapipe: IterDataPipe[str], timeout: Optional[float] = None) -> None:
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.timeout = timeout
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
yield _get_response_from_http(url, timeout=self.timeout)
def __len__(self) -> int:
return len(self.source_datapipe)
def _get_response_from_google_drive(url: str, *, timeout: Optional[float]) -> Tuple[str, StreamWrapper]:
confirm_token = None
with requests.Session() as session:
if timeout is None:
response = session.get(url, stream=True)
else:
response = session.get(url, timeout=timeout, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token is None:
if "Quota exceeded" in str(response.content):
raise RuntimeError(f"Google drive link {url} is currently unavailable, because the quota was exceeded.")
if confirm_token:
url = url + "&confirm=" + confirm_token
if timeout is None:
response = session.get(url, stream=True)
else:
response = session.get(url, timeout=timeout, stream=True)
if "content-disposition" not in response.headers:
raise RuntimeError("Internal error: headers don't contain content-disposition.")
filename = re.findall('filename="(.+)"', response.headers["content-disposition"])
if filename is None:
raise RuntimeError("Filename could not be autodetected")
return filename[0], StreamWrapper(response.raw)
class GDriveReaderDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes URLs pointing at GDrive files, and yields tuples of file name and IO stream.
Args:
source_datapipe: a DataPipe that contains URLs to GDrive files
timeout: timeout in seconds for HTTP request
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, GDriveReader
>>> gdrive_file_url = "https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile"
>>> gdrive_reader_dp = GDriveReader(IterableWrapper([gdrive_file_url]))
>>> reader_dp = gdrive_reader_dp.readlines()
>>> it = iter(reader_dp)
>>> path, line = next(it)
>>> path
https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile
>>> line
<First line from the GDrive File>
"""
source_datapipe: IterDataPipe[str]
def __init__(self, source_datapipe: IterDataPipe[str], *, timeout: Optional[float] = None) -> None:
self.source_datapipe = source_datapipe
self.timeout = timeout
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
yield _get_response_from_google_drive(url, timeout=self.timeout)
def __len__(self) -> int:
return len(self.source_datapipe)
class OnlineReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes file URLs (can be HTTP URLs pointing to files or URLs to GDrive files), and
yields tuples of file URL and IO stream.
Args:
source_datapipe: a DataPipe that contains URLs
timeout: timeout in seconds for HTTP request
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, OnlineReader
>>> file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
>>> online_reader_dp = OnlineReader(IterableWrapper([file_url]))
>>> reader_dp = online_reader_dp.readlines()
>>> it = iter(reader_dp)
>>> path, line = next(it)
>>> path
https://raw.githubusercontent.com/pytorch/data/main/LICENSE
>>> line
b'BSD 3-Clause License'
"""
source_datapipe: IterDataPipe[str]
def __init__(self, source_datapipe: IterDataPipe[str], *, timeout: Optional[float] = None) -> None:
self.source_datapipe = source_datapipe
self.timeout = timeout
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc):
yield _get_response_from_google_drive(url, timeout=self.timeout)
else:
yield _get_response_from_http(url, timeout=self.timeout)
def __len__(self) -> int:
return len(self.source_datapipe)
| [
"torchdata.datapipes.utils.StreamWrapper"
] | [((2338, 2356), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2354, 2356), False, 'import requests\n'), ((3293, 3363), 're.findall', 're.findall', (['"""filename="(.+)\\""""', "response.headers['content-disposition']"], {}), '(\'filename="(.+)"\', response.headers[\'content-disposition\'])\n', (3303, 3363), False, 'import re\n'), ((3486, 3513), 'torchdata.datapipes.utils.StreamWrapper', 'StreamWrapper', (['response.raw'], {}), '(response.raw)\n', (3499, 3513), False, 'from torchdata.datapipes.utils import StreamWrapper\n'), ((441, 459), 'requests.Session', 'requests.Session', ([], {}), '()\n', (457, 459), False, 'import requests\n'), ((659, 679), 'torchdata.datapipes.utils.StreamWrapper', 'StreamWrapper', (['r.raw'], {}), '(r.raw)\n', (672, 679), False, 'from torchdata.datapipes.utils import StreamWrapper\n'), ((6037, 6050), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (6045, 6050), False, 'from urllib.parse import urlparse\n'), ((6067, 6120), 're.match', 're.match', (['"""(drive|docs)[.]google[.]com"""', 'parts.netloc'], {}), "('(drive|docs)[.]google[.]com', parts.netloc)\n", (6075, 6120), False, 'import re\n')] |
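OnlineReader's dispatch is the practical detail here: it inspects the URL's netloc and routes drive/docs.google.com links through the confirm-token flow, while every other URL takes the plain HTTP path. A short sketch, assuming network access; the Google Drive ID is the same placeholder used in the docstrings and must be replaced with a real file ID:

from torchdata.datapipes.iter import IterableWrapper, OnlineReader

urls = [
    "https://raw.githubusercontent.com/pytorch/data/main/LICENSE",
    "https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile",  # placeholder ID
]
reader_dp = OnlineReader(IterableWrapper(urls), timeout=30)
for name_or_url, stream in reader_dp:
    # HTTP entries yield the URL itself; GDrive entries yield the detected filename.
    print(name_or_url, len(stream.read()))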
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
CSVParser,
)
from torchvision.prototype.datasets.utils import (
Dataset,
HttpResource,
OnlineResource,
)
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Image, OneHotLabel
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
| [
"torchdata.datapipes.iter.CSVParser",
"torchdata.datapipes.iter.Mapper"
] | [((1056, 1239), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"""'], {'sha256': '"""f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1"""'}), "(\n 'http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data'\n , sha256='f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1'\n )\n", (1068, 1239), False, 'from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource\n'), ((1791, 1819), 'torchdata.datapipes.iter.CSVParser', 'CSVParser', (['dp'], {'delimiter': '""" """'}), "(dp, delimiter=' ')\n", (1800, 1819), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVParser\n'), ((1833, 1851), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (1847, 1851), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1865, 1882), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1878, 1882), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((1898, 1930), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (1904, 1930), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVParser\n')] |
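Each line of semeion.data is whitespace-separated: 256 grayscale pixel values followed by a 10-wide one-hot digit label, which is what `_prepare_sample` slices apart. A standalone sketch with a synthetic row (not real dataset content) mirroring the same reshaping:

import torch

# Synthetic record standing in for one parsed CSV row: 256 pixels, then a one-hot "3".
row = ["1.0"] * 256 + ["0", "0", "0", "1", "0", "0", "0", "0", "0", "0"]
pixels, one_hot = row[:256], row[256:]
image = torch.tensor([float(p) for p in pixels], dtype=torch.float).reshape(16, 16)
label = int(torch.tensor([int(v) for v in one_hot]).argmax())
print(image.shape, label)  # torch.Size([16, 16]) 3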
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import expecttest
import os
import unittest
import warnings
from torchdata.datapipes.iter import (
HttpReader,
IterableWrapper,
)
from _utils._common_utils_for_test import (
create_temp_dir,
)
class TestDataPipeRemoteIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
def tearDown(self):
try:
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}")
def test_http_reader_iterdatapipe(self):
file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
expected_file_name = "LICENSE"
expected_MD5_hash = "4aabe940637d4389eca42ac1a0e874ec"
http_reader_dp = HttpReader(IterableWrapper([file_url]))
# Functional Test: test if the Http Reader can download and read properly
reader_dp = http_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
# Reset Test: http_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = http_reader_dp.check_hash({file_url: expected_MD5_hash}, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(io.BufferedReader, type(stream))
# __len__ Test: returns the length of source DataPipe
source_dp = IterableWrapper([file_url])
http_dp = HttpReader(source_dp)
self.assertEqual(1, len(http_dp))
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.IterableWrapper"
] | [((1788, 1803), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1801, 1803), False, 'import unittest\n'), ((364, 381), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', ([], {}), '()\n', (379, 381), False, 'from _utils._common_utils_for_test import create_temp_dir\n'), ((1645, 1672), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[file_url]'], {}), '([file_url])\n', (1660, 1672), False, 'from torchdata.datapipes.iter import HttpReader, IterableWrapper\n'), ((1691, 1712), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['source_dp'], {}), '(source_dp)\n', (1701, 1712), False, 'from torchdata.datapipes.iter import HttpReader, IterableWrapper\n'), ((848, 875), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[file_url]'], {}), '([file_url])\n', (863, 875), False, 'from torchdata.datapipes.iter import HttpReader, IterableWrapper\n'), ((1111, 1133), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1127, 1133), False, 'import os\n'), ((1481, 1503), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1497, 1503), False, 'import os\n'), ((499, 586), 'warnings.warn', 'warnings.warn', (['f"""TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}"""'], {}), "(\n f'TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}')\n", (512, 586), False, 'import warnings\n')] |
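The two datapipes exercised by the test are also enough to persist the download; a small sketch, assuming it is acceptable to derive the target filename from the URL:

import os
from torchdata.datapipes.iter import HttpReader, IterableWrapper

file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
for url, stream in HttpReader(IterableWrapper([file_url])):
    # Write the raw bytes into the current working directory.
    with open(os.path.basename(url), "wb") as f:
        f.write(stream.read())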
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import itertools
import unittest
import warnings
from collections import defaultdict
from typing import Dict
import expecttest
import torch.utils.data.datapipes.iter
import torchdata
from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls
from torch.utils.data.datapipes.map import SequenceWrapper
from torchdata.datapipes.iter import (
BucketBatcher,
Cycler,
Header,
IndexAdder,
InMemoryCacheHolder,
IterableWrapper,
IterDataPipe,
IterKeyZipper,
LineReader,
MapKeyZipper,
ParagraphAggregator,
Rows2Columnar,
SampleMultiplexer,
)
def test_torchdata_pytorch_consistency() -> None:
def extract_datapipe_names(module):
return {
name
for name, dp_type in module.__dict__.items()
if not name.startswith("_") and isinstance(dp_type, type) and issubclass(dp_type, IterDataPipe)
}
pytorch_datapipes = extract_datapipe_names(torch.utils.data.datapipes.iter)
torchdata_datapipes = extract_datapipe_names(torchdata.datapipes.iter)
missing_datapipes = pytorch_datapipes - torchdata_datapipes
deprecated_datapipes = {"FileLoader"}
for dp in deprecated_datapipes:
if dp in missing_datapipes:
missing_datapipes.remove("FileLoader")
if any(missing_datapipes):
msg = (
"The following datapipes are exposed under `torch.utils.data.datapipes.iter`, "
"but not under `torchdata.datapipes.iter`:\n"
)
raise AssertionError(msg + "\n".join(sorted(missing_datapipes)))
class TestDataPipe(expecttest.TestCase):
def test_in_memory_cache_holder_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
cache_dp = source_dp.in_memory_cache(size=5)
# Functional Test: Cache DP should just return the data without changing the values
res1 = list(cache_dp)
self.assertEqual(list(range(10)), res1)
# Functional Test: Ensure the objects are the same ones from source DataPipe
res1 = list(cache_dp)
res2 = list(cache_dp)
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res1)))
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res2)))
# TODO(122): Figure out a way to consistently test caching when size is in megabytes
# Reset Test: reset the DataPipe after reading part of it
cache_dp = InMemoryCacheHolder(source_dp, size=5)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(cache_dp, n_elements_before_reset)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(list(range(10)), res_after_reset)
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(cache_dp))
# __len__ Test: source_dp has no len and cache is not yet loaded
source_dp_no_len = IDP_NoLen(range(10))
cache_dp = InMemoryCacheHolder(source_dp_no_len, size=5)
with self.assertRaisesRegex(TypeError, "doesn't have valid length until the cache is loaded"):
len(cache_dp)
# __len__ Test: source_dp has no len but we still can calculate after cache is loaded
list(cache_dp)
self.assertEqual(10, len(cache_dp))
def test_iter_key_zipper_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
ref_dp = IterableWrapper(range(20))
# Functional Test: Output should be a zip list of tuple
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
self.assertEqual([(i, i) for i in range(10)], list(zip_dp))
# Functional Test: keep_key=True, and key should show up as the first element
zip_dp_w_key = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=True, buffer_size=10
)
self.assertEqual([(i, (i, i)) for i in range(10)], list(zip_dp_w_key))
# Functional Test: using a different merge function
def merge_to_string(item1, item2):
return f"{item1},{item2}"
zip_dp_w_str_merge = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, buffer_size=10, merge_fn=merge_to_string
)
self.assertEqual([f"{i},{i}" for i in range(10)], list(zip_dp_w_str_merge))
# Functional Test: using a different merge function and keep_key=True
zip_dp_w_key_str_merge = source_dp.zip_with_iter(
ref_datapipe=ref_dp,
key_fn=lambda x: x,
ref_key_fn=lambda x: x,
keep_key=True,
buffer_size=10,
merge_fn=merge_to_string,
)
self.assertEqual([(i, f"{i},{i}") for i in range(10)], list(zip_dp_w_key_str_merge))
# Functional Test: testing nested zipping
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
# Without a custom merge function, there will be nested tuples
zip_dp2 = zip_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x[0], ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
self.assertEqual([((i, i), i) for i in range(10)], list(zip_dp2))
# With a custom merge function, nesting can be prevented
zip_dp2_w_merge = zip_dp.zip_with_iter(
ref_datapipe=ref_dp,
key_fn=lambda x: x[0],
ref_key_fn=lambda x: x,
keep_key=False,
buffer_size=100,
merge_fn=lambda x, y: list(x) + [y],
)
self.assertEqual([[i, i, i] for i in range(10)], list(zip_dp2_w_merge))
# Functional Test: element is in source but missing in reference
ref_dp_missing = IterableWrapper(range(1, 10))
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_missing, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
with self.assertRaisesRegex(BufferError, r"No matching key can be found"):
list(zip_dp)
# Functional Test: Buffer is not large enough, hence, element can't be found and raises error
ref_dp_end = IterableWrapper(list(range(1, 10)) + [0])
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=5
)
it = iter(zip_dp)
with warnings.catch_warnings(record=True) as wa:
# In order to find '0' at the end, the buffer is filled, hence the warning
# and ref_dp is fully traversed
self.assertEqual(
(
0,
0,
),
next(it),
)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Buffer reaches the upper limit")
with self.assertRaisesRegex(BufferError, r"No matching key can be found"):
            # '1' cannot be found because the value was thrown out when the buffer was filled
next(it)
# Functional Test: Buffer is just big enough
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10
)
self.assertEqual([(i, i) for i in range(10)], list(zip_dp))
# Reset Test: reset the DataPipe after reading part of it
zip_dp = IterKeyZipper(
source_datapipe=source_dp,
ref_datapipe=ref_dp,
key_fn=lambda x: x,
ref_key_fn=lambda x: x,
keep_key=False,
buffer_size=10,
)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(zip_dp, n_elements_before_reset)
self.assertEqual([(i, i) for i in range(5)], res_before_reset)
self.assertEqual([(i, i) for i in range(10)], res_after_reset)
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(zip_dp))
def test_map_key_zipper_datapipe(self) -> None:
source_dp = IterableWrapper(range(10))
map_dp = SequenceWrapper(["even", "odd"])
# Functional Test: ensure the hash join is working and return tuple by default
def odd_even(i: int) -> int:
return i % 2
result_dp = source_dp.zip_with_map(map_dp, odd_even)
def odd_even_string(i: int) -> str:
return "odd" if i % 2 else "even"
expected_res = [(i, odd_even_string(i)) for i in range(10)]
self.assertEqual(expected_res, list(result_dp))
# Functional Test: ensure that a custom merge function works
def custom_merge(a, b):
return f"{a} is a {b} number."
result_dp = source_dp.zip_with_map(map_dp, odd_even, custom_merge)
expected_res2 = [f"{i} is a {odd_even_string(i)} number." for i in range(10)]
self.assertEqual(expected_res2, list(result_dp))
# Functional Test: raises error when key is invalid
def odd_even_bug(i: int) -> int:
return 2 if i == 0 else i % 2
result_dp = MapKeyZipper(source_dp, map_dp, odd_even_bug)
it = iter(result_dp)
with self.assertRaisesRegex(KeyError, "is not a valid key in the given MapDataPipe"):
next(it)
# Reset Test:
n_elements_before_reset = 4
result_dp = source_dp.zip_with_map(map_dp, odd_even)
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: returns the length of source DataPipe
result_dp = source_dp.zip_with_map(map_dp, odd_even)
self.assertEqual(len(source_dp), len(result_dp))
def test_cycler_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(5))
# Functional Test: cycle for finite number of times and ends
cycler_dp = source_dp.cycle(3)
self.assertEqual(list(range(5)) * 3, list(cycler_dp))
# Functional Test: cycle for indefinitely
cycler_dp = source_dp.cycle()
it = iter(cycler_dp)
for expected_val in list(range(5)) * 10:
self.assertEqual(expected_val, next(it))
# Functional Test: zero is allowed but immediately triggers StopIteration
cycler_dp = source_dp.cycle(0)
self.assertEqual([], list(cycler_dp))
# Functional Test: negative value is not allowed
with self.assertRaisesRegex(ValueError, "Expected non-negative count"):
source_dp.cycle(-1)
# Reset Test:
cycler_dp = Cycler(source_dp, count=2)
n_elements_before_reset = 4
res_before_reset, res_after_reset = reset_after_n_next_calls(cycler_dp, n_elements_before_reset)
self.assertEqual(list(range(4)), res_before_reset)
self.assertEqual(list(range(5)) * 2, res_after_reset)
# __len__ Test: returns length when count is not None
self.assertEqual(10, len(cycler_dp))
# __len__ Test: inherits length from source_dp
cycler_dp = Cycler(source_dp)
with self.assertRaisesRegex(TypeError, "instance cycles forever, and therefore doesn't have valid length"):
len(cycler_dp)
def test_header_iterdatapipe(self) -> None:
# Functional Test: ensure the limit is enforced
source_dp = IterableWrapper(range(20))
header_dp = source_dp.header(5)
self.assertEqual(list(range(5)), list(header_dp))
# Functional Test: ensure it works when the source has less elements than the limit
source_dp = IterableWrapper(range(5))
header_dp = source_dp.header(100)
self.assertEqual(list(range(5)), list(header_dp))
# Reset Test:
source_dp = IterableWrapper(range(20))
header_dp = Header(source_dp, 5)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(header_dp, n_elements_before_reset)
self.assertEqual(list(range(2)), res_before_reset)
self.assertEqual(list(range(5)), res_after_reset)
self.assertEqual(list(range(5)), list(header_dp))
# __len__ Test: returns the limit when it is less than the length of source
self.assertEqual(5, len(header_dp))
# __len__ Test: returns the length of source when it is less than the limit
header_dp = source_dp.header(30)
self.assertEqual(20, len(header_dp))
# __len__ Test: returns limit if source doesn't have length
source_dp_NoLen = IDP_NoLen(list(range(20)))
header_dp = source_dp_NoLen.header(30)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(30, len(header_dp))
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"length of this HeaderIterDataPipe is inferred to be equal to its limit"
)
# __len__ Test: returns limit if source doesn't have length, but it has been iterated through once
for _ in header_dp:
pass
self.assertEqual(20, len(header_dp))
def test_enumerator_iterdatapipe(self) -> None:
letters = "abcde"
source_dp = IterableWrapper(letters)
enum_dp = source_dp.enumerate()
# Functional Test: ensure that the correct index value is added to each element (tuple)
self.assertEqual([(0, "a"), (1, "b"), (2, "c"), (3, "d"), (4, "e")], list(enum_dp))
# Functional Test: start index from non-zero
enum_dp = source_dp.enumerate(starting_index=10)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], list(enum_dp))
# Reset Test:
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(enum_dp, n_elements_before_reset)
self.assertEqual([(10, "a"), (11, "b")], res_before_reset)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(5, len(enum_dp))
def test_index_adder_iterdatapipe(self) -> None:
letters = "abcdefg"
source_dp = IterableWrapper([{i: i} for i in letters])
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
def dict_content_test_helper(iterator):
for i, curr_dict in enumerate(iterator):
self.assertEqual(i, curr_dict["index"])
self.assertTrue(letters[i] in curr_dict)
# Functional Test: ensure that the correct index value is added to each element (dict)
dict_content_test_helper(it)
# Functional Test: raises error when the elements of source_dp is not of type Dict
source_dp = IterableWrapper(range(10))
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
with self.assertRaisesRegex(NotImplementedError, "We only support adding index to row or batch in dict type"):
next(it)
# Reset Test
source_dp = IterableWrapper([{i: i} for i in "abcdefg"])
index_adder_dp = IndexAdder(source_dp)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(index_adder_dp, n_elements_before_reset)
dict_content_test_helper(iter(res_before_reset))
dict_content_test_helper(iter(res_after_reset))
# __len__ Test: returns length of source DataPipe
self.assertEqual(7, len(index_adder_dp))
def test_line_reader_iterdatapipe(self) -> None:
text1 = "Line1\nLine2"
text2 = "Line2,1\r\nLine2,2\r\nLine2,3"
# Functional Test: read lines correctly
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = source_dp.readlines()
expected_result = [("file1", line) for line in text1.splitlines()] + [
("file2", line) for line in text2.splitlines()
]
self.assertEqual(expected_result, list(line_reader_dp))
# Functional Test: strip new lines for bytes
source_dp = IterableWrapper(
[("file1", io.BytesIO(text1.encode("utf-8"))), ("file2", io.BytesIO(text2.encode("utf-8")))]
)
line_reader_dp = source_dp.readlines()
expected_result_bytes = [("file1", line.encode("utf-8")) for line in text1.splitlines()] + [
("file2", line.encode("utf-8")) for line in text2.splitlines()
]
self.assertEqual(expected_result_bytes, list(line_reader_dp))
# Functional Test: do not strip new lines
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = source_dp.readlines(strip_newline=False)
expected_result = [
("file1", "Line1\n"),
("file1", "Line2"),
("file2", "Line2,1\r\n"),
("file2", "Line2,2\r\n"),
("file2", "Line2,3"),
]
self.assertEqual(expected_result, list(line_reader_dp))
# Reset Test:
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = LineReader(source_dp, strip_newline=False)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(line_reader_dp, n_elements_before_reset)
self.assertEqual(expected_result[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_result, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(line_reader_dp)
def test_paragraph_aggregator_iterdatapipe(self) -> None:
# Functional Test: aggregate lines correctly
source_dp = IterableWrapper(
[("file1", "Line1"), ("file1", "Line2"), ("file2", "Line2,1"), ("file2", "Line2,2"), ("file2", "Line2,3")]
)
para_agg_dp = source_dp.lines_to_paragraphs()
self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], list(para_agg_dp))
# Functional Test: aggregate lines correctly with different joiner
para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls))
self.assertEqual([("file1", "Line1 Line2"), ("file2", "Line2,1 Line2,2 Line2,3")], list(para_agg_dp))
# Reset Test: each yield is for a single file
para_agg_dp = ParagraphAggregator(source_dp)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(para_agg_dp, n_elements_before_reset)
self.assertEqual([("file1", "Line1\nLine2")], res_before_reset)
self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(para_agg_dp)
def test_rows_to_columnar_iterdatapipe(self) -> None:
# Functional Test: working with DataPipe with dict
column_names_dict = {"a", "b", "c"}
source_dp = IterableWrapper(
[
[{l: i for i, l in enumerate("abc")}, {l: i * 10 for i, l in enumerate("abc")}],
[{l: i + 100 for i, l in enumerate("abc")}, {l: (i + 100) * 10 for i, l in enumerate("abc")}],
]
)
result_dp = source_dp.rows2columnar(column_names_dict)
batch1 = defaultdict(list, {"a": [0, 0], "b": [1, 10], "c": [2, 20]})
batch2 = defaultdict(list, {"a": [100, 1000], "b": [101, 1010], "c": [102, 1020]})
expected_output = [batch1, batch2]
self.assertEqual(expected_output, list(result_dp))
# Functional Test: working with DataPipe with list
column_names_list = ["a", "b", "c"]
source_dp = IterableWrapper(
[
[[i for i, _ in enumerate("abc")], [i * 10 for i, _ in enumerate("abc")]],
[[i + 100 for i, _ in enumerate("abc")], [(i + 100) * 10 for i, _ in enumerate("abc")]],
]
)
result_dp = source_dp.rows2columnar(column_names_list)
self.assertEqual(expected_output, list(result_dp))
# Reset Test:
result_dp = Rows2Columnar(source_dp, column_names_list)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual([expected_output[0]], res_before_reset)
self.assertEqual(expected_output, res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(2, len(result_dp))
def test_sample_multiplexer_iterdatapipe(self) -> None:
# Functional Test: yields all values from the sources
source_dp1 = IterableWrapper([0] * 10)
source_dp2 = IterableWrapper([1] * 10)
d: Dict[IterDataPipe, float] = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
result = list(sample_mul_dp)
self.assertEqual([0] * 10 + [1] * 10, result)
# Functional Test: raises error for empty dict
with self.assertRaisesRegex(ValueError, "Empty dictionary"):
SampleMultiplexer(pipes_to_weights_dict={}, seed=0) # type: ignore[arg-type]
# Functional Test: raises error for negative or zero weight
d = {source_dp1: 99999999, source_dp2: 0}
with self.assertRaisesRegex(ValueError, "Expecting a positive and non-zero weight"):
SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
# Reset Test
d = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(sample_mul_dp, n_elements_before_reset)
self.assertEqual([0] * n_elements_before_reset, res_before_reset)
self.assertEqual([0] * 10 + [1] * 10, res_after_reset)
# __len__ Test: returns the sum of the lengths of the sources
self.assertEqual(20, len(sample_mul_dp))
def test_bucket_batcher_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
# Functional Test: drop last reduces length
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=1, in_batch_shuffle=True
)
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Functional Test: drop last is False preserves length
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=False, batch_num=100, bucket_num=1, in_batch_shuffle=False
)
self.assertEqual(10, len(list(batch_dp.unbatch())))
def _return_self(x):
return x
# Functional Test: using sort_key, with in_batch_shuffle
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=1, in_batch_shuffle=True, sort_key=_return_self
)
# bucket_num = 1 means there will be no shuffling if a sort key is given
self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], list(batch_dp))
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Functional Test: using sort_key, without in_batch_shuffle
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=2, in_batch_shuffle=False, sort_key=_return_self
)
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Reset Test:
batch_dp = BucketBatcher(
source_dp,
batch_size=3,
drop_last=True,
batch_num=100,
bucket_num=2,
in_batch_shuffle=False,
sort_key=_return_self,
)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self.assertEqual(6, len([item for batch in res_before_reset for item in batch]))
self.assertEqual(3, len(res_after_reset))
self.assertEqual(9, len([item for batch in res_after_reset for item in batch]))
# __len__ Test: returns the number of batches
with self.assertRaises(TypeError):
len(batch_dp)
def test_flatmap_datapipe(self):
source_dp = IterableWrapper(list(range(20)))
def fn(e):
return [e, e * 10]
flatmapped_dp = source_dp.flatmap(fn)
expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp]))
flatmapped_dp_list = list(flatmapped_dp)
self.assertEqual(expected_list, flatmapped_dp_list)
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(flatmapped_dp, n_elements_before_reset)
self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_list, res_after_reset)
# __len__ Test: length should be len(source_dp)*len(fn->out_shape) which we can't know
with self.assertRaisesRegex(TypeError, "length relies on the output of its function."):
len(flatmapped_dp)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.Rows2Columnar",
"torchdata.datapipes.iter.Cycler",
"torchdata.datapipes.iter.ParagraphAggregator",
"torchdata.datapipes.iter.MapKeyZipper",
"torchdata.datapipes.iter.IndexAdder",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.Header",
"torchdata.datapipes.it... | [((26031, 26046), 'unittest.main', 'unittest.main', ([], {}), '()\n', (26044, 26046), False, 'import unittest\n'), ((2523, 2561), 'torchdata.datapipes.iter.InMemoryCacheHolder', 'InMemoryCacheHolder', (['source_dp'], {'size': '(5)'}), '(source_dp, size=5)\n', (2542, 2561), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((2642, 2701), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['cache_dp', 'n_elements_before_reset'], {}), '(cache_dp, n_elements_before_reset)\n', (2666, 2701), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((3061, 3106), 'torchdata.datapipes.iter.InMemoryCacheHolder', 'InMemoryCacheHolder', (['source_dp_no_len'], {'size': '(5)'}), '(source_dp_no_len, size=5)\n', (3080, 3106), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((7740, 7881), 'torchdata.datapipes.iter.IterKeyZipper', 'IterKeyZipper', ([], {'source_datapipe': 'source_dp', 'ref_datapipe': 'ref_dp', 'key_fn': '(lambda x: x)', 'ref_key_fn': '(lambda x: x)', 'keep_key': '(False)', 'buffer_size': '(10)'}), '(source_datapipe=source_dp, ref_datapipe=ref_dp, key_fn=lambda\n x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10)\n', (7753, 7881), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((8041, 8098), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['zip_dp', 'n_elements_before_reset'], {}), '(zip_dp, n_elements_before_reset)\n', (8065, 8098), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((8456, 8488), 'torch.utils.data.datapipes.map.SequenceWrapper', 'SequenceWrapper', (["['even', 'odd']"], {}), "(['even', 'odd'])\n", (8471, 8488), False, 'from torch.utils.data.datapipes.map import SequenceWrapper\n'), ((9446, 9491), 'torchdata.datapipes.iter.MapKeyZipper', 'MapKeyZipper', (['source_dp', 'map_dp', 'odd_even_bug'], {}), '(source_dp, map_dp, odd_even_bug)\n', (9458, 9491), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((9800, 9860), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['result_dp', 'n_elements_before_reset'], {}), '(result_dp, n_elements_before_reset)\n', (9824, 9860), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((11048, 11074), 'torchdata.datapipes.iter.Cycler', 'Cycler', (['source_dp'], {'count': '(2)'}), '(source_dp, count=2)\n', (11054, 11074), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((11155, 11215), '_utils._common_utils_for_test.reset_after_n_next_calls', 
'reset_after_n_next_calls', (['cycler_dp', 'n_elements_before_reset'], {}), '(cycler_dp, n_elements_before_reset)\n', (11179, 11215), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((11521, 11538), 'torchdata.datapipes.iter.Cycler', 'Cycler', (['source_dp'], {}), '(source_dp)\n', (11527, 11538), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((12261, 12281), 'torchdata.datapipes.iter.Header', 'Header', (['source_dp', '(5)'], {}), '(source_dp, 5)\n', (12267, 12281), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((12362, 12422), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['header_dp', 'n_elements_before_reset'], {}), '(header_dp, n_elements_before_reset)\n', (12386, 12422), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((13665, 13689), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['letters'], {}), '(letters)\n', (13680, 13689), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((14230, 14288), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['enum_dp', 'n_elements_before_reset'], {}), '(enum_dp, n_elements_before_reset)\n', (14254, 14288), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((14658, 14700), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[{i: i} for i in letters]'], {}), '([{i: i} for i in letters])\n', (14673, 14700), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((15532, 15576), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[{i: i} for i in 'abcdefg']"], {}), "([{i: i} for i in 'abcdefg'])\n", (15547, 15576), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((15602, 15623), 'torchdata.datapipes.iter.IndexAdder', 'IndexAdder', (['source_dp'], {}), '(source_dp)\n', (15612, 15623), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((15704, 15769), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['index_adder_dp', 'n_elements_before_reset'], {}), '(index_adder_dp, n_elements_before_reset)\n', (15728, 15769), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((17684, 17726), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['source_dp'], {'strip_newline': '(False)'}), '(source_dp, 
strip_newline=False)\n', (17694, 17726), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((17807, 17872), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['line_reader_dp', 'n_elements_before_reset'], {}), '(line_reader_dp, n_elements_before_reset)\n', (17831, 17872), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((18337, 18464), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[('file1', 'Line1'), ('file1', 'Line2'), ('file2', 'Line2,1'), ('file2',\n 'Line2,2'), ('file2', 'Line2,3')]"], {}), "([('file1', 'Line1'), ('file1', 'Line2'), ('file2',\n 'Line2,1'), ('file2', 'Line2,2'), ('file2', 'Line2,3')])\n", (18352, 18464), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((18997, 19027), 'torchdata.datapipes.iter.ParagraphAggregator', 'ParagraphAggregator', (['source_dp'], {}), '(source_dp)\n', (19016, 19027), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((19108, 19170), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['para_agg_dp', 'n_elements_before_reset'], {}), '(para_agg_dp, n_elements_before_reset)\n', (19132, 19170), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((20059, 20119), 'collections.defaultdict', 'defaultdict', (['list', "{'a': [0, 0], 'b': [1, 10], 'c': [2, 20]}"], {}), "(list, {'a': [0, 0], 'b': [1, 10], 'c': [2, 20]})\n", (20070, 20119), False, 'from collections import defaultdict\n'), ((20137, 20210), 'collections.defaultdict', 'defaultdict', (['list', "{'a': [100, 1000], 'b': [101, 1010], 'c': [102, 1020]}"], {}), "(list, {'a': [100, 1000], 'b': [101, 1010], 'c': [102, 1020]})\n", (20148, 20210), False, 'from collections import defaultdict\n'), ((20853, 20896), 'torchdata.datapipes.iter.Rows2Columnar', 'Rows2Columnar', (['source_dp', 'column_names_list'], {}), '(source_dp, column_names_list)\n', (20866, 20896), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((20977, 21037), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['result_dp', 'n_elements_before_reset'], {}), '(result_dp, n_elements_before_reset)\n', (21001, 21037), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((21409, 21434), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['([0] * 10)'], {}), '([0] * 10)\n', (21424, 21434), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((21456, 21481), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['([1] * 10)'], {}), '([1] * 
10)\n', (21471, 21481), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((21591, 21641), 'torchdata.datapipes.iter.SampleMultiplexer', 'SampleMultiplexer', ([], {'pipes_to_weights_dict': 'd', 'seed': '(0)'}), '(pipes_to_weights_dict=d, seed=0)\n', (21608, 21641), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((22327, 22377), 'torchdata.datapipes.iter.SampleMultiplexer', 'SampleMultiplexer', ([], {'pipes_to_weights_dict': 'd', 'seed': '(0)'}), '(pipes_to_weights_dict=d, seed=0)\n', (22344, 22377), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((22458, 22522), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['sample_mul_dp', 'n_elements_before_reset'], {}), '(sample_mul_dp, n_elements_before_reset)\n', (22482, 22522), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((24252, 24386), 'torchdata.datapipes.iter.BucketBatcher', 'BucketBatcher', (['source_dp'], {'batch_size': '(3)', 'drop_last': '(True)', 'batch_num': '(100)', 'bucket_num': '(2)', 'in_batch_shuffle': '(False)', 'sort_key': '_return_self'}), '(source_dp, batch_size=3, drop_last=True, batch_num=100,\n bucket_num=2, in_batch_shuffle=False, sort_key=_return_self)\n', (24265, 24386), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((24558, 24617), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['batch_dp', 'n_elements_before_reset'], {}), '(batch_dp, n_elements_before_reset)\n', (24582, 24617), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((25568, 25632), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['flatmapped_dp', 'n_elements_before_reset'], {}), '(flatmapped_dp, n_elements_before_reset)\n', (25592, 25632), False, 'from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls\n'), ((6724, 6760), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (6747, 6760), False, 'import warnings\n'), ((13080, 13116), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (13103, 13116), False, 'import warnings\n'), ((21870, 21921), 'torchdata.datapipes.iter.SampleMultiplexer', 'SampleMultiplexer', ([], {'pipes_to_weights_dict': '{}', 'seed': '(0)'}), '(pipes_to_weights_dict={}, seed=0)\n', (21887, 21921), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((22172, 22222), 'torchdata.datapipes.iter.SampleMultiplexer', 'SampleMultiplexer', ([], {'pipes_to_weights_dict': 'd', 'seed': 
'(0)'}), '(pipes_to_weights_dict=d, seed=0)\n', (22189, 22222), False, 'from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer\n'), ((25260, 25310), 'itertools.chain', 'itertools.chain', (['*[(e, e * 10) for e in source_dp]'], {}), '(*[(e, e * 10) for e in source_dp])\n', (25275, 25310), False, 'import itertools\n'), ((16220, 16238), 'io.StringIO', 'io.StringIO', (['text1'], {}), '(text1)\n', (16231, 16238), False, 'import io\n'), ((16251, 16269), 'io.StringIO', 'io.StringIO', (['text2'], {}), '(text2)\n', (16262, 16269), False, 'import io\n'), ((17139, 17157), 'io.StringIO', 'io.StringIO', (['text1'], {}), '(text1)\n', (17150, 17157), False, 'import io\n'), ((17170, 17188), 'io.StringIO', 'io.StringIO', (['text2'], {}), '(text2)\n', (17181, 17188), False, 'import io\n'), ((17606, 17624), 'io.StringIO', 'io.StringIO', (['text1'], {}), '(text1)\n', (17617, 17624), False, 'import io\n'), ((17637, 17655), 'io.StringIO', 'io.StringIO', (['text2'], {}), '(text2)\n', (17648, 17655), False, 'import io\n')] |
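The functional forms exercised above compose directly on a datapipe; a short chained sketch combining zip_with_iter, cycle, and header:

from torchdata.datapipes.iter import IterableWrapper

source_dp = IterableWrapper(range(10))
ref_dp = IterableWrapper(range(10))
pipe = source_dp.zip_with_iter(
    ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10
).cycle(2).header(5)
print(list(pipe))  # [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]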
import pathlib
from typing import Any, Dict, List, Tuple, BinaryIO, Union
import numpy as np
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
UnBatcher,
)
from torchvision.prototype.datasets.utils import (
Dataset,
HttpResource,
OnlineResource,
)
from torchvision.prototype.datasets.utils._internal import (
read_mat,
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label, Image
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.UnBatcher"
] | [((1544, 1664), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['f"""http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat"""'], {'sha256': 'self._CHECKSUMS[self._split]'}), "(f'http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat',\n sha256=self._CHECKSUMS[self._split])\n", (1556, 1664), False, 'from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource\n'), ((1869, 1885), 'torchvision.prototype.datasets.utils._internal.read_mat', 'read_mat', (['buffer'], {}), '(buffer)\n', (1877, 1885), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling\n'), ((2471, 2511), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._read_images_and_labels'], {}), '(dp, self._read_images_and_labels)\n', (2477, 2511), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher\n'), ((2525, 2538), 'torchdata.datapipes.iter.UnBatcher', 'UnBatcher', (['dp'], {}), '(dp)\n', (2534, 2538), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher\n'), ((2552, 2570), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (2566, 2570), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling\n'), ((2584, 2601), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (2597, 2601), False, 'from torchvision.prototype.datasets.utils._internal import read_mat, hint_sharding, hint_shuffling\n'), ((2617, 2649), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (2623, 2649), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher\n')] |
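As with the other prototype datasets in this collection, constructing the class builds its datapipe, so samples can be pulled by iterating the instance. A sketch, assuming scipy is installed and "./data" is a writable download location:

dataset = SVHN("./data", split="test")
sample = next(iter(dataset))
# Per _prepare_sample: a 3x32x32 Image tensor and a Label in [0, 9].
print(sample["image"].shape, sample["label"])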
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import (
INT_FEATURE_COUNT,
CAT_FEATURE_COUNT,
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
InMemoryBinaryCriteoIterDataPipe,
)
from torchrec.datasets.random import RandomRecDataset
import torch
from typing import (
Iterator,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Union,
Tuple,
)
from torchrec.datasets.criteo import _default_row_mapper
from torch.utils.data import IterDataPipe
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchrec.datasets.utils import ReadLinesFromCSV, safe_cast, Batch
import torch.utils.data.datapipes as dp
import io
import numpy as np
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
STAGES = ["train", "val", "test"]
DAYS = 24
class LoadWithTextIOWrapper(IterDataPipe):
def __init__(self, paths, **open_kw):
self.paths = paths
self.open_kw: Any = open_kw # pyre-ignore[4]
def __iter__(self) -> Iterator[Any]:
for url, buffer in self.paths:
yield url, io.TextIOWrapper(buffer, encoding='utf-8')
class S3CriteoIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example:
>>> datapipe = CriteoIterDataPipe(
>>> ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
>>> )
>>> datapipe = dp.iter.Batcher(datapipe, 100)
>>> datapipe = dp.iter.Collator(datapipe)
>>> batch = next(iter(datapipe))
"""
def __init__(
self,
paths: S3FileLoader,
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> None:
self.paths = paths
self.row_mapper = row_mapper
self.open_kw: Any = open_kw # pyre-ignore[4]
batch_size = open_kw['batch_size']
# These values are the same for the KeyedJaggedTensors in all batches, so they
# are computed once here. This avoids extra work from the KeyedJaggedTensor sync
# functions.
self._num_ids_in_batch: int = CAT_FEATURE_COUNT * batch_size
self.keys: List[str] = DEFAULT_CAT_NAMES
self.lengths: torch.Tensor = torch.ones(
(self._num_ids_in_batch,), dtype=torch.int32
)
self.offsets: torch.Tensor = torch.arange(
0, self._num_ids_in_batch + 1, dtype=torch.int32
)
self.stride = batch_size
self.length_per_key: List[int] = CAT_FEATURE_COUNT * [batch_size]
self.offset_per_key: List[int] = [
batch_size * i for i in range(CAT_FEATURE_COUNT + 1)
]
self.index_per_key: Dict[str, int] = {
key: i for (i, key) in enumerate(self.keys)
}
def _np_arrays_to_batch(
self, dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> Batch:
return Batch(
dense_features=torch.from_numpy(dense),
sparse_features=KeyedJaggedTensor(
keys=self.keys,
# transpose + reshape(-1) incurs an additional copy.
values=torch.from_numpy(sparse.transpose(1, 0).reshape(-1)),
lengths=self.lengths,
offsets=self.offsets,
stride=self.stride,
length_per_key=self.length_per_key,
offset_per_key=self.offset_per_key,
index_per_key=self.index_per_key,
),
labels=torch.from_numpy(labels.reshape(-1)),
)
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
worker_info = torch.utils.data.get_worker_info()
paths = self.paths
if worker_info is not None:
paths = (
path
for (idx, path) in enumerate(paths)
if idx % worker_info.num_workers == worker_info.id
)
# datapipe = LoadFiles(paths, mode="r", **self.open_kw)
datapipe = LoadWithTextIOWrapper(paths)
datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
if self.row_mapper:
datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
# datapipe = dp.iter.Mapper(datapipe, self._np_arrays_to_batch)
yield from datapipe
class NpToBatchIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example:
>>> datapipe = CriteoIterDataPipe(
>>> ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
>>> )
>>> datapipe = dp.iter.Batcher(datapipe, 100)
>>> datapipe = dp.iter.Collator(datapipe)
>>> batch = next(iter(datapipe))
"""
def __init__(
self,
datapipe,
batch_size,
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> None:
self.datapipe = datapipe
self.open_kw: Any = open_kw # pyre-ignore[4]
# These values are the same for the KeyedJaggedTensors in all batches, so they
# are computed once here. This avoids extra work from the KeyedJaggedTensor sync
# functions.
self._num_ids_in_batch: int = CAT_FEATURE_COUNT * batch_size
self.keys: List[str] = DEFAULT_CAT_NAMES
self.lengths: torch.Tensor = torch.ones(
(self._num_ids_in_batch,), dtype=torch.int32
)
self.offsets: torch.Tensor = torch.arange(
0, self._num_ids_in_batch + 1, dtype=torch.int32
)
self.stride = batch_size
self.length_per_key: List[int] = CAT_FEATURE_COUNT * [batch_size]
self.offset_per_key: List[int] = [
batch_size * i for i in range(CAT_FEATURE_COUNT + 1)
]
self.index_per_key: Dict[str, int] = {
key: i for (i, key) in enumerate(self.keys)
}
def _np_arrays_to_batch(
self, dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> Batch:
return Batch(
dense_features=dense,
sparse_features=KeyedJaggedTensor(
keys=self.keys,
# transpose + reshape(-1) incurs an additional copy.
# values=sparse.transpose(1, 0).reshape(-1),
values=sparse.reshape(-1),
lengths=self.lengths,
offsets=self.offsets,
stride=self.stride,
length_per_key=self.length_per_key,
offset_per_key=self.offset_per_key,
index_per_key=self.index_per_key,
),
labels=labels.reshape(-1),
)
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
for data in self.datapipe:
if dist.get_rank() == 0:
print(data)
yield self._np_arrays_to_batch(*data)
def _get_s3_dataloader(
args: argparse.Namespace,
stage: str,
pin_memory: bool,
) -> DataLoader:
s3_urls = S3FileLister([args.s3_criteo_prefix])
if dist.get_rank() == 0:
print(f"urls: {s3_urls}")
def is_final_day(s: str) -> bool:
return f"day_{DAYS - 1}" in s
if stage == "train":
# Train set gets all data except from the final day.
s3_urls = list(filter(lambda s: not is_final_day(s), s3_urls))
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
# Validation set gets the first half of the final day's samples. Test set gets
# the other half.
s3_urls = list(filter(is_final_day, s3_urls))
rank = (
dist.get_rank()
if stage == "val"
else dist.get_rank() + dist.get_world_size()
)
world_size = dist.get_world_size() * 2
s3_urls_buffers = S3FileLoader(s3_urls)
def row_mapper(row: List[str]) -> Tuple[List[int], List[int], int]:
label = safe_cast(row[0], int, 0)
dense = [safe_cast(row[i], int, 0) for i in range(1, 1 + INT_FEATURE_COUNT)]
sparse = [
int(safe_cast(row[i], str, "0") or "0", 16)
for i in range(
1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT
)
]
return dense, sparse, label # pyre-ignore[7]
datapipe = S3CriteoIterDataPipe(
s3_urls_buffers,
row_mapper=row_mapper,
batch_size=args.batch_size,
rank=rank,
world_size=world_size,
hashes=args.num_embeddings_per_feature
if args.num_embeddings is None
else ([args.num_embeddings] * CAT_FEATURE_COUNT),
)
datapipe = dp.iter.Batcher(datapipe, args.batch_size)
def my_collate(batch):
buffer = 3*[None,]
dtypes = [torch.float32, torch.int64, torch.int64]
for samples in batch:
for idx, arr in enumerate(samples):
if buffer[idx] is None:
buffer[idx] = torch.as_tensor(arr, dtype=dtypes[idx])
else:
buffer[idx] = torch.vstack((buffer[idx], torch.as_tensor(arr, dtype=dtypes[idx])))
dense, sparse, labels = buffer
dense += 3
dense = torch.log(dense)
labels = labels.reshape((-1, 1))
return dense, sparse, labels
datapipe = dp.iter.Collator(datapipe, collate_fn=my_collate)
datapipe = NpToBatchIterDataPipe(datapipe, args.batch_size)
dataloader = DataLoader(
datapipe,
batch_size=None,
pin_memory=pin_memory,
collate_fn=lambda x: x,
)
return dataloader
def _get_random_dataloader(
args: argparse.Namespace,
pin_memory: bool,
) -> DataLoader:
return DataLoader(
RandomRecDataset(
keys=DEFAULT_CAT_NAMES,
batch_size=args.batch_size,
hash_size=args.num_embeddings,
hash_sizes=args.num_embeddings_per_feature,
manual_seed=args.seed,
ids_per_feature=1,
num_dense=len(DEFAULT_INT_NAMES),
),
batch_size=None,
batch_sampler=None,
pin_memory=pin_memory,
num_workers=args.num_workers,
)
def _get_in_memory_dataloader(
args: argparse.Namespace,
stage: str,
pin_memory: bool,
) -> DataLoader:
files = os.listdir(args.in_memory_binary_criteo_path)
def is_final_day(s: str) -> bool:
return f"day_{DAYS - 1}" in s
if stage == "train":
# Train set gets all data except from the final day.
files = list(filter(lambda s: not is_final_day(s), files))
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
# Validation set gets the first half of the final day's samples. Test set gets
# the other half.
files = list(filter(is_final_day, files))
rank = (
dist.get_rank()
if stage == "val"
else dist.get_rank() + dist.get_world_size()
)
world_size = dist.get_world_size() * 2
stage_files: List[List[str]] = [
sorted(
map(
lambda x: os.path.join(args.in_memory_binary_criteo_path, x),
filter(lambda s: kind in s, files),
)
)
for kind in ["dense", "sparse", "labels"]
]
dataloader = DataLoader(
InMemoryBinaryCriteoIterDataPipe(
*stage_files, # pyre-ignore[6]
batch_size=args.batch_size,
rank=rank,
world_size=world_size,
hashes=args.num_embeddings_per_feature
if args.num_embeddings is None
else ([args.num_embeddings] * CAT_FEATURE_COUNT),
),
batch_size=None,
pin_memory=pin_memory,
collate_fn=lambda x: x,
)
return dataloader
def get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader:
"""
Gets the desired dataloader from dlrm_main command line options. Depending on the
supplied arguments, this function returns a DataLoader wrapped around a RandomRecDataset,
an InMemoryBinaryCriteoIterDataPipe, or the S3-streaming Criteo pipeline.
Args:
args (argparse.Namespace): Command line options supplied to dlrm_main.py's main
function.
backend (str): "nccl" or "gloo".
stage (str): "train", "val", or "test".
Returns:
dataloader (DataLoader): PyTorch dataloader for the specified options.
"""
stage = stage.lower()
if stage not in STAGES:
raise ValueError(f"Supplied stage was {stage}. Must be one of {STAGES}.")
pin_memory = (backend == "nccl") if args.pin_memory is None else args.pin_memory
if args.in_memory_binary_criteo_path is not None:
return _get_in_memory_dataloader(args, stage, pin_memory)
elif args.s3_criteo_prefix is not None:
return _get_s3_dataloader(args, stage, pin_memory)
else:
return _get_random_dataloader(args, pin_memory)
| [
"torchdata.datapipes.iter.S3FileLister",
"torchdata.datapipes.iter.S3FileLoader"
] | [((8563, 8600), 'torchdata.datapipes.iter.S3FileLister', 'S3FileLister', (['[args.s3_criteo_prefix]'], {}), '([args.s3_criteo_prefix])\n', (8575, 8600), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((9365, 9386), 'torchdata.datapipes.iter.S3FileLoader', 'S3FileLoader', (['s3_urls'], {}), '(s3_urls)\n', (9377, 9386), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((10230, 10272), 'torch.utils.data.datapipes.iter.Batcher', 'dp.iter.Batcher', (['datapipe', 'args.batch_size'], {}), '(datapipe, args.batch_size)\n', (10245, 10272), True, 'import torch.utils.data.datapipes as dp\n'), ((10892, 10941), 'torch.utils.data.datapipes.iter.Collator', 'dp.iter.Collator', (['datapipe'], {'collate_fn': 'my_collate'}), '(datapipe, collate_fn=my_collate)\n', (10908, 10941), True, 'import torch.utils.data.datapipes as dp\n'), ((11025, 11113), 'torch.utils.data.DataLoader', 'DataLoader', (['datapipe'], {'batch_size': 'None', 'pin_memory': 'pin_memory', 'collate_fn': '(lambda x: x)'}), '(datapipe, batch_size=None, pin_memory=pin_memory, collate_fn=lambda\n x: x)\n', (11035, 11113), False, 'from torch.utils.data import DataLoader\n'), ((11874, 11919), 'os.listdir', 'os.listdir', (['args.in_memory_binary_criteo_path'], {}), '(args.in_memory_binary_criteo_path)\n', (11884, 11919), False, 'import os\n'), ((3208, 3264), 'torch.ones', 'torch.ones', (['(self._num_ids_in_batch,)'], {'dtype': 'torch.int32'}), '((self._num_ids_in_batch,), dtype=torch.int32)\n', (3218, 3264), False, 'import torch\n'), ((3324, 3386), 'torch.arange', 'torch.arange', (['(0)', '(self._num_ids_in_batch + 1)'], {'dtype': 'torch.int32'}), '(0, self._num_ids_in_batch + 1, dtype=torch.int32)\n', (3336, 3386), False, 'import torch\n'), ((4597, 4631), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (4629, 4631), False, 'import torch\n'), ((5002, 5044), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (5018, 5044), False, 'from torchrec.datasets.utils import ReadLinesFromCSV, safe_cast, Batch\n'), ((6931, 6987), 'torch.ones', 'torch.ones', (['(self._num_ids_in_batch,)'], {'dtype': 'torch.int32'}), '((self._num_ids_in_batch,), dtype=torch.int32)\n', (6941, 6987), False, 'import torch\n'), ((7047, 7109), 'torch.arange', 'torch.arange', (['(0)', '(self._num_ids_in_batch + 1)'], {'dtype': 'torch.int32'}), '(0, self._num_ids_in_batch + 1, dtype=torch.int32)\n', (7059, 7109), False, 'import torch\n'), ((8608, 8623), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8621, 8623), True, 'from torch import distributed as dist\n'), ((8918, 8933), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8931, 8933), True, 'from torch import distributed as dist\n'), ((8955, 8976), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (8974, 8976), True, 'from torch import distributed as dist\n'), ((9476, 9501), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[0]', 'int', '(0)'], {}), '(row[0], int, 0)\n', (9485, 9501), False, 'from torchrec.datasets.utils import ReadLinesFromCSV, safe_cast, Batch\n'), ((10779, 10795), 'torch.log', 'torch.log', (['dense'], {}), '(dense)\n', (10788, 10795), False, 'import torch\n'), ((12166, 12181), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12179, 12181), True, 'from torch import distributed as dist\n'), ((12203, 12224), 'torch.distributed.get_world_size', 
'dist.get_world_size', ([], {}), '()\n', (12222, 12224), True, 'from torch import distributed as dist\n'), ((12904, 13144), 'torchrec.datasets.criteo.InMemoryBinaryCriteoIterDataPipe', 'InMemoryBinaryCriteoIterDataPipe', (['*stage_files'], {'batch_size': 'args.batch_size', 'rank': 'rank', 'world_size': 'world_size', 'hashes': '(args.num_embeddings_per_feature if args.num_embeddings is None else [args.\n num_embeddings] * CAT_FEATURE_COUNT)'}), '(*stage_files, batch_size=args.batch_size,\n rank=rank, world_size=world_size, hashes=args.\n num_embeddings_per_feature if args.num_embeddings is None else [args.\n num_embeddings] * CAT_FEATURE_COUNT)\n', (12936, 13144), False, 'from torchrec.datasets.criteo import INT_FEATURE_COUNT, CAT_FEATURE_COUNT, DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES, InMemoryBinaryCriteoIterDataPipe\n'), ((5096, 5137), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (5110, 5137), True, 'import torch.utils.data.datapipes as dp\n'), ((9182, 9197), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9195, 9197), True, 'from torch import distributed as dist\n'), ((9316, 9337), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (9335, 9337), True, 'from torch import distributed as dist\n'), ((9519, 9544), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'int', '(0)'], {}), '(row[i], int, 0)\n', (9528, 9544), False, 'from torchrec.datasets.utils import ReadLinesFromCSV, safe_cast, Batch\n'), ((12426, 12441), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12439, 12441), True, 'from torch import distributed as dist\n'), ((12560, 12581), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (12579, 12581), True, 'from torch import distributed as dist\n'), ((3914, 3937), 'torch.from_numpy', 'torch.from_numpy', (['dense'], {}), '(dense)\n', (3930, 3937), False, 'import torch\n'), ((8339, 8354), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8352, 8354), True, 'from torch import distributed as dist\n'), ((9245, 9260), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9258, 9260), True, 'from torch import distributed as dist\n'), ((9263, 9284), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (9282, 9284), True, 'from torch import distributed as dist\n'), ((12489, 12504), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12502, 12504), True, 'from torch import distributed as dist\n'), ((12507, 12528), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (12526, 12528), True, 'from torch import distributed as dist\n'), ((1411, 1453), 'io.TextIOWrapper', 'io.TextIOWrapper', (['buffer'], {'encoding': '"""utf-8"""'}), "(buffer, encoding='utf-8')\n", (1427, 1453), False, 'import io\n'), ((9622, 9649), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'str', '"""0"""'], {}), "(row[i], str, '0')\n", (9631, 9649), False, 'from torchrec.datasets.utils import ReadLinesFromCSV, safe_cast, Batch\n'), ((10539, 10578), 'torch.as_tensor', 'torch.as_tensor', (['arr'], {'dtype': 'dtypes[idx]'}), '(arr, dtype=dtypes[idx])\n', (10554, 10578), False, 'import torch\n'), ((12683, 12733), 'os.path.join', 'os.path.join', (['args.in_memory_binary_criteo_path', 'x'], {}), '(args.in_memory_binary_criteo_path, x)\n', (12695, 12733), False, 'import os\n'), ((10662, 10701), 'torch.as_tensor', 'torch.as_tensor', (['arr'], 
{'dtype': 'dtypes[idx]'}), '(arr, dtype=dtypes[idx])\n', (10677, 10701), False, 'import torch\n')] |
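For reference, the Batcher/Collator pair used in `_get_s3_dataloader` above can be exercised on its own. A small sketch with made-up rows follows; the toy feature values and the `collate` helper are assumptions for illustration, not Criteo data:

import torch
import torch.utils.data.datapipes as dp
from torchdata.datapipes.iter import IterableWrapper

def collate(batch):
    # mirrors the shape handling in my_collate above: stack rows into tensors
    dense = torch.tensor([row[0] for row in batch], dtype=torch.float32)
    sparse = torch.tensor([row[1] for row in batch], dtype=torch.int64)
    labels = torch.tensor([row[2] for row in batch], dtype=torch.int64)
    return torch.log(dense + 3), sparse, labels.reshape(-1, 1)

rows = IterableWrapper([([1, 2], [7, 8], 0), ([3, 4], [9, 10], 1)])
batched = dp.iter.Batcher(rows, 2)                       # lists of 2 rows each
collated = dp.iter.Collator(batched, collate_fn=collate)  # per-batch tensors
dense, sparse, labels = next(iter(collated))
print(dense.shape, sparse.shape, labels.shape)             # (2, 2) (2, 2) (2, 1)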
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Iterator, List, Tuple, TypeVar
import expecttest
from _utils._common_utils_for_test import IS_WINDOWS
from torch.utils.data import IterDataPipe
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper
T_co = TypeVar("T_co", covariant=True)
class Adaptor(IterDataPipe[T_co]):
def __init__(self, datapipe: IterDataPipe) -> None:
self.datapipe = datapipe
self.started = False
def __iter__(self) -> Iterator[T_co]:
yield from self.datapipe
class TempReadingService(ReadingServiceInterface):
adaptors: List[IterDataPipe] = []
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
graph = traverse(datapipe, only_datapipe=True)
dps = find_dps(graph, Mapper)
for dp in reversed(dps):
new_dp = Adaptor(dp)
self.adaptors.append(new_dp)
graph = replace_dp(graph, dp, new_dp)
return list(graph.keys())[0]
def initialize_iteration(self) -> None:
for dp in self.adaptors:
dp.started = True
def finalize_iteration(self) -> None:
for dp in self.adaptors:
dp.started = False
def _x_and_x_plus_5(x):
return [x, x + 5]
def _x_mod_2(x):
return x % 2
def _x_mult_2(x):
return x * 2
class TestGraph(expecttest.TestCase):
def _get_datapipes(self) -> Tuple[IterDataPipe, IterDataPipe, IterDataPipe]:
src_dp = IterableWrapper(range(20))
m1 = src_dp.map(_x_and_x_plus_5)
ub = m1.unbatch()
c1, c2 = ub.demux(2, _x_mod_2)
dm = c1.main_datapipe
m2 = c1.map(_x_mult_2)
dp = m2.zip(c2)
return traverse(dp, only_datapipe=True), (src_dp, m1, ub, dm, c1, c2, m2, dp)
def test_find_dps(self) -> None:
graph, (_, m1, *_, m2, _) = self._get_datapipes() # pyre-ignore
dps = find_dps(graph, Mapper)
expected_dps = {m1, m2}
for dp in dps:
self.assertTrue(dp in expected_dps)
def test_replace_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
new_dp1 = Adaptor(m1)
new_dp2 = Adaptor(m2)
graph = replace_dp(graph, m1, new_dp1)
exp_g1 = {
dp: {
m2: {c1: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}}},
c2: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}},
}
}
self.assertEqual(graph, exp_g1)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g1)
graph = replace_dp(graph, m2, new_dp2)
exp_g2 = {
dp: {
new_dp2: {m2: {c1: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}}}},
c2: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}},
}
}
self.assertEqual(graph, exp_g2)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
def test_remove_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
graph = remove_dp(graph, m1)
exp_g1 = {dp: {m2: {c1: {dm: {ub: {src_dp: {}}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g1)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g1)
graph = remove_dp(graph, m2)
exp_g2 = {dp: {c1: {dm: {ub: {src_dp: {}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g2)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
with self.assertRaisesRegex(
Exception,
"Cannot remove source DataPipe that is the first DataPipe in the pipeline",
):
remove_dp(graph, src_dp)
with self.assertRaisesRegex(
Exception,
"Cannot remove a receiving DataPipe having multiple sending DataPipes",
):
remove_dp(graph, dp)
def test_reading_service(self) -> None:
_, (*_, dp) = self._get_datapipes() # pyre-ignore
rs = TempReadingService()
dl = DataLoader2(dp, reading_service=rs)
self.assertTrue(len(rs.adaptors) == 0)
it = iter(dl)
for new_dp in rs.adaptors:
self.assertTrue(new_dp.started)
_ = list(it)
for new_dp in rs.adaptors:
self.assertFalse(new_dp.started)
@unittest.skipIf(IS_WINDOWS, "Fork is required for lambda")
def test_multiprocessing_reading_service(self) -> None:
_, (*_, dp) = self._get_datapipes() # pyre-ignore
import torch.multiprocessing as mp
mp.set_start_method("fork")
rs = MultiProcessingReadingService(2, persistent_workers=True)
dl = DataLoader2(dp, reading_service=rs)
d1 = list(dl)
d2 = list(dl)
self.assertEqual(d1, d2)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.dataloader2.MultiProcessingReadingService",
"torchdata.dataloader2.graph.traverse",
"torchdata.dataloader2.graph.find_dps",
"torchdata.dataloader2.graph.replace_dp",
"torchdata.dataloader2.graph.remove_dp",
"torchdata.dataloader2.DataLoader2"
] | [((644, 675), 'typing.TypeVar', 'TypeVar', (['"""T_co"""'], {'covariant': '(True)'}), "('T_co', covariant=True)\n", (651, 675), False, 'from typing import Iterator, List, Tuple, TypeVar\n'), ((5039, 5097), 'unittest.skipIf', 'unittest.skipIf', (['IS_WINDOWS', '"""Fork is required for lambda"""'], {}), "(IS_WINDOWS, 'Fork is required for lambda')\n", (5054, 5097), False, 'import unittest\n'), ((5529, 5544), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5542, 5544), False, 'import unittest\n'), ((1081, 1119), 'torchdata.dataloader2.graph.traverse', 'traverse', (['datapipe'], {'only_datapipe': '(True)'}), '(datapipe, only_datapipe=True)\n', (1089, 1119), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((1134, 1157), 'torchdata.dataloader2.graph.find_dps', 'find_dps', (['graph', 'Mapper'], {}), '(graph, Mapper)\n', (1142, 1157), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((2259, 2282), 'torchdata.dataloader2.graph.find_dps', 'find_dps', (['graph', 'Mapper'], {}), '(graph, Mapper)\n', (2267, 2282), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((2759, 2789), 'torchdata.dataloader2.graph.replace_dp', 'replace_dp', (['graph', 'm1', 'new_dp1'], {}), '(graph, m1, new_dp1)\n', (2769, 2789), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3107, 3137), 'torchdata.dataloader2.graph.replace_dp', 'replace_dp', (['graph', 'm2', 'new_dp2'], {}), '(graph, m2, new_dp2)\n', (3117, 3137), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3759, 3779), 'torchdata.dataloader2.graph.remove_dp', 'remove_dp', (['graph', 'm1'], {}), '(graph, m1)\n', (3768, 3779), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3994, 4014), 'torchdata.dataloader2.graph.remove_dp', 'remove_dp', (['graph', 'm2'], {}), '(graph, m2)\n', (4003, 4014), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((4744, 4779), 'torchdata.dataloader2.DataLoader2', 'DataLoader2', (['dp'], {'reading_service': 'rs'}), '(dp, reading_service=rs)\n', (4755, 4779), False, 'from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface\n'), ((5270, 5297), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""fork"""'], {}), "('fork')\n", (5289, 5297), True, 'import torch.multiprocessing as mp\n'), ((5312, 5369), 'torchdata.dataloader2.MultiProcessingReadingService', 'MultiProcessingReadingService', (['(2)'], {'persistent_workers': '(True)'}), '(2, persistent_workers=True)\n', (5341, 5369), False, 'from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface\n'), ((5383, 5418), 'torchdata.dataloader2.DataLoader2', 'DataLoader2', (['dp'], {'reading_service': 'rs'}), '(dp, reading_service=rs)\n', (5394, 5418), False, 'from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface\n'), ((1286, 1315), 'torchdata.dataloader2.graph.replace_dp', 'replace_dp', (['graph', 'dp', 'new_dp'], {}), '(graph, dp, new_dp)\n', (1296, 1315), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((2062, 2094), 'torchdata.dataloader2.graph.traverse', 'traverse', (['dp'], {'only_datapipe': '(True)'}), '(dp, only_datapipe=True)\n', (2070, 2094), False, 'from 
torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3048, 3080), 'torchdata.dataloader2.graph.traverse', 'traverse', (['dp'], {'only_datapipe': '(True)'}), '(dp, only_datapipe=True)\n', (3056, 3080), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3407, 3439), 'torchdata.dataloader2.graph.traverse', 'traverse', (['dp'], {'only_datapipe': '(True)'}), '(dp, only_datapipe=True)\n', (3415, 3439), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((3935, 3967), 'torchdata.dataloader2.graph.traverse', 'traverse', (['dp'], {'only_datapipe': '(True)'}), '(dp, only_datapipe=True)\n', (3943, 3967), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((4164, 4196), 'torchdata.dataloader2.graph.traverse', 'traverse', (['dp'], {'only_datapipe': '(True)'}), '(dp, only_datapipe=True)\n', (4172, 4196), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((4378, 4402), 'torchdata.dataloader2.graph.remove_dp', 'remove_dp', (['graph', 'src_dp'], {}), '(graph, src_dp)\n', (4387, 4402), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n'), ((4571, 4591), 'torchdata.dataloader2.graph.remove_dp', 'remove_dp', (['graph', 'dp'], {}), '(graph, dp)\n', (4580, 4591), False, 'from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse\n')] |
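For orientation, the graph utilities exercised by the tests above can also be used directly on a small pipeline. The sketch below mirrors the `Adaptor` pattern from the test; the `_double`/`_plus_one` functions and the `Passthrough` wrapper are made up for illustration:

from torch.utils.data import IterDataPipe
from torchdata.dataloader2.graph import find_dps, replace_dp, traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper

def _double(x):
    return x * 2

def _plus_one(x):
    return x + 1

class Passthrough(IterDataPipe):
    # same idea as Adaptor above: wrap a datapipe without changing its output
    def __init__(self, datapipe):
        self.datapipe = datapipe
    def __iter__(self):
        yield from self.datapipe

src = IterableWrapper(range(5))
doubled = Mapper(src, _double)
root = Mapper(doubled, _plus_one)

graph = traverse(root, only_datapipe=True)    # nested dict keyed by datapipes
mappers = find_dps(graph, Mapper)             # finds both Mapper instances
graph = replace_dp(graph, doubled, Passthrough(doubled))
print(list(root))                             # [1, 3, 5, 7, 9]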
import csv
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
URL = {
"train": "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt",
"test": "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt",
}
MD5 = {
"train": "793daf7b6224281e75fe61c1f80afe35",
"test": "e437fdddb92535b820fe8852e2df8a49",
}
NUM_LINES = {
"train": 4076,
"test": 1725,
}
DATASET_NAME = "MRPC"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def MRPC(root: str, split: Union[Tuple[str], str]):
"""MRPC Dataset
For additional details refer to https://www.microsoft.com/en-us/download/details.aspx?id=52398
Number of lines per split:
- train: 4076
- test: 1725
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
:returns: DataPipe that yields data points from MRPC dataset which consist of label, sentence1, sentence2
:rtype: (int, str, str)
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
def _filepath_fn(x):
return os.path.join(root, os.path.basename(x))
def _modify_res(x):
return (int(x[0]), x[3], x[4])
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=_filepath_fn,
hash_dict={_filepath_fn(URL[split]): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
cache_dp = cache_dp.parse_csv(skip_lines=1, delimiter="\t", quoting=csv.QUOTE_NONE).map(_modify_res)
return cache_dp.shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.IterableWrapper"
] | [((226, 258), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (245, 258), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((730, 782), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (755, 782), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((784, 823), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'test')"], {}), "(('train', 'test'))\n", (804, 823), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((1813, 1842), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1828, 1842), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((2143, 2181), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'encoding': '"""utf-8"""'}), "(cache_dp, encoding='utf-8')\n", (2153, 2181), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1455, 1487), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1474, 1487), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1714, 1733), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1730, 1733), False, 'import os\n'), ((2061, 2081), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2071, 2081), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n')] |
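A brief, hedged usage sketch for the MRPC datapipe defined above. It assumes `torchdata` is installed, the download URL is reachable, and that the `_create_dataset_directory` decorator supplies the default cache root, as it does for the other torchtext datasets:

from torch.utils.data import DataLoader

train_dp = MRPC(split="train")
label, sentence1, sentence2 = next(iter(train_dp))  # (int, str, str) per the docstring
print(label, sentence1[:40], sentence2[:40])

loader = DataLoader(train_dp, batch_size=8)           # datapipes plug into DataLoader directly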
# Copyright (c) Facebook, Inc. and its affiliates.
import http.server
import os
import re
import threading
import torch
import torch.utils.data.backward_compatibility
import torchvision.datasets as datasets
import torchvision.datasets.folder
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
IMAGES_ROOT = os.path.join("fakedata", "imagefolder")
USE_FORK_DATAPIPE = False
NUM_WORKERS = 5
BATCH_SIZE = None
data_transform = transforms.Compose(
[
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
# DataPipes implementation of ImageFolder constructs and executes graph of DataPipes (aka DataPipeline)
# FileLister -> ObtainCategories
# |
# V
# FileLister -> AttributeCategories -> LoadAndDecodeImages (using `map`) -> ApplyTorchVisionTransforms (using `map`)
def get_category_name(path):
rel_path = os.path.relpath(path, start=IMAGES_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
class ObtainCategories(IterDataPipe):
def __init__(self, source_dp, parse_category_fn=get_category_name) -> None:
self.source_dp = source_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
categories = set()
for path in self.source_dp:
categories.add(self.parse_category_fn(path))
cat_to_id = {name: i for i, name in enumerate(sorted(categories))}
yield cat_to_id
class AttributeCategories(IterDataPipe):
def __init__(self, listfiles_dp, categories_dp, parse_category_fn=get_category_name) -> None:
self.listfiles_dp = listfiles_dp
self.categories_dp = categories_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
for categories in self.categories_dp:
cat_to_dp = categories
for data in self.listfiles_dp:
if isinstance(data, tuple):
category = cat_to_dp[self.parse_category_fn(data[0])]
yield data + (category,)
else:
category = cat_to_dp[self.parse_category_fn(data)]
yield (data, category)
def MyImageFolder(root=IMAGES_ROOT, transform=None):
if not USE_FORK_DATAPIPE:
# Yes, we had to scan files twice. Alternatively, it is possible to use the
# `fork` DataPipe, but it would require a buffer equal to the size of all
# full file names
# TODO(125): Make sure that `fork` complains when buffer becomes
# too large
list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
else:
list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
list_files_1 = list_files_1.sharding_filter()
categories = ObtainCategories(list_files_0)
with_categories = AttributeCategories(list_files_1, categories)
using_default_loader = with_categories.map(lambda x: (torchvision.datasets.folder.default_loader(x[0]), x[1]))
transformed = using_default_loader.map(lambda x: (transform(x[0]), x[1]))
return transformed
class ExpandURLPatternDataPipe(IterDataPipe):
def __init__(self, pattern) -> None:
result = re.match(r"(.*?)\{(.*?)}(.*)", pattern)
if result:
self.prefix = result.group(1)
self.pattern = result.group(2)
self.postfix = result.group(3)
result = re.match(r"(\d+)\.\.(\d+)", self.pattern)
if result:
self.start_str = result.group(1)
self.end_str = result.group(2)
else:
raise Exception("Invalid pattern")
else:
raise Exception("Invalid pattern")
def __iter__(self):
current_int = int(self.start_str)
end_int = int(self.end_str)
for i in range(current_int, end_int + 1):
str_i = str(i)
while len(str_i) < len(self.start_str):
str_i = "0" + str_i
yield self.prefix + str_i + self.postfix
HTTP_PATH_ROOT = "http://localhost:8000/"
HTTP_PATH_CAT = "http://localhost:8000/cat/{1..3}.jpg"
HTTP_PATH_DOG = "http://localhost:8000/dog/{1..3}.jpg"
def get_category_name_url(url):
rel_path = os.path.relpath(url, start=HTTP_PATH_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
def stream_to_pil(stream):
img = Image.open(stream)
return img.convert("RGB")
def MyHTTPImageFolder(transform=None):
# HTTP Protocol doesn't support listing files, so we had to provide it explicitly
list_files = ExpandURLPatternDataPipe(HTTP_PATH_CAT) + ExpandURLPatternDataPipe(HTTP_PATH_DOG)
list_files_0, list_files_1 = list_files.fork(2)
list_files_1 = list_files_1.sharding_filter().shuffle()
categories = ObtainCategories(list_files_0, parse_category_fn=get_category_name_url)
loaded_files = HttpReader(list_files_1)
with_categories = AttributeCategories(loaded_files, categories, parse_category_fn=get_category_name_url)
pil_images = with_categories.map(lambda x: (x[0], stream_to_pil(x[1]), x[2]))
transformed = pil_images.map(lambda x: (transform(x[1]), x[2]))
return transformed
if __name__ == "__main__":
dataset = datasets.ImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
items = list(dl)
assert len(items) == 6
dataset = MyImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn,
)
items = list(dl)
assert len(items) == 6
http_handler = http.server.SimpleHTTPRequestHandler
http_handler.log_message = lambda a, b, c, d, e: None
httpd = http.server.HTTPServer(("", 8000), http_handler)
os.chdir(IMAGES_ROOT)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
dataset = MyHTTPImageFolder(transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn,
)
try:
items = list(dl)
assert len(items) == 6
finally:
httpd.shutdown()
| [
"torchdata.datapipes.iter.FileLister",
"torchdata.datapipes.iter.HttpReader"
] | [((438, 477), 'os.path.join', 'os.path.join', (['"""fakedata"""', '"""imagefolder"""'], {}), "('fakedata', 'imagefolder')\n", (450, 477), False, 'import os\n'), ((1144, 1184), 'os.path.relpath', 'os.path.relpath', (['path'], {'start': 'IMAGES_ROOT'}), '(path, start=IMAGES_ROOT)\n', (1159, 1184), False, 'import os\n'), ((4525, 4567), 'os.path.relpath', 'os.path.relpath', (['url'], {'start': 'HTTP_PATH_ROOT'}), '(url, start=HTTP_PATH_ROOT)\n', (4540, 4567), False, 'import os\n'), ((4668, 4686), 'PIL.Image.open', 'Image.open', (['stream'], {}), '(stream)\n', (4678, 4686), False, 'from PIL import Image\n'), ((5166, 5190), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['list_files_1'], {}), '(list_files_1)\n', (5176, 5190), False, 'from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe\n'), ((5517, 5581), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', ([], {'root': 'IMAGES_ROOT', 'transform': 'data_transform'}), '(root=IMAGES_ROOT, transform=data_transform)\n', (5537, 5581), True, 'import torchvision.datasets as datasets\n'), ((5591, 5677), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': 'NUM_WORKERS'}), '(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=\n NUM_WORKERS)\n', (5601, 5677), False, 'from torch.utils.data import DataLoader\n'), ((5803, 5966), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'num_workers': 'NUM_WORKERS', 'worker_init_fn': 'torch.utils.data.backward_compatibility.worker_init_fn'}), '(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=\n NUM_WORKERS, worker_init_fn=torch.utils.data.backward_compatibility.\n worker_init_fn)\n', (5813, 5966), False, 'from torch.utils.data import DataLoader\n'), ((6232, 6253), 'os.chdir', 'os.chdir', (['IMAGES_ROOT'], {}), '(IMAGES_ROOT)\n', (6240, 6253), False, 'import os\n'), ((6267, 6311), 'threading.Thread', 'threading.Thread', ([], {'target': 'httpd.serve_forever'}), '(target=httpd.serve_forever)\n', (6283, 6311), False, 'import threading\n'), ((6399, 6562), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'num_workers': 'NUM_WORKERS', 'worker_init_fn': 'torch.utils.data.backward_compatibility.worker_init_fn'}), '(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=\n NUM_WORKERS, worker_init_fn=torch.utils.data.backward_compatibility.\n worker_init_fn)\n', (6409, 6562), False, 'from torch.utils.data import DataLoader\n'), ((591, 622), 'torchvision.transforms.RandomSizedCrop', 'transforms.RandomSizedCrop', (['(224)'], {}), '(224)\n', (617, 622), True, 'import torchvision.transforms as transforms\n'), ((632, 665), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (663, 665), True, 'import torchvision.transforms as transforms\n'), ((675, 696), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (694, 696), True, 'import torchvision.transforms as transforms\n'), ((706, 781), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (726, 781), True, 'import torchvision.transforms as transforms\n'), ((2778, 2822), 'torchdata.datapipes.iter.FileLister', 'FileLister', ([], {'root': 'IMAGES_ROOT', 'recursive': '(True)'}), '(root=IMAGES_ROOT, recursive=True)\n', (2788, 
2822), False, 'from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe\n'), ((3502, 3541), 're.match', 're.match', (['"""(.*?)\\\\{(.*?)}(.*)"""', 'pattern'], {}), "('(.*?)\\\\{(.*?)}(.*)', pattern)\n", (3510, 3541), False, 'import re\n'), ((3710, 3754), 're.match', 're.match', (['"""(\\\\d+)\\\\.\\\\.(\\\\d+)"""', 'self.pattern'], {}), "('(\\\\d+)\\\\.\\\\.(\\\\d+)', self.pattern)\n", (3718, 3754), False, 'import re\n'), ((2846, 2890), 'torchdata.datapipes.iter.FileLister', 'FileLister', ([], {'root': 'IMAGES_ROOT', 'recursive': '(True)'}), '(root=IMAGES_ROOT, recursive=True)\n', (2856, 2890), False, 'from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe\n'), ((2956, 3000), 'torchdata.datapipes.iter.FileLister', 'FileLister', ([], {'root': 'IMAGES_ROOT', 'recursive': '(True)'}), '(root=IMAGES_ROOT, recursive=True)\n', (2966, 3000), False, 'from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe\n')] |
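As a small aside, the URL expansion done by `ExpandURLPatternDataPipe` above is easy to inspect on its own; with the cat pattern from this file it yields three URLs (this assumes the class definition above is in scope):

urls = list(ExpandURLPatternDataPipe("http://localhost:8000/cat/{1..3}.jpg"))
print(urls)
# ['http://localhost:8000/cat/1.jpg',
#  'http://localhost:8000/cat/2.jpg',
#  'http://localhost:8000/cat/3.jpg']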
import csv
import os
from functools import partial
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_create_dataset_directory,
_wrap_split_argument,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
# we import HttpReader from _download_hooks so we can swap out public URLs
# with interal URLs when the dataset is used within Facebook
from torchtext._download_hooks import HttpReader
URL = "http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz"
MD5 = "4eb0065aba063ef77873d3a9c8088811"
NUM_LINES = {
"train": 5749,
"dev": 1500,
"test": 1379,
}
_PATH = "Stsbenchmark.tar.gz"
DATASET_NAME = "STSB"
_EXTRACTED_FILES = {
"train": os.path.join("stsbenchmark", "sts-train.csv"),
"dev": os.path.join("stsbenchmark", "sts-dev.csv"),
"test": os.path.join("stsbenchmark", "sts-test.csv"),
}
def _filepath_fn(root, x=_PATH):
return os.path.join(root, os.path.basename(x))
def _extracted_filepath_fn(root, split, _=None):
return _filepath_fn(root, _EXTRACTED_FILES[split])
def _filter_fn(split, x):
return _EXTRACTED_FILES[split] in x[0]
def _modify_res(x):
return (int(x[3]), float(x[4]), x[5], x[6])
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev", "test"))
def STSB(root, split):
"""STSB Dataset
For additional details refer to https://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark
Number of lines per split:
- train: 5749
- dev: 1500
- test: 1379
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`, `test`)
:returns: DataPipe that yields tuple of (index (int), label (float), sentence1 (str), sentence2 (str))
:rtype: (int, float, str, str)
"""
# TODO Remove this after removing conditional dependency
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root, URL): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(partial(_filter_fn, split))
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
parsed_data = data_dp.parse_csv(delimiter="\t", quoting=csv.QUOTE_NONE).map(_modify_res)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper"
] | [((224, 256), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (243, 256), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1298, 1350), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (1323, 1350), False, 'from torchtext.data.datasets_utils import _create_dataset_directory, _wrap_split_argument\n'), ((1352, 1398), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev', 'test')"], {}), "(('train', 'dev', 'test'))\n", (1372, 1398), False, 'from torchtext.data.datasets_utils import _create_dataset_directory, _wrap_split_argument\n'), ((799, 844), 'os.path.join', 'os.path.join', (['"""stsbenchmark"""', '"""sts-train.csv"""'], {}), "('stsbenchmark', 'sts-train.csv')\n", (811, 844), False, 'import os\n'), ((857, 900), 'os.path.join', 'os.path.join', (['"""stsbenchmark"""', '"""sts-dev.csv"""'], {}), "('stsbenchmark', 'sts-dev.csv')\n", (869, 900), False, 'import os\n'), ((914, 958), 'os.path.join', 'os.path.join', (['"""stsbenchmark"""', '"""sts-test.csv"""'], {}), "('stsbenchmark', 'sts-test.csv')\n", (926, 958), False, 'import os\n'), ((2298, 2320), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (2313, 2320), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((2974, 3025), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'encoding': '"""utf-8"""'}), "(cache_decompressed_dp, encoding='utf-8')\n", (2984, 3025), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1027, 1046), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1043, 1046), False, 'import os\n'), ((2085, 2117), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (2104, 2117), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((2829, 2855), 'functools.partial', 'partial', (['_filter_fn', 'split'], {}), '(_filter_fn, split)\n', (2836, 2855), False, 'from functools import partial\n'), ((2389, 2416), 'functools.partial', 'partial', (['_filepath_fn', 'root'], {}), '(_filepath_fn, root)\n', (2396, 2416), False, 'from functools import partial\n'), ((2525, 2556), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (2535, 2556), False, 'from torchtext._download_hooks import HttpReader\n'), ((2678, 2722), 'functools.partial', 'partial', (['_extracted_filepath_fn', 'root', 'split'], {}), '(_extracted_filepath_fn, root, split)\n', (2685, 2722), False, 'from functools import partial\n'), ((2762, 2805), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (2772, 2805), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n')] |
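A short, hedged usage sketch for the STSB datapipe defined above, under the same assumptions as for the other torchtext datasets (`torchdata` installed, network access, and the default cache root injected by `_create_dataset_directory`):

dev_dp = STSB(split="dev")
index, score, sentence1, sentence2 = next(iter(dev_dp))  # (int, float, str, str)
print(index, score, sentence1[:40])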
import os
import tarfile
import enum
import functools
from tqdm import tqdm
import h5py
import torch
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torch.utils.data import DataLoader2
from Dataset4EO import transforms
import pathlib
import pdb
import numpy as np
from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
from PIL import Image
from torchdata.datapipes.iter import Mapper
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "rsuss"
FNAME = "RSUSS"
_TRAIN_LEN = 5137
_VAL_LEN = 1059
_TEST_LEN = 3144
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class RSUSS(Dataset):
"""
- **homepage**: https://www.iarai.ac.at/rsbenchmark4uss/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
mode: str = "unsupervised",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self.root = root
self._categories = _info()["categories"]
self.mode = mode
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"trainval": ("landslide4sense.tar", "c7f6678d50c7003eba47b3cace8053c9bfa6b4692cd1630fe2d6b7bec11ccc77"),
}
def decompress_integrity_check(self, decom_dir):
train_img_dir = os.path.join(decom_dir, 'train', 'img')
train_mask_dir = os.path.join(decom_dir, 'train', 'mask')
val_img_dir = os.path.join(decom_dir, 'val', 'img')
if not os.path.exists(train_img_dir) or not os.path.exists(train_mask_dir) or not os.path.exists(val_img_dir):
return False
num_train_img = len(os.listdir(train_img_dir))
num_train_mask = len(os.listdir(train_mask_dir))
num_val_img = len(os.listdir(val_img_dir))
return (num_train_img == _TRAIN_LEN) and \
(num_train_mask == _TRAIN_LEN) and \
(num_val_img == _VAL_LEN)
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = self._TRAIN_VAL_ARCHIVES['trainval']
decom_dir = os.path.join(self.root, 'landslide4sense')
self.decom_dir = decom_dir
archive = HttpResource("https://syncandshare.lrz.de/dl/fiLurHQ9Cy4NwvmPGYQe7RWM/{}".format(file_name), sha256=sha256)
if not self.decompress_integrity_check(decom_dir):
print('Decompressing the tar file...')
with tarfile.open(os.path.join(self.root, file_name), 'r:gz') as tar:
tar.extractall(decom_dir)
tar.close()
return [archive]
def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
path = pathlib.Path(data)
in_folder = name in str(path.parent)
return in_folder
def _prepare_sample(self, data):
label_path, label = None, None
if self.mode=='unsupervised' and self._split=='train':
image_path, height_path = data
else:
(image_path, height_path), label_path = data
#img = h5py.File(image_path, 'r')['image'][()]
#img = torch.tensor(img).permute(2, 0, 1)
#height = h5py.File(height_path, 'r')['image'][()]
#height = torch.tensor(height)
#if label_path:
# label = h5py.File(label_path, 'r')['image'][()]
# label = torch.tensor(label)
if self._split == 'train' and self.mode == 'unsupervised':
return (image_path, height_path, None)
return (image_path, height_path, label_path)
class _Demux(enum.IntEnum):
VAL = 0
TEST = 1
TRAIN = 2
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
if self._is_in_folder(data, name="train", depth=2):
return self._Demux.TRAIN
if self._is_in_folder(data, name="val", depth=2):
return self._Demux.VAL
elif self._is_in_folder(data, name="test", depth=2):
return self._Demux.TEST
else:
return None
def _datapipe(self, res):
image_dp = FileLister(root=os.path.join(self.root, FNAME, 'images'), recursive=True)
val_img_dp, test_img_dp, train_img_dp = image_dp.demux(num_instances=3, classifier_fn=self._classify_archive,\
drop_none=True, buffer_size=INFINITE_BUFFER_SIZE)
height_dp = FileLister(root=os.path.join(self.root, FNAME, 'heights'), recursive=True)
val_height_dp, test_height_dp, train_height_dp = height_dp.demux(num_instances=3, classifier_fn=self._classify_archive,\
drop_none=True, buffer_size=INFINITE_BUFFER_SIZE)
label_dp = FileLister(root=os.path.join(self.root, FNAME, 'classes'), recursive=True)
val_label_dp, test_label_dp, train_label_dp = label_dp.demux(num_instances=3, classifier_fn=self._classify_archive,\
drop_none=True, buffer_size=INFINITE_BUFFER_SIZE)
train_dp = train_img_dp.zip(train_height_dp).zip(train_label_dp)
val_dp = val_img_dp.zip(val_height_dp).zip(val_label_dp)
test_dp = test_img_dp.zip(test_height_dp).zip(test_label_dp)
'''tfs = transforms.Compose(transforms.Resize((256,256)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomResizedCrop((224, 224), scale=[0.5, 1]))'''
ndp = eval(self._split+'_dp')
ndp = hint_shuffling(ndp)
ndp = hint_sharding(ndp)
ndp = Mapper(ndp, self._prepare_sample)
#ndp = ndp.map(tfs)
return ndp
def __len__(self) -> int:
return {
'train': _TRAIN_LEN,
'val': _VAL_LEN,
'test': _TEST_LEN
}[self._split]
| [
"torchdata.datapipes.iter.Mapper"
] | [((1874, 1913), 'os.path.join', 'os.path.join', (['decom_dir', '"""train"""', '"""img"""'], {}), "(decom_dir, 'train', 'img')\n", (1886, 1913), False, 'import os\n'), ((1939, 1979), 'os.path.join', 'os.path.join', (['decom_dir', '"""train"""', '"""mask"""'], {}), "(decom_dir, 'train', 'mask')\n", (1951, 1979), False, 'import os\n'), ((2002, 2039), 'os.path.join', 'os.path.join', (['decom_dir', '"""val"""', '"""img"""'], {}), "(decom_dir, 'val', 'img')\n", (2014, 2039), False, 'import os\n'), ((2652, 2694), 'os.path.join', 'os.path.join', (['self.root', '"""landslide4sense"""'], {}), "(self.root, 'landslide4sense')\n", (2664, 2694), False, 'import os\n'), ((3251, 3269), 'pathlib.Path', 'pathlib.Path', (['data'], {}), '(data)\n', (3263, 3269), False, 'import pathlib\n'), ((6039, 6058), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['ndp'], {}), '(ndp)\n', (6053, 6058), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((6073, 6091), 'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['ndp'], {}), '(ndp)\n', (6086, 6091), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((6106, 6139), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['ndp', 'self._prepare_sample'], {}), '(ndp, self._prepare_sample)\n', (6112, 6139), False, 'from torchdata.datapipes.iter import Mapper\n'), ((1026, 1052), 'Dataset4EO.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (1046, 1052), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((2214, 2239), 'os.listdir', 'os.listdir', (['train_img_dir'], {}), '(train_img_dir)\n', (2224, 2239), False, 'import os\n'), ((2270, 2296), 'os.listdir', 'os.listdir', (['train_mask_dir'], {}), '(train_mask_dir)\n', (2280, 2296), False, 'import os\n'), ((2324, 2347), 'os.listdir', 'os.listdir', (['val_img_dir'], {}), '(val_img_dir)\n', (2334, 2347), False, 'import os\n'), ((2056, 2085), 'os.path.exists', 'os.path.exists', (['train_img_dir'], {}), '(train_img_dir)\n', (2070, 2085), False, 'import os\n'), ((2093, 2123), 'os.path.exists', 'os.path.exists', (['train_mask_dir'], {}), '(train_mask_dir)\n', (2107, 2123), False, 'import os\n'), ((2131, 2158), 'os.path.exists', 'os.path.exists', (['val_img_dir'], {}), '(val_img_dir)\n', (2145, 2158), False, 'import os\n'), ((4655, 4695), 'os.path.join', 'os.path.join', (['self.root', 'FNAME', '"""images"""'], {}), "(self.root, FNAME, 'images')\n", (4667, 4695), False, 'import os\n'), ((4935, 4976), 'os.path.join', 'os.path.join', (['self.root', 'FNAME', '"""heights"""'], {}), "(self.root, FNAME, 'heights')\n", (4947, 4976), False, 'import os\n'), ((5225, 5266), 'os.path.join', 'os.path.join', (['self.root', 'FNAME', '"""classes"""'], {}), "(self.root, FNAME, 'classes')\n", (5237, 5266), False, 'import os\n'), ((2997, 3031), 'os.path.join', 'os.path.join', (['self.root', 'file_name'], {}), '(self.root, file_name)\n', (3009, 3031), False, 'import os\n')] |
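Finally, a heavily hedged sketch of how the RSUSS dataset above might be instantiated. The root path is a made-up placeholder, the archive is assumed to be present on disk already, and the sample layout follows `_prepare_sample`, which yields tuples of file paths rather than decoded arrays:

ds = RSUSS("/data/rsuss", split="val", skip_integrity_check=True)  # placeholder root
image_path, height_path, label_path = next(iter(ds))
print(image_path, height_path, label_path)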