code stringlengths 1.14k 31.2k | apis sequence | extract_api stringlengths 187 38.3k |
|---|---|---|
import pathlib
from typing import Any, Dict, List, Tuple
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
class Country211(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"country211",
homepage="https://github.com/openai/CLIP/blob/main/data/country211.md",
valid_options=dict(split=("train", "val", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
_SPLIT_NAME_MAPPER = {
"train": "train",
"val": "valid",
"test": "test",
}
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self.categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._SPLIT_NAME_MAPPER[config.split]))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| [
"torchdata.datapipes.iter.Mapper"
] | [((1862, 1880), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (1876, 1880), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1894, 1911), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1907, 1911), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1927, 1959), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (1933, 1959), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter\n'), ((792, 946), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://openaipublic.azureedge.net/clip/data/country211.tgz"""'], {'sha256': '"""c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c"""'}), "('https://openaipublic.azureedge.net/clip/data/country211.tgz',\n sha256='c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c')\n", (804, 946), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource\n'), ((1771, 1847), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""parent.parent.name"""', 'self._SPLIT_NAME_MAPPER[config.split]'], {}), "('parent.parent.name', self._SPLIT_NAME_MAPPER[config.split])\n", (1786, 1847), False, 'from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling\n'), ((1228, 1246), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1240, 1246), False, 'import pathlib\n'), ((1298, 1355), 'torchvision.prototype.features.Label.from_category', 'Label.from_category', (['category'], {'categories': 'self.categories'}), '(category, categories=self.categories)\n', (1317, 1355), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1398, 1428), 'torchvision.prototype.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (1420, 1428), False, 'from torchvision.prototype.features import EncodedImage, Label\n'), ((1531, 1552), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1543, 1552), False, 'import pathlib\n'), ((2146, 2164), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2158, 2164), False, 'import pathlib\n')] |
import enum
import pdb
import functools
import pathlib
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Demultiplexer,
IterKeyZipper,
LineReader,
)
from torchvision.datasets import VOCDetection
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "voc"
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor')
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class VOC(Dataset):
"""
- **homepage**: http://host.robots.ox.ac.uk/pascal/VOC/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2012",
task: str = "segmentation",
skip_integrity_check: bool = False,
) -> None:
self._year = self._verify_str_arg(year, "year", ("2007", "2008", "2009", "2010", "2011", "2012"))
if split == "test" and year != "2007":
raise ValueError("`split='test'` is only available for `year='2007'`")
else:
self._split = self._verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._task = self._verify_str_arg(task, "task", ("detection", "segmentation"))
self._anns_folder = "Annotations" if task == "detection" else "SegmentationClass"
self._split_folder = "Main" if task == "detection" else "Segmentation"
self._categories = _info()["categories"]
self.CLASSES = CLASSES
self.PALETTE = PALETTE
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"2007": ("VOCtrainval_06-Nov-2007.tar", "7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508"),
"2008": ("VOCtrainval_14-Jul-2008.tar", "7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e"),
"2009": ("VOCtrainval_11-May-2009.tar", "11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6"),
"2010": ("VOCtrainval_03-May-2010.tar", "1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4"),
"2011": ("VOCtrainval_25-May-2011.tar", "0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba"),
"2012": ("VOCtrainval_11-May-2012.tar", "e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb"),
}
_TEST_ARCHIVES = {
"2007": ("VOCtest_06-Nov-2007.tar", "6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892")
}
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = (self._TEST_ARCHIVES if self._split == "test" else self._TRAIN_VAL_ARCHIVES)[self._year]
archive = HttpResource(f"http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}", sha256=sha256)
return [archive]
def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
path = pathlib.Path(data[0])
return name in path.parent.parts[-depth:]
class _Demux(enum.IntEnum):
SPLIT = 0
IMAGES = 1
ANNS = 2
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
if self._is_in_folder(data, name="ImageSets", depth=2):
return self._Demux.SPLIT
elif self._is_in_folder(data, name="JPEGImages"):
return self._Demux.IMAGES
elif self._is_in_folder(data, name=self._anns_folder):
return self._Demux.ANNS
else:
return None
def _parse_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return cast(Dict[str, Any], VOCDetection.parse_voc_xml(ElementTree.parse(buffer).getroot())["annotation"])
def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
anns = self._parse_detection_ann(buffer)
instances = anns["object"]
return dict(
bounding_boxes=BoundingBox(
[
[int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
for instance in instances
],
format="xyxy",
image_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
),
labels=Label(
[self._categories.index(instance["name"]) for instance in instances], categories=self._categories
),
)
def _prepare_segmentation_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return dict(segmentation=EncodedImage.from_file(buffer))
def _prepare_sample(
self,
data: Tuple[Tuple[Tuple[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
image_path = pathlib.PosixPath(image_path).name
ann_path = pathlib.PosixPath(ann_path).name
#{'img_info': {'filename': '2009_000801.jpg', 'ann': {'seg_map': '2009_000801.png'}}, 'ann_info': {'seg_map': '2009_000801.png'}}
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':ann_path})})
return img_info
return dict(
(self._prepare_detection_ann if self._task == "detection" else self._prepare_segmentation_ann)(ann_buffer),
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
ann_path=ann_path,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
("train", "2007", "detection"): 2_501,
("train", "2007", "segmentation"): 209,
("train", "2008", "detection"): 2_111,
("train", "2008", "segmentation"): 511,
("train", "2009", "detection"): 3_473,
("train", "2009", "segmentation"): 749,
("train", "2010", "detection"): 4_998,
("train", "2010", "segmentation"): 964,
("train", "2011", "detection"): 5_717,
("train", "2011", "segmentation"): 1_112,
("train", "2012", "detection"): 5_717,
("train", "2012", "segmentation"): 1_464,
("val", "2007", "detection"): 2_510,
("val", "2007", "segmentation"): 213,
("val", "2008", "detection"): 2_221,
("val", "2008", "segmentation"): 512,
("val", "2009", "detection"): 3_581,
("val", "2009", "segmentation"): 750,
("val", "2010", "detection"): 5_105,
("val", "2010", "segmentation"): 964,
("val", "2011", "detection"): 5_823,
("val", "2011", "segmentation"): 1_111,
("val", "2012", "detection"): 5_823,
("val", "2012", "segmentation"): 1_449,
("trainval", "2007", "detection"): 5_011,
("trainval", "2007", "segmentation"): 422,
("trainval", "2008", "detection"): 4_332,
("trainval", "2008", "segmentation"): 1_023,
("trainval", "2009", "detection"): 7_054,
("trainval", "2009", "segmentation"): 1_499,
("trainval", "2010", "detection"): 10_103,
("trainval", "2010", "segmentation"): 1_928,
("trainval", "2011", "detection"): 11_540,
("trainval", "2011", "segmentation"): 2_223,
("trainval", "2012", "detection"): 11_540,
("trainval", "2012", "segmentation"): 2_913,
("test", "2007", "detection"): 4_952,
("test", "2007", "segmentation"): 210,
}[(self._split, self._year, self._task)]
def _filter_anns(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == self._Demux.ANNS
def _generate_categories(self) -> List[str]:
self._task = "detection"
resources = self._resources()
archive_dp = resources[0].load(self._root)
dp = Filter(archive_dp, self._filter_anns)
dp = Mapper(dp, self._parse_detection_ann, input_col=1)
categories = sorted({instance["name"] for _, anns in dp for instance in anns["object"]})
# We add a background category to be used during segmentation
categories.insert(0, "__background__")
return categories
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.Filter"
] | [((3711, 3818), 'Dataset4EO.datasets.utils.HttpResource', 'HttpResource', (['f"""http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}"""'], {'sha256': 'sha256'}), "(\n f'http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}',\n sha256=sha256)\n", (3723, 3818), False, 'from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset\n'), ((3941, 3962), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3953, 3962), False, 'import pathlib\n'), ((6688, 6794), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 3, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (6701, 6794), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7065, 7098), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['split_dp'], {'decode': '(True)'}), '(split_dp, decode=True)\n', (7075, 7098), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7118, 7142), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['split_dp'], {}), '(split_dp)\n', (7132, 7142), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7162, 7185), 'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['split_dp'], {}), '(split_dp)\n', (7175, 7185), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7527, 7559), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (7533, 7559), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((9955, 9992), 'torchdata.datapipes.iter.Filter', 'Filter', (['archive_dp', 'self._filter_anns'], {}), '(archive_dp, self._filter_anns)\n', (9961, 9992), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((10006, 10056), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._parse_detection_ann'], {'input_col': '(1)'}), '(dp, self._parse_detection_ann, input_col=1)\n', (10012, 10056), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((1465, 1491), 'Dataset4EO.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (1485, 1491), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5914, 5943), 'pathlib.PosixPath', 'pathlib.PosixPath', (['image_path'], {}), '(image_path)\n', (5931, 5943), False, 'import pathlib\n'), ((5968, 5995), 'pathlib.PosixPath', 'pathlib.PosixPath', (['ann_path'], {}), '(ann_path)\n', (5985, 5995), False, 'import pathlib\n'), ((6899, 6961), 'functools.partial', 'functools.partial', (['self._is_in_folder'], {'name': 'self._split_folder'}), '(self._is_in_folder, name=self._split_folder)\n', (6916, 6961), False, 'import functools\n'), ((6999, 7044), 'Dataset4EO.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""{self._split}.txt"""'], {}), "('name', f'{self._split}.txt')\n", (7014, 7044), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5530, 5560), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (5552, 5560), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((6441, 6477), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['image_buffer'], {}), '(image_buffer)\n', (6463, 6477), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((7372, 7398), 'Dataset4EO.datasets.utils._internal.getitem', 'getitem', (['*([0] * level)', '(1)'], {}), '(*([0] * level), 1)\n', (7379, 7398), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7425, 7446), 'Dataset4EO.datasets.utils._internal.path_accessor', 'path_accessor', (['"""stem"""'], {}), "('stem')\n", (7438, 7446), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((4644, 4669), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['buffer'], {}), '(buffer)\n', (4661, 4669), False, 'from xml.etree import ElementTree\n')] |
from torchtext._internal.module_utils import is_module_available
from typing import Union, Tuple
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
import os
URL = {
'train': "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv",
'test': "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv",
}
MD5 = {
'train': "b1a00f826fdfbd249f79597b59e1dc12",
'test': "d52ea96a97a2d943681189a97654912d",
}
NUM_LINES = {
'train': 120000,
'test': 7600,
}
DATASET_NAME = "AG_NEWS"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=4)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL[split]])
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, split + ".csv"),
hash_dict={os.path.join(root, split + ".csv"): MD5[split]},
hash_type="md5"
)
cache_dp = HttpReader(cache_dp)
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="r")
return cache_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| [
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader"
] | [((101, 133), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (120, 133), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((775, 832), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES', 'num_classes': '(4)'}), '(num_lines=NUM_LINES, num_classes=4)\n', (796, 832), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((834, 886), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (859, 886), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((888, 927), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'test')"], {}), "(('train', 'test'))\n", (908, 927), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((1185, 1214), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1200, 1214), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1431, 1451), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (1441, 1451), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1537, 1567), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'mode': '"""r"""'}), "(cache_dp, mode='r')\n", (1547, 1567), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((994, 1026), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1013, 1026), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1282, 1316), 'os.path.join', 'os.path.join', (['root', "(split + '.csv')"], {}), "(root, split + '.csv')\n", (1294, 1316), False, 'import os\n'), ((1337, 1371), 'os.path.join', 'os.path.join', (['root', "(split + '.csv')"], {}), "(root, split + '.csv')\n", (1349, 1371), False, 'import os\n')] |
import time
import torch
import torchdata
import torchfunc
from .datasets import ExampleDataset, ExampleIterable
from .utils import artificial_slowdown, enumerate_step, index_is_sample
def test_basic_iterable():
dataset = ExampleIterable(0, 100).map(lambda value: value + 12)
for index, item in enumerate(dataset):
assert index + 12 == item
def test_iterable_filter():
dataset = (
ExampleIterable(0, 100)
.map(lambda value: value + 12)
.filter(lambda elem: elem % 2 == 0)
)
for index, item in enumerate_step(dataset, start=12, step=2):
assert index == item
def test_basic_dataset():
dataset = ExampleDataset(0, 25).map(lambda sample: sample * sample).cache()
for index, value in enumerate(dataset):
assert index ** 2 == value
def test_dataset_multiple_cache():
# Range-like Dataset mapped to item ** 3
dataset = (
ExampleDataset(0, 25)
.cache()
.map(lambda sample: (sample + sample, sample))
.cache()
.map(lambda sample: sample[0] - sample[-1])
.cache()
.map(lambda sample: sample ** 3)
.cache()
)
# Iterate through dataset
for _ in dataset:
pass
for index, value in enumerate(dataset):
assert index ** 3 == value
def test_dataset_cache_speedup():
dataset = ExampleDataset(0, 5).map(artificial_slowdown).cache()
with torchfunc.Timer() as timer:
index_is_sample(dataset)
assert timer.checkpoint() > 5
index_is_sample(dataset)
assert timer.checkpoint() < 0.2
def test_dataset_complicated_cache():
dataset = (
(
(
ExampleDataset(0, 25)
| ExampleDataset(0, 25).map(lambda value: value * -1)
)
.cache()
.map(lambda sample: sample[0] + sample[1] + sample[0])
.cache()
.map(lambda sample: sample + sample)
| ExampleDataset(0, 25)
)
.cache()
.map(lambda values: ((values, values), values))
.map(torchdata.maps.Flatten())
.cache()
.map(lambda values: values[1])
.map(lambda value: value ** 2)
)
for index, value in enumerate(dataset):
assert index ** 2 == value
def test_apply():
def summation(generator):
return sum(value for value in generator)
assert ExampleDataset(0, 101).apply(summation) == 5050 # Returns 5050
def test_reduce():
assert ExampleDataset(0, 10).reduce(lambda x, y: x + y) == 45
def test_reduce_initializer():
assert ExampleDataset(0, 10).reduce(lambda x, y: x + y, 10) == 55
def test_repr():
assert (
repr(ExampleDataset(0, 5))
== "tests.datasets.ExampleDataset(values=[0, 1, 2, 3, 4])"
)
def test_dataset_dataloader():
# Range-like Dataset mapped to item ** 3
dataset = (
ExampleDataset(0, 25)
.cache()
.map(lambda sample: (sample + sample, sample))
.cache()
.map(lambda sample: sample[0] - sample[-1])
.cache()
.map(lambda sample: sample ** 3)
.cache()
)
# Iterate through dataset
for element in torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=3):
print(element)
| [
"torchdata.maps.Flatten"
] | [((3190, 3254), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': '(3)'}), '(dataset, shuffle=True, batch_size=3)\n', (3217, 3254), False, 'import torch\n'), ((1419, 1436), 'torchfunc.Timer', 'torchfunc.Timer', ([], {}), '()\n', (1434, 1436), False, 'import torchfunc\n'), ((2083, 2107), 'torchdata.maps.Flatten', 'torchdata.maps.Flatten', ([], {}), '()\n', (2105, 2107), False, 'import torchdata\n')] |
from typing import (
Iterator,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
)
import io
import torch
import torch.utils.data.datapipes as dp
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchdata.datapipes.utils import StreamWrapper
from torchrec.datasets.utils import (
LoadFiles,
ReadLinesFromCSV)
from torch.utils.data import IterDataPipe
from torchrec.datasets.criteo import _default_row_mapper
s3_prefixes = ['s3://criteo-dataset/day_0']
dp_s3_urls = S3FileLister(s3_prefixes)
dp_s3_files = S3FileLoader(dp_s3_urls) # outputs in (url, BytesIO)
# more datapipes to convert loaded bytes, e.g.
class LoadWithTextIOWrapper(IterDataPipe):
def __init__(self, paths, **open_kw):
self.paths = paths
self.open_kw: Any = open_kw # pyre-ignore[4]
def __iter__(self) -> Iterator[Any]:
for url, buffer in self.paths:
yield url, io.TextIOWrapper(buffer, encoding='utf-8')
class S3CriteoIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example:
>>> datapipe = CriteoIterDataPipe(
>>> ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
>>> )
>>> datapipe = dp.iter.Batcher(datapipe, 100)
>>> datapipe = dp.iter.Collator(datapipe)
>>> batch = next(iter(datapipe))
"""
def __init__(
self,
paths: S3FileLoader,
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> None:
self.paths = paths
self.row_mapper = row_mapper
self.open_kw: Any = open_kw # pyre-ignore[4]
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
worker_info = torch.utils.data.get_worker_info()
paths = self.paths
if worker_info is not None:
paths = (
path
for (idx, path) in enumerate(paths)
if idx % worker_info.num_workers == worker_info.id
)
# datapipe = LoadFiles(paths, mode="r", **self.open_kw)
datapipe = LoadWithTextIOWrapper(paths)
datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
if self.row_mapper:
datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
yield from datapipe
#print(dp_s3_files)
#datapipe = StreamWrapper(dp_s3_files).parse_csv_files(delimiter=' ')
#for d in datapipe: # Start loading data
datapipe = S3CriteoIterDataPipe(dp_s3_files)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
print(batch.keys())
| [
"torchdata.datapipes.iter.S3FileLoader",
"torchdata.datapipes.iter.S3FileLister"
] | [((520, 545), 'torchdata.datapipes.iter.S3FileLister', 'S3FileLister', (['s3_prefixes'], {}), '(s3_prefixes)\n', (532, 545), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((560, 584), 'torchdata.datapipes.iter.S3FileLoader', 'S3FileLoader', (['dp_s3_urls'], {}), '(dp_s3_urls)\n', (572, 584), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((3173, 3203), 'torch.utils.data.datapipes.iter.Batcher', 'dp.iter.Batcher', (['datapipe', '(100)'], {}), '(datapipe, 100)\n', (3188, 3203), True, 'import torch.utils.data.datapipes as dp\n'), ((3215, 3241), 'torch.utils.data.datapipes.iter.Collator', 'dp.iter.Collator', (['datapipe'], {}), '(datapipe)\n', (3231, 3241), True, 'import torch.utils.data.datapipes as dp\n'), ((2416, 2450), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2448, 2450), False, 'import torch\n'), ((2821, 2863), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (2837, 2863), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV\n'), ((2915, 2956), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (2929, 2956), True, 'import torch.utils.data.datapipes as dp\n'), ((932, 974), 'io.TextIOWrapper', 'io.TextIOWrapper', (['buffer'], {'encoding': '"""utf-8"""'}), "(buffer, encoding='utf-8')\n", (948, 974), False, 'import io\n')] |
import os
from functools import partial
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import HttpReader
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
}
MD5 = {
"train": "981b29407e0affa3b1b156f72073b945",
"dev": "3e85deb501d4e538b6bc56f786231552",
}
NUM_LINES = {
"train": 87599,
"dev": 10570,
}
DATASET_NAME = "SQuAD1"
def _filepath_fn(root, split, _=None):
return os.path.join(root, os.path.basename(URL[split]))
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root: str, split: Union[Tuple[str], str]):
"""SQuAD1 Dataset
For additional details refer to https://rajpurkar.github.io/SQuAD-explorer/
Number of lines per split:
- train: 87599
- dev: 10570
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
:returns: DataPipe that yields data points from SQuaAD1 dataset which consist of context, question, list of answers and corresponding index in context
:rtype: (str, str, list(str), list(int))
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root, split),
hash_dict={_filepath_fn(root, split): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
return cache_dp.parse_json_files().read_squad().shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((245, 277), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (264, 277), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((866, 918), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (891, 918), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((920, 958), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev')"], {}), "(('train', 'dev'))\n", (940, 958), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((1850, 1879), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1865, 1879), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((2203, 2241), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'encoding': '"""utf-8"""'}), "(cache_dp, encoding='utf-8')\n", (2213, 2241), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((833, 861), 'os.path.basename', 'os.path.basename', (['URL[split]'], {}), '(URL[split])\n', (849, 861), False, 'import os\n'), ((1637, 1669), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1656, 1669), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1980, 2014), 'functools.partial', 'partial', (['_filepath_fn', 'root', 'split'], {}), '(_filepath_fn, root, split)\n', (1987, 2014), False, 'from functools import partial\n'), ((2121, 2141), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2131, 2141), False, 'from torchtext._download_hooks import HttpReader\n')] |
import progressbar
import torch
from tele.meter import SumMeter
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchdata.mpii import MpiiData
from dsnt.data import MPIIDataset
from dsnt.util import timer, type_as_index, reverse_tensor
def generate_predictions(model, dataset, use_flipped=True, batch_size=1, time_meter=None):
"""Generate predictions with the model"""
if use_flipped:
assert batch_size == 1, 'test-time flip augmentation only work with batch_size=1'
sum_meter = SumMeter()
model.cuda()
model.eval()
loader = DataLoader(dataset, batch_size, num_workers=4, pin_memory=True)
preds = torch.DoubleTensor(len(dataset), 16, 2).zero_()
completed = 0
with progressbar.ProgressBar(max_value=len(dataset)) as bar:
for i, batch in enumerate(loader):
batch_size = batch['input'].size(0)
sum_meter.reset()
with timer(sum_meter):
if use_flipped:
sample = batch['input']
rev_sample = reverse_tensor(batch['input'], -1)
in_var = Variable(torch.cat([sample, rev_sample], 0).cuda(), volatile=True)
hm_var = model.forward_part1(in_var)
if isinstance(hm_var, list):
# Just use the last heatmap from stacked hourglass
hm_var = hm_var[-1]
hm1, hm2 = hm_var.split(1)
hm2 = reverse_tensor(hm2, -1)
hm2 = hm2.index_select(-3, type_as_index(MPIIDataset.HFLIP_INDICES, hm2))
hm = (hm1 + hm2) / 2
out_var = model.forward_part2(hm)
coords = model.compute_coords(out_var)
else:
in_var = Variable(batch['input'].cuda(), volatile=True)
out_var = model(in_var)
coords = model.compute_coords(out_var)
orig_preds = torch.baddbmm(
batch['transform_b'],
coords.double(),
batch['transform_m'])
pos = i * batch_size
preds[pos:(pos + batch_size)] = orig_preds
if time_meter is not None:
time_meter.add(sum_meter.value())
completed += batch_size
bar.update(completed)
return preds
def evaluate_mpii_predictions(preds, subset, evaluator):
mpii_data = MpiiData('/datasets/mpii')
subset_indices = mpii_data.subset_indices(subset)
actual = torch.from_numpy(mpii_data.keypoints[subset_indices])
head_lengths = torch.from_numpy(mpii_data.head_lengths[subset_indices])
joint_mask = torch.from_numpy(mpii_data.keypoint_masks[subset_indices])
# Calculate PCKh accuracies
evaluator.add(preds, actual, joint_mask, head_lengths)
return evaluator
| [
"torchdata.mpii.MpiiData"
] | [((537, 547), 'tele.meter.SumMeter', 'SumMeter', ([], {}), '()\n', (545, 547), False, 'from tele.meter import SumMeter\n'), ((597, 660), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset, batch_size, num_workers=4, pin_memory=True)\n', (607, 660), False, 'from torch.utils.data import DataLoader\n'), ((2484, 2510), 'torchdata.mpii.MpiiData', 'MpiiData', (['"""/datasets/mpii"""'], {}), "('/datasets/mpii')\n", (2492, 2510), False, 'from torchdata.mpii import MpiiData\n'), ((2579, 2632), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.keypoints[subset_indices]'], {}), '(mpii_data.keypoints[subset_indices])\n', (2595, 2632), False, 'import torch\n'), ((2652, 2708), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.head_lengths[subset_indices]'], {}), '(mpii_data.head_lengths[subset_indices])\n', (2668, 2708), False, 'import torch\n'), ((2726, 2784), 'torch.from_numpy', 'torch.from_numpy', (['mpii_data.keypoint_masks[subset_indices]'], {}), '(mpii_data.keypoint_masks[subset_indices])\n', (2742, 2784), False, 'import torch\n'), ((944, 960), 'dsnt.util.timer', 'timer', (['sum_meter'], {}), '(sum_meter)\n', (949, 960), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1071, 1105), 'dsnt.util.reverse_tensor', 'reverse_tensor', (["batch['input']", '(-1)'], {}), "(batch['input'], -1)\n", (1085, 1105), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1501, 1524), 'dsnt.util.reverse_tensor', 'reverse_tensor', (['hm2', '(-1)'], {}), '(hm2, -1)\n', (1515, 1524), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1572, 1617), 'dsnt.util.type_as_index', 'type_as_index', (['MPIIDataset.HFLIP_INDICES', 'hm2'], {}), '(MPIIDataset.HFLIP_INDICES, hm2)\n', (1585, 1617), False, 'from dsnt.util import timer, type_as_index, reverse_tensor\n'), ((1144, 1178), 'torch.cat', 'torch.cat', (['[sample, rev_sample]', '(0)'], {}), '([sample, rev_sample], 0)\n', (1153, 1178), False, 'import torch\n')] |
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json",
}
MD5 = {
"train": "62108c273c268d70893182d5cf8df740",
"dev": "246adae8b7002f8679c027697b0b7cf8",
}
NUM_LINES = {
"train": 130319,
"dev": 11873,
}
DATASET_NAME = "SQuAD2"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD2(root: str, split: Union[Tuple[str], str]):
"""SQuAD2 Dataset
For additional details refer to https://rajpurkar.github.io/SQuAD-explorer/
Number of lines per split:
- train: 130319
- dev: 11873
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
:returns: DataPipe that yields data points from SQuaAD1 dataset which consist of context, question, list of answers and corresponding index in context
:rtype: (str, str, list(str), list(int))
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
return cache_dp.parse_json_files().read_squad()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.FileOpener"
] | [((215, 247), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (234, 247), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((696, 748), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (721, 748), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((750, 788), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev')"], {}), "(('train', 'dev'))\n", (770, 788), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _create_dataset_directory\n'), ((1682, 1711), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL[split]]'], {}), '([URL[split]])\n', (1697, 1711), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((2073, 2103), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'mode': '"""b"""'}), "(cache_dp, mode='b')\n", (2083, 2103), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1469, 1501), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1488, 1501), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1991, 2011), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2001, 2011), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1841, 1860), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1857, 1860), False, 'import os\n'), ((1901, 1929), 'os.path.basename', 'os.path.basename', (['URL[split]'], {}), '(URL[split])\n', (1917, 1929), False, 'import os\n')] |
from torchtext._internal.module_utils import is_module_available
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
import os
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_add_docstring_header,
_create_dataset_directory,
)
from typing import Union, Tuple
URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
MD5 = '542ccefacc6c27f945fb54453812b3cd'
NUM_LINES = {
'train': 36718,
'valid': 3760,
'test': 4358,
}
DATASET_NAME = "WikiText2"
_EXTRACTED_FILES = {
'train': os.path.join('wikitext-2', 'wiki.train.tokens'),
'test': os.path.join('wikitext-2', 'wiki.test.tokens'),
'valid': os.path.join('wikitext-2', 'wiki.valid.tokens'),
}
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'valid', 'test'))
def WikiText2(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
# cache data on-disk
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL)): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split]))
# Extract zip and filter the appropriate split file
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").read_from_zip().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode='b')
return data_dp.readlines(strip_newline=False, decode=True, return_path=False)
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.HttpReader",
"torchdata.datapipes.iter.FileOpener"
] | [((69, 101), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (88, 101), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((794, 836), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES'}), '(num_lines=NUM_LINES)\n', (815, 836), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((838, 890), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (863, 890), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((892, 940), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'valid', 'test')"], {}), "(('train', 'valid', 'test'))\n", (912, 940), False, 'from torchtext.data.datasets_utils import _wrap_split_argument, _add_docstring_header, _create_dataset_directory\n'), ((618, 665), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.train.tokens"""'], {}), "('wikitext-2', 'wiki.train.tokens')\n", (630, 665), False, 'import os\n'), ((679, 725), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.test.tokens"""'], {}), "('wikitext-2', 'wiki.test.tokens')\n", (691, 725), False, 'import os\n'), ((740, 787), 'os.path.join', 'os.path.join', (['"""wikitext-2"""', '"""wiki.valid.tokens"""'], {}), "('wikitext-2', 'wiki.valid.tokens')\n", (752, 787), False, 'import os\n'), ((1199, 1221), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1214, 1221), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((2002, 2045), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (2012, 2045), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1009, 1041), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1028, 1041), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1491, 1522), 'torchdata.datapipes.iter.HttpReader', 'HttpReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (1501, 1522), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n'), ((1653, 1696), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1665, 1696), False, 'import os\n'), ((1344, 1363), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1360, 1363), False, 'import os\n'), ((1404, 1425), 'os.path.basename', 'os.path.basename', (['URL'], {}), '(URL)\n', (1420, 1425), False, 'import os\n'), ((1782, 1825), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1792, 1825), False, 'from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper\n')] |
import io
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Iterator
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
GDriveResource,
)
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
class PCAM(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"pcam",
homepage="https://github.com/basveeling/pcam",
categories=2,
valid_options=dict(split=("train", "test", "val")),
dependencies=["h5py"],
)
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[config.split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": features.Image(image.transpose(2, 0, 1)),
"label": Label(target.item()),
}
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Zipper"
] | [((1123, 1184), 'collections.namedtuple', 'namedtuple', (['"""_Resource"""', "('file_name', 'gdrive_id', 'sha256')"], {}), "('_Resource', ('file_name', 'gdrive_id', 'sha256'))\n", (1133, 1184), False, 'from collections import namedtuple\n'), ((4149, 4178), 'torchdata.datapipes.iter.Zipper', 'Zipper', (['images_dp', 'targets_dp'], {}), '(images_dp, targets_dp)\n', (4155, 4178), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper\n'), ((4192, 4210), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['dp'], {}), '(dp)\n', (4206, 4210), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((4224, 4241), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (4237, 4241), False, 'from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling\n'), ((4257, 4289), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (4263, 4289), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper\n'), ((3386, 3480), 'torchvision.prototype.datasets.utils.GDriveResource', 'GDriveResource', ([], {'file_name': 'file_name', 'id': 'gdrive_id', 'sha256': 'sha256', 'preprocess': '"""decompress"""'}), "(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess\n ='decompress')\n", (3400, 3480), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, OnlineResource, GDriveResource\n'), ((967, 984), 'h5py.File', 'h5py.File', (['handle'], {}), '(handle)\n', (976, 984), False, 'import h5py\n')] |
import os
from functools import partial
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import _create_dataset_directory
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import HttpReader
URL = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
MD5 = "b6d5672bd9dc1e66ab2bb020ebeafb8d"
_PATH = "quora_duplicate_questions.tsv"
NUM_LINES = {"train": 404290}
DATASET_NAME = "QQP"
def _filepath_fn(root, _=None):
return os.path.join(root, _PATH)
def _modify_res(x):
return (int(x[-1]), x[3], x[4])
@_create_dataset_directory(dataset_name=DATASET_NAME)
def QQP(root: str):
"""QQP dataset
For additional details refer to https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
:returns: DataPipe that yields rows from QQP dataset (label (int), question1 (str), question2 (str))
:rtype: (int, str, str)
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root): MD5},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
# some context stored at top of the file needs to be removed
parsed_data = cache_dp.parse_csv(skip_lines=1, delimiter="\t").map(_modify_res)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((178, 210), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (197, 210), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((668, 720), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (693, 720), False, 'from torchtext.data.datasets_utils import _create_dataset_directory\n'), ((581, 606), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (593, 606), False, 'import os\n'), ((1348, 1370), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (1363, 1370), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1630, 1668), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_dp'], {'encoding': '"""utf-8"""'}), "(cache_dp, encoding='utf-8')\n", (1640, 1668), False, 'from torchdata.datapipes.iter import FileOpener, IterableWrapper\n'), ((1135, 1167), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1154, 1167), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1428, 1455), 'functools.partial', 'partial', (['_filepath_fn', 'root'], {}), '(_filepath_fn, root)\n', (1435, 1455), False, 'from functools import partial\n'), ((1548, 1568), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (1558, 1568), False, 'from torchtext._download_hooks import HttpReader\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torch.utils.data.dataset import IterableDataset
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_add_docstring_header,
_create_dataset_directory,
_wrap_split_argument,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import IterableWrapper, FileLoader
# we import HttpReader from _download_hooks so we can swap out public URLs
# with interal URLs when the dataset is used within Facebook
from torchtext._download_hooks import HttpReader
NUM_LINES = {
"train": 67349,
"dev": 872,
"test": 1821,
}
MD5 = "9f81648d4199384278b86e315dac217c"
URL = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
_PATH = "SST-2.zip"
_EXTRACTED_FILES = {
"train": f"{os.sep}".join([_PATH, "SST-2", "train.tsv"]),
"dev": f"{os.sep}".join([_PATH, "SST-2", "dev.tsv"]),
"test": f"{os.sep}".join([_PATH, "SST-2", "test.tsv"]),
}
_EXTRACTED_FILES_MD5 = {
"train": "da409a0a939379ed32a470bc0f7fe99a",
"dev": "268856b487b2a31a28c0a93daaff7288",
"test": "3230e4efec76488b87877a56ae49675a",
}
_FIRST_LINE_MD5 = {
"train": "2552b8cecd57b2e022ef23411c688fa8",
"dev": "1b0ffd6aa5f2bf0fd9840a5f6f1a9f07",
"test": "f838c81fe40bfcd7e42e9ffc4dd004f7",
}
DATASET_NAME = "SST2"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev", "test"))
def SST2(root, split, validate_hash=True):
return SST2Dataset(root, split, validate_hash=validate_hash)
class SST2Dataset(IterableDataset):
"""The SST2 dataset uses torchdata datapipes end-2-end.
To avoid download at every epoch, we cache the data on-disk
We do sanity check on dowloaded and extracted data
"""
def __init__(self, root, split, validate_hash=True):
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` is required to be installed to use this dataset."
"Please refer to https://github.com/pytorch/data for instructions on "
"how to install the package."
)
self._dp = self._get_datapipe(root, split, validate_hash)
def __iter__(self):
for data in self._dp:
yield data
def _get_datapipe(self, root, split, validate_hash):
# Validate integrity of dataset using md5 checksum
hash_dict = {os.path.join(root, "SST-2.zip"): MD5} if validate_hash else None
hash_type = "md5" if validate_hash else None
# cache data on-disk
cache_dp = IterableWrapper([URL]).on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict=hash_dict,
hash_type=hash_type,
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
# Load from cached file
cache_dp = FileLoader(cache_dp, mode="rb")
# extract data from zip
extracted_files = cache_dp.read_from_zip().filter(
lambda x: f"{split}.tsv" in x[0]
)
# Parse CSV file and yield data samples
return extracted_files.parse_csv(skip_lines=1, delimiter="\t").map(
lambda x: (x[0], x[1])
)
| [
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileLoader"
] | [((314, 346), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (333, 346), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((1378, 1435), 'torchtext.data.datasets_utils._add_docstring_header', '_add_docstring_header', ([], {'num_lines': 'NUM_LINES', 'num_classes': '(2)'}), '(num_lines=NUM_LINES, num_classes=2)\n', (1399, 1435), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((1437, 1489), 'torchtext.data.datasets_utils._create_dataset_directory', '_create_dataset_directory', ([], {'dataset_name': 'DATASET_NAME'}), '(dataset_name=DATASET_NAME)\n', (1462, 1489), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((1491, 1537), 'torchtext.data.datasets_utils._wrap_split_argument', '_wrap_split_argument', (["('train', 'dev', 'test')"], {}), "(('train', 'dev', 'test'))\n", (1511, 1537), False, 'from torchtext.data.datasets_utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument\n'), ((3028, 3059), 'torchdata.datapipes.iter.FileLoader', 'FileLoader', (['cache_dp'], {'mode': '"""rb"""'}), "(cache_dp, mode='rb')\n", (3038, 3059), False, 'from torchdata.datapipes.iter import IterableWrapper, FileLoader\n'), ((1944, 1976), 'torchtext._internal.module_utils.is_module_available', 'is_module_available', (['"""torchdata"""'], {}), "('torchdata')\n", (1963, 1976), False, 'from torchtext._internal.module_utils import is_module_available\n'), ((2534, 2565), 'os.path.join', 'os.path.join', (['root', '"""SST-2.zip"""'], {}), "(root, 'SST-2.zip')\n", (2546, 2565), False, 'import os\n'), ((2701, 2723), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (2716, 2723), False, 'from torchdata.datapipes.iter import IterableWrapper, FileLoader\n'), ((2909, 2929), 'torchtext._download_hooks.HttpReader', 'HttpReader', (['cache_dp'], {}), '(cache_dp)\n', (2919, 2929), False, 'from torchtext._download_hooks import HttpReader\n'), ((2792, 2811), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2808, 2811), False, 'import os\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
_PATH = "amazon_review_polarity_csv.tar.gz"
_EXTRACTED_FILES = {
"train": os.path.join("amazon_review_polarity_csv", "train.csv"),
"test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}
DATASET_NAME = "AmazonReviewPolarity"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root, split):
"""Demonstrating caching, extraction and sanity check pipelines."""
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _PATH), hash_dict={os.path.join(root, _PATH): MD5}, hash_type="md5"
)
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
)
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode="b")
return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| [
"torchdata.datapipes.iter.GDriveReader",
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.IterableWrapper"
] | [((499, 554), 'os.path.join', 'os.path.join', (['"""amazon_review_polarity_csv"""', '"""train.csv"""'], {}), "('amazon_review_polarity_csv', 'train.csv')\n", (511, 554), False, 'import os\n'), ((568, 622), 'os.path.join', 'os.path.join', (['"""amazon_review_polarity_csv"""', '"""test.csv"""'], {}), "('amazon_review_polarity_csv', 'test.csv')\n", (580, 622), False, 'import os\n'), ((946, 968), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[URL]'], {}), '([URL])\n', (961, 968), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1655, 1698), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1665, 1698), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1166, 1199), 'torchdata.datapipes.iter.GDriveReader', 'GDriveReader', (['cache_compressed_dp'], {}), '(cache_compressed_dp)\n', (1178, 1199), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n'), ((1047, 1072), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1059, 1072), False, 'import os\n'), ((1085, 1110), 'os.path.join', 'os.path.join', (['root', '_PATH'], {}), '(root, _PATH)\n', (1097, 1110), False, 'import os\n'), ((1340, 1383), 'os.path.join', 'os.path.join', (['root', '_EXTRACTED_FILES[split]'], {}), '(root, _EXTRACTED_FILES[split])\n', (1352, 1383), False, 'import os\n'), ((1428, 1471), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['cache_decompressed_dp'], {'mode': '"""b"""'}), "(cache_decompressed_dp, mode='b')\n", (1438, 1471), False, 'from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
from typing import Iterator, Tuple
import torchdata
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
@functional_datapipe("list_file_by_s3")
class S3FileListerIterDataPipe(IterDataPipe[str]):
r"""
Iterable DataPipe that lists Amazon S3 file URLs with the given prefixes (functional name: ``list_file_by_s3``).
Acceptable prefixes include ``s3://bucket-name``, ``s3://bucket-name/``, ``s3://bucket-name/folder``,
``s3://bucket-name/folder/``, and ``s3://bucket-name/prefix``. You may also set ``length``, ``request_timeout_ms``
(default 3000 ms in aws-sdk-cpp), and ``region``.
Note:
1. Input **must** be a list and direct S3 URLs are skipped.
2. ``length`` is `-1` by default, and any call to ``__len__()`` is invalid, because the length is unknown
until all files are iterated.
3. ``request_timeout_ms`` and ``region`` will overwrite settings in the configuration file or
environment variables.
4. AWS_CPP_SDK is necessary to use the S3 DataPipe(s).
Args:
source_datapipe: a DataPipe that contains URLs/URL prefixes to s3 files
length: Nominal length of the datapipe
requestTimeoutMs: optional, overwrite the default timeout setting for this datapipe
region: optional, overwrite the default region inferred from credentials for this datapipe
Example:
>>> from torchdata.datapipes.iter import S3FileLister, S3FileLoader
>>> s3_prefixes = ['s3://bucket-name/folder/', ...]
>>> dp_s3_urls = S3FileLister(s3_prefixes)
>>> dp_s3_files = S3FileLoader(s3_urls) # outputs in (url, StreamWrapper(BytesIO))
>>> # more datapipes to convert loaded bytes, e.g.
>>> datapipe = dp_s3_files.parse_csv(delimiter=' ')
>>> for d in datapipe: # Start loading data
... pass
"""
def __init__(self, source_datapipe: IterDataPipe[str], length: int = -1, request_timeout_ms=-1, region="") -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("Torchdata must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length: int = length
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
def __iter__(self) -> Iterator[str]:
for prefix in self.source_datapipe:
while True:
urls = self.handler.list_files(prefix)
yield from urls
if not urls:
break
self.handler.clear_marker()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
@functional_datapipe("load_file_by_s3")
class S3FileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Iterable DataPipe that loads Amazon S3 files from the given S3 URLs (functional name: ``load_file_by_s3``).
``S3FileLoader`` iterates all given S3 URLs in ``BytesIO`` format with ``(url, BytesIO)`` tuples.
You may also set ``request_timeout_ms`` (default 3000 ms in aws-sdk-cpp), ``region``,
``buffer_size`` (default 120Mb), and ``multi_part_download`` (default to use multi-part downloading).
Note:
1. Input **must** be a list and S3 URLs must be valid.
2. ``request_timeout_ms`` and ``region`` will overwrite settings in the
configuration file or environment variables.
3. AWS_CPP_SDK is necessary to use the S3 DataPipe(s).
Args:
source_datapipe: a DataPipe that contains URLs to s3 files
requestTimeoutMs: optional, overwrite the default timeout setting for this datapipe
region: optional, overwrite the default region inferred from credentials for this datapipe
Example:
>>> from torchdata.datapipes.iter import S3FileLister, S3FileLoader
>>> s3_prefixes = ['s3://bucket-name/folder/', ...]
>>> dp_s3_urls = S3FileLister(s3_prefixes)
>>> dp_s3_files = S3FileLoader(s3_urls) # outputs in (url, StreamWrapper(BytesIO))
>>> # more datapipes to convert loaded bytes, e.g.
>>> datapipe = dp_s3_files.parse_csv(delimiter=' ')
>>> for d in datapipe: # Start loading data
... pass
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
request_timeout_ms=-1,
region="",
buffer_size=None,
multi_part_download=None,
) -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("Torchdata must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
if buffer_size:
self.handler.set_buffer_size(buffer_size)
if multi_part_download:
self.handler.set_multi_part_download(multi_part_download)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
yield url, StreamWrapper(BytesIO(self.handler.s3_read(url)))
def __len__(self) -> int:
return len(self.source_datapipe)
| [
"torchdata._torchdata.S3Handler",
"torchdata.datapipes.functional_datapipe"
] | [((442, 480), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""list_file_by_s3"""'], {}), "('list_file_by_s3')\n", (461, 480), False, 'from torchdata.datapipes import functional_datapipe\n'), ((3172, 3210), 'torchdata.datapipes.functional_datapipe', 'functional_datapipe', (['"""load_file_by_s3"""'], {}), "('load_file_by_s3')\n", (3191, 3210), False, 'from torchdata.datapipes import functional_datapipe\n'), ((2641, 2699), 'torchdata._torchdata.S3Handler', 'torchdata._torchdata.S3Handler', (['request_timeout_ms', 'region'], {}), '(request_timeout_ms, region)\n', (2671, 2699), False, 'import torchdata\n'), ((5233, 5291), 'torchdata._torchdata.S3Handler', 'torchdata._torchdata.S3Handler', (['request_timeout_ms', 'region'], {}), '(request_timeout_ms, region)\n', (5263, 5291), False, 'import torchdata\n')] |
import enum
import functools
import io
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Shuffler,
Filter,
IterKeyZipper,
Demultiplexer,
LineReader,
CSVParser,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
hint_sharding,
path_comparator,
getitem,
)
from torchvision.prototype.features import Label
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
class DTD(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"dtd",
type=DatasetType.IMAGE,
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
valid_options=dict(
split=("train", "test", "val"),
fold=tuple(str(fold) for fold in range(1, 11)),
),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
decompress=True,
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, List[str]], Tuple[str, io.IOBase]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label(self.info.categories.index(category), category=category),
path=path,
image=decoder(buffer) if decoder else buffer,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{config.split}{config.fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| [
"torchdata.datapipes.iter.Filter",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.LineReader",
"torchdata.datapipes.iter.CSVParser",
"torchdata.datapipes.iter.Demultiplexer"
] | [((1203, 1397), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"""'], {'sha256': '"""e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205"""', 'decompress': '(True)'}), "(\n 'https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz',\n sha256=\n 'e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205',\n decompress=True)\n", (1215, 1397), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1541, 1562), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1553, 1562), False, 'import pathlib\n'), ((1936, 1957), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1948, 1957), False, 'import pathlib\n'), ((3130, 3236), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 3, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (3143, 3236), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3375, 3428), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['splits_dp'], {'decode': '(True)', 'return_path': '(False)'}), '(splits_dp, decode=True, return_path=False)\n', (3385, 3428), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3449, 3502), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['splits_dp'], {'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)\n', (3457, 3502), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3523, 3547), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['splits_dp'], {}), '(splits_dp)\n', (3536, 3547), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3579, 3624), 'torchdata.datapipes.iter.CSVParser', 'CSVParser', (['joint_categories_dp'], {'delimiter': '""" """'}), "(joint_categories_dp, delimiter=' ')\n", (3588, 3624), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((4425, 4456), 'torchdata.datapipes.iter.Filter', 'Filter', (['dp', 'self._filter_images'], {}), '(dp, self._filter_images)\n', (4431, 4456), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser\n'), ((3294, 3353), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""{config.split}{config.fold}.txt"""'], {}), "('name', f'{config.split}{config.fold}.txt')\n", (3309, 3353), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((4054, 4121), 'functools.partial', 'functools.partial', (['self._collate_and_decode_sample'], {'decoder': 'decoder'}), '(self._collate_and_decode_sample, decoder=decoder)\n', (4071, 4121), False, 'import functools\n'), ((2502, 2520), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2514, 2520), False, 'import pathlib\n'), ((3729, 3738), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', ([], {}), '()\n', (3736, 3738), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3763, 3773), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (3770, 3773), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((3917, 3927), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(0)'], {}), '(0)\n', (3924, 3927), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, path_comparator, getitem\n'), ((4481, 4499), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (4493, 4499), False, 'import pathlib\n')] |
import functools
import io
import os
import os.path
import pathlib
from typing import Callable, Optional, Collection
from typing import Union, Tuple, List, Dict, Any
import torch
from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter
from torchvision.prototype.datasets.decoder import pil
from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _collate_and_decode_data(
data: Tuple[str, io.IOBase],
*,
root: pathlib.Path,
categories: List[str],
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
path, buffer = data
data = decoder(buffer) if decoder else buffer
category = pathlib.Path(path).relative_to(root).parts[0]
label = torch.tensor(categories.index(category))
return dict(
path=path,
data=data,
label=label,
category=category,
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]] = None,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = FileOpener(dp, mode="rb")
return (
Mapper(dp, functools.partial(_collate_and_decode_data, root=root, categories=categories, decoder=decoder)),
categories,
)
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = sample.pop("data")
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]] = pil,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, decoder=decoder, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
| [
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Shuffler",
"torchdata.datapipes.iter.FileOpener"
] | [((1848, 1865), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['dp'], {}), '(dp)\n', (1861, 1865), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding\n'), ((1875, 1921), 'torchdata.datapipes.iter.Shuffler', 'Shuffler', (['dp'], {'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(dp, buffer_size=INFINITE_BUFFER_SIZE)\n', (1883, 1921), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((1931, 1956), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['dp'], {'mode': '"""rb"""'}), "(dp, mode='rb')\n", (1941, 1956), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((1785, 1837), 'functools.partial', 'functools.partial', (['_is_not_top_level_file'], {'root': 'root'}), '(_is_not_top_level_file, root=root)\n', (1802, 1837), False, 'import functools\n'), ((2773, 2803), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', '_data_to_image_key'], {}), '(dp, _data_to_image_key)\n', (2779, 2803), False, 'from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter\n'), ((571, 589), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (583, 589), False, 'import pathlib\n'), ((659, 676), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (671, 676), False, 'import pathlib\n'), ((1989, 2088), 'functools.partial', 'functools.partial', (['_collate_and_decode_data'], {'root': 'root', 'categories': 'categories', 'decoder': 'decoder'}), '(_collate_and_decode_data, root=root, categories=\n categories, decoder=decoder)\n', (2006, 2088), False, 'import functools\n'), ((1534, 1550), 'os.scandir', 'os.scandir', (['root'], {}), '(root)\n', (1544, 1550), False, 'import os\n'), ((970, 988), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (982, 988), False, 'import pathlib\n'), ((1444, 1462), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (1456, 1462), False, 'import pathlib\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
from functools import partial
import expecttest
import numpy as np
import torch
from _utils._common_utils_for_test import reset_after_n_next_calls
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
FSSpecFileLister,
FSSpecFileOpener,
FSSpecSaver,
IterableWrapper,
TFRecordLoader,
)
class TestDataPipeTFRecord(expecttest.TestCase):
def setUp(self):
self.temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_fakedata", "tfrecord")
def assertArrayEqual(self, arr1, arr2):
np.testing.assert_array_equal(arr1, arr2)
def _ground_truth_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
yield {
"x_float": x,
"x_int": (x * 10).long(),
"x_byte": [b"test str"],
}
def _ground_truth_seq_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
rep = 2 * i + 3
yield {"x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"]}, {
"x_float_seq": [x] * rep,
"x_int_seq": [(x * 10).long()] * rep,
"x_byte_seq": [[b"test str"]] * rep,
}
@torch.no_grad()
def test_tfrecord_loader_example_iterdatapipe(self):
filename = f"{self.temp_dir}/example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_data())
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(loaded_data["x_byte"]), 1)
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
self.assertArrayEqual(true_data["x_int"].numpy(), loaded_data["x_int"].long().numpy())
self.assertEqual(loaded_data["x_float"].dtype, torch.float64)
self.assertEqual(loaded_data["x_int"].dtype, torch.int32)
self.assertEqual(true_data["x_byte"], loaded_data["x_byte"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float_unknown": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
self.assertEqual(len(expected_res), len(res_after_reset))
for true_data, loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
@torch.no_grad()
def test_tfrecord_loader_sequence_example_iterdatapipe(self):
filename = f"{self.temp_dir}/sequence_example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_seq_data())
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
"x_float_seq": ((-1, 5, 2), torch.float64),
"x_int_seq": ((-1, 5, 2), torch.int32),
"x_byte_seq": ((-1,), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
(
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
},
{
"x_float_seq": [y.reshape(5, 2).numpy() for y in z["x_float_seq"]],
"x_int_seq": [y.reshape(5, 2).numpy() for y in z["x_int_seq"]],
"x_byte_seq": [y[0] for y in z["x_byte_seq"]],
},
)
for x, z in self._ground_truth_seq_data()
]
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
l_loaded_data = loaded_data[key]
if key == "x_float":
l_loaded_data = l_loaded_data.float()
else:
l_loaded_data = l_loaded_data.int()
self.assertArrayEqual(true_data_ctx[key].numpy(), l_loaded_data.numpy())
self.assertArrayEqual(true_data_seq[key + "_seq"], loaded_data[key + "_seq"])
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x, z in self._ground_truth_seq_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{"x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": None}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(
expected_res[:n_elements_before_reset], res_before_reset
):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
self.assertEqual(len(expected_res), len(res_after_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.TFRecordLoader",
"torchdata.datapipes.iter.IterableWrapper",
"torchdata.datapipes.iter.FileOpener"
] | [((1515, 1530), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1528, 1530), False, 'import torch\n'), ((6173, 6188), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6186, 6188), False, 'import torch\n'), ((12867, 12882), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12880, 12882), False, 'import unittest\n'), ((806, 847), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (835, 847), True, 'import numpy as np\n'), ((1663, 1690), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[filename]'], {}), '([filename])\n', (1678, 1690), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((1711, 1742), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (1721, 1742), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((4835, 4860), 'torchdata.datapipes.iter.TFRecordLoader', 'TFRecordLoader', (['datapipe2'], {}), '(datapipe2)\n', (4849, 4860), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((4983, 5049), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tfrecord_parser', 'n_elements_before_reset'], {}), '(tfrecord_parser, n_elements_before_reset)\n', (5007, 5049), False, 'from _utils._common_utils_for_test import reset_after_n_next_calls\n'), ((6339, 6366), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (['[filename]'], {}), '([filename])\n', (6354, 6366), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((6387, 6418), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (6397, 6418), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((10570, 10595), 'torchdata.datapipes.iter.TFRecordLoader', 'TFRecordLoader', (['datapipe2'], {}), '(datapipe2)\n', (10584, 10595), False, 'from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader\n'), ((10718, 10784), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tfrecord_parser', 'n_elements_before_reset'], {}), '(tfrecord_parser, n_elements_before_reset)\n', (10742, 10784), False, 'from _utils._common_utils_for_test import reset_after_n_next_calls\n'), ((926, 963), 'torch.range', 'torch.range', (['(i * 10)', '((i + 1) * 10 - 1)'], {}), '(i * 10, (i + 1) * 10 - 1)\n', (937, 963), False, 'import torch\n'), ((1193, 1230), 'torch.range', 'torch.range', (['(i * 10)', '((i + 1) * 10 - 1)'], {}), '(i * 10, (i + 1) * 10 - 1)\n', (1204, 1230), False, 'import torch\n'), ((700, 725), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (715, 725), False, 'import os\n')] |
import functools
import io
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
hint_sharding,
hint_shuffling,
path_comparator,
path_accessor,
getitem,
)
from torchvision.prototype.features import Label
class CLEVR(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"clevr",
type=DatasetType.IMAGE,
homepage="https://cs.stanford.edu/people/jcjohns/clevr/",
valid_options=dict(split=("train", "val", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, io.IOBase]) -> Tuple[Tuple[str, io.IOBase], None]:
return data, None
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, io.IOBase], Optional[Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=decoder(buffer) if decoder else buffer,
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", config.split))
images_dp = hint_sharding(images_dp)
images_dp = hint_shuffling(images_dp)
if config.split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{config.split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
| [
"torchdata.datapipes.iter.JsonParser",
"torchdata.datapipes.iter.Mapper",
"torchdata.datapipes.iter.Demultiplexer",
"torchdata.datapipes.iter.UnBatcher"
] | [((1009, 1156), 'torchvision.prototype.datasets.utils.HttpResource', 'HttpResource', (['"""https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"""'], {'sha256': '"""5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1"""'}), "('https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip', sha256=\n '5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1')\n", (1021, 1156), False, 'from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource, DatasetType\n'), ((1301, 1322), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (1313, 1322), False, 'import pathlib\n'), ((2515, 2621), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(2)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 2, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (2528, 2621), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((2794, 2818), 'torchvision.prototype.datasets.utils._internal.hint_sharding', 'hint_sharding', (['images_dp'], {}), '(images_dp)\n', (2807, 2818), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((2839, 2864), 'torchvision.prototype.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['images_dp'], {}), '(images_dp)\n', (2853, 2864), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((2728, 2772), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""parent.name"""', 'config.split'], {}), "('parent.name', config.split)\n", (2743, 2772), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3029, 3050), 'torchdata.datapipes.iter.JsonParser', 'JsonParser', (['scenes_dp'], {}), '(scenes_dp)\n', (3039, 3050), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3139, 3159), 'torchdata.datapipes.iter.UnBatcher', 'UnBatcher', (['scenes_dp'], {}), '(scenes_dp)\n', (3148, 3159), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3442, 3481), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['images_dp', 'self._add_empty_anns'], {}), '(images_dp, self._add_empty_anns)\n', (3448, 3481), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher\n'), ((3509, 3576), 'functools.partial', 'functools.partial', (['self._collate_and_decode_sample'], {'decoder': 'decoder'}), '(self._collate_and_decode_sample, decoder=decoder)\n', (3526, 3576), False, 'import functools\n'), ((2943, 3003), 'torchvision.prototype.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""CLEVR_{config.split}_scenes.json"""'], {}), "('name', f'CLEVR_{config.split}_scenes.json')\n", (2958, 3003), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3093, 3113), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['(1)', '"""scenes"""'], {}), "(1, 'scenes')\n", (3100, 3113), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3270, 3291), 'torchvision.prototype.datasets.utils._internal.path_accessor', 'path_accessor', (['"""name"""'], {}), "('name')\n", (3283, 3291), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n'), ((3320, 3345), 'torchvision.prototype.datasets.utils._internal.getitem', 'getitem', (['"""image_filename"""'], {}), "('image_filename')\n", (3327, 3345), False, 'from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, hint_sharding, hint_shuffling, path_comparator, path_accessor, getitem\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import hashlib
import itertools
import lzma
import os
import subprocess
import tarfile
import unittest
import warnings
import zipfile
from json.decoder import JSONDecodeError
import expecttest
from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls
from torchdata.datapipes.iter import (
CSVDictParser,
CSVParser,
Decompressor,
FileLister,
FileOpener,
HashChecker,
IoPathFileLister,
IoPathFileOpener,
IoPathSaver,
IterableWrapper,
JsonParser,
RarArchiveLoader,
Saver,
TarArchiveLoader,
XzFileLoader,
ZipArchiveLoader,
)
try:
import iopath
HAS_IOPATH = True
except ImportError:
HAS_IOPATH = False
skipIfNoIoPath = unittest.skipIf(not HAS_IOPATH, "no iopath")
try:
import rarfile
HAS_RAR_TOOLS = True
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
HAS_RAR_TOOLS = False
except (ModuleNotFoundError, FileNotFoundError):
HAS_RAR_TOOLS = False
skipIfNoRarTools = unittest.skipIf(not HAS_RAR_TOOLS, "no rar tools")
class TestDataPipeLocalIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeLocalIO was not able to cleanup temp dir due to {e}")
def _custom_files_set_up(self, files):
for fname, content in files.items():
temp_file_path = os.path.join(self.temp_dir.name, fname)
with open(temp_file_path, "w") as f:
f.write(content)
def _compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
if check_length:
self.assertEqual(len(expected_files), len(result))
for res, expected_file in itertools.zip_longest(result, expected_files):
self.assertTrue(res is not None and expected_file is not None)
self.assertEqual(os.path.basename(res[0]), os.path.basename(expected_file))
with open(expected_file, "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def _unordered_compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
expected_names_to_files = {os.path.basename(f): f for f in expected_files}
if check_length:
self.assertEqual(len(expected_files), len(result))
for res in result:
fname = os.path.basename(res[0])
self.assertTrue(fname is not None)
self.assertTrue(fname in expected_names_to_files)
with open(expected_names_to_files[fname], "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def test_csv_parser_iterdatapipe(self):
def make_path(fname):
return f"{self.temp_dir.name}/{fname}"
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = IterableWrapper([make_path(fname) for fname in ["1.csv", "empty.csv", "empty2.csv"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at time from each file, skipping over empty content
csv_parser_dp = datapipe3.parse_csv()
expected_res = [["key", "item"], ["a", "1"], ["b", "2"], []]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file, skipping over empty content and header
csv_parser_dp = datapipe3.parse_csv(skip_lines=1)
expected_res = [["a", "1"], ["b", "2"]]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file with file name, skipping over empty content
csv_parser_dp = datapipe3.parse_csv(return_path=True)
expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])]
self.assertEqual(expected_res, list(csv_parser_dp))
# Reset Test:
csv_parser_dp = CSVParser(datapipe3, return_path=True)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_parser_dp)
def test_csv_dict_parser_iterdatapipe(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at a time as dict, with the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
expected_res1 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
self.assertEqual(expected_res1, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict, skip over first row, with the second row being the header
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(skip_lines=1)
expected_res2 = [{"a": "b", "1": "2"}]
self.assertEqual(expected_res2, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict with file name, and the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(return_path=True)
expected_res3 = [("1.csv", {"key": "a", "item": "1"}), ("1.csv", {"key": "b", "item": "2"})]
self.assertEqual(expected_res3, list(csv_dict_parser_dp))
# Reset Test
csv_dict_parser_dp = CSVDictParser(datapipe3)
expected_res4 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_dict_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res4[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res4, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_dict_parser_dp)
def test_hash_checker_iterdatapipe(self):
hash_dict = {}
def fill_hash_dict():
for path in self.temp_files:
with open(path) as f:
hash_func = hashlib.sha256()
content = f.read().encode("utf-8")
hash_func.update(content)
hash_dict[path] = hash_func.hexdigest()
fill_hash_dict()
datapipe1 = FileLister(self.temp_dir.name, "*")
datapipe2 = FileOpener(datapipe1, mode="b")
hash_check_dp = HashChecker(datapipe2, hash_dict)
# Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# Functional Test: Ensure the rewind option works, and the stream is empty when there is no rewind
hash_check_dp_no_reset = HashChecker(datapipe2, hash_dict, rewind=False)
for (expected_path, _), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp_no_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(b"", actual_stream.read())
# Functional Test: Error when file/path is not in hash_dict
hash_check_dp = HashChecker(datapipe2, {})
it = iter(hash_check_dp)
with self.assertRaisesRegex(RuntimeError, "Unspecified hash for file"):
next(it)
# Functional Test: Error when the hash is different
hash_dict[self.temp_files[0]] = "WRONG HASH"
hash_check_dp = HashChecker(datapipe2, hash_dict)
with self.assertRaisesRegex(RuntimeError, "does not match"):
list(hash_check_dp)
# Reset Test:
fill_hash_dict() # Reset the dict with correct values because we changed it in the last test case
hash_check_dp = datapipe2.check_hash(hash_dict)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(hash_check_dp, n_elements_before_reset)
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_before_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_after_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# __len__ Test: returns the length of source DataPipe
with self.assertRaisesRegex(TypeError, "FileOpenerIterDataPipe instance doesn't have valid length"):
len(hash_check_dp)
def test_json_parser_iterdatapipe(self):
def is_empty_json(path_and_stream):
return path_and_stream[0] == "empty.json"
def is_nonempty_json(path_and_stream):
return path_and_stream[0] != "empty.json"
json_files = {
"1.json": '["foo", {"bar":["baz", null, 1.0, 2]}]',
"empty.json": "",
"2.json": '{"__complex__": true, "real": 1, "imag": 2}',
}
self._custom_files_set_up(json_files)
datapipe1 = IterableWrapper([f"{self.temp_dir.name}/{fname}" for fname in ["empty.json", "1.json", "2.json"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
datapipe_empty = datapipe3.filter(is_empty_json)
datapipe_nonempty = datapipe3.filter(is_nonempty_json)
empty_json_dp = datapipe_empty.parse_json_files()
it = iter(empty_json_dp)
# Functional Test: dp fails when empty JSON file is given
with self.assertRaisesRegex(JSONDecodeError, "Expecting value"):
next(it)
# Functional Test: dp yields one json file at a time
json_dp = datapipe_nonempty.parse_json_files()
expected_res = [
("1.json", ["foo", {"bar": ["baz", None, 1.0, 2]}]),
("2.json", {"__complex__": True, "real": 1, "imag": 2}),
]
self.assertEqual(expected_res, list(json_dp))
# Reset Test:
json_dp = JsonParser(datapipe_nonempty)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(json_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "len"):
len(json_dp)
def test_saver_iterdatapipe(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = Saver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
def _write_test_tar_files(self):
path = os.path.join(self.temp_dir.name, "test_tar.tar")
with tarfile.open(path, "w:tar") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def _write_test_tar_gz_files(self):
path = os.path.join(self.temp_dir.name, "test_gz.tar.gz")
with tarfile.open(path, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def test_tar_archive_reader_iterdatapipe(self):
self._write_test_tar_files()
datapipe1 = FileLister(self.temp_dir.name, "*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
self._write_test_tar_gz_files()
datapipe_gz_1 = FileLister(self.temp_dir.name, "*.tar.gz")
datapipe_gz_2 = FileOpener(datapipe_gz_1, mode="b")
gz_reader_dp = TarArchiveLoader(datapipe_gz_2)
# Functional Test: Read extracted files before reaching the end of the tarfile
self._compressed_files_comparison_helper(self.temp_files, tar_loader_dp, check_length=False)
self._compressed_files_comparison_helper(self.temp_files, gz_reader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the tarfile
data_refs = list(tar_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
data_refs_gz = list(gz_reader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs_gz)
# Reset Test: reset the DataPipe after reading part of it
tar_loader_dp = datapipe2.load_from_tar()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(tar_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check result accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(tar_loader_dp)
def _write_test_zip_files(self):
path = os.path.join(self.temp_dir.name, "test_zip.zip")
with zipfile.ZipFile(path, "w") as myzip:
myzip.write(self.temp_files[0], arcname=os.path.basename(self.temp_files[0]))
myzip.write(self.temp_files[1], arcname=os.path.basename(self.temp_files[1]))
myzip.write(self.temp_files[2], arcname=os.path.basename(self.temp_files[2]))
def test_zip_archive_reader_iterdatapipe(self):
self._write_test_zip_files()
datapipe1 = FileLister(self.temp_dir.name, "*.zip")
datapipe2 = FileOpener(datapipe1, mode="b")
zip_loader_dp = ZipArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the zipfile
self._compressed_files_comparison_helper(self.temp_files, zip_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the zipile
data_refs = list(zip_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
zip_loader_dp = datapipe2.load_from_zip()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(zip_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(zip_loader_dp)
def _write_test_xz_files(self):
for path in self.temp_files:
fname = os.path.basename(path)
temp_xzfile_pathname = os.path.join(self.temp_dir.name, f"{fname}.xz")
with open(path) as f:
with lzma.open(temp_xzfile_pathname, "w") as xz:
xz.write(f.read().encode("utf-8"))
def test_xz_archive_reader_iterdatapipe(self):
# Worth noting that the .tar and .zip tests write multiple files into the same compressed file
# Whereas we create multiple .xz files in the same directories below.
self._write_test_xz_files()
datapipe1 = FileLister(self.temp_dir.name, "*.xz")
datapipe2 = FileOpener(datapipe1, mode="b")
xz_loader_dp = XzFileLoader(datapipe2)
# Functional Test: Read extracted files before reaching the end of the xzfile
self._unordered_compressed_files_comparison_helper(self.temp_files, xz_loader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the xzfile
data_refs = list(xz_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
xz_loader_dp = datapipe2.load_from_xz()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False)
# Check result accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# Reset Test: Ensure the order is consistent between iterations
for r1, r2 in zip(xz_loader_dp, xz_loader_dp):
self.assertEqual(r1[0], r2[0])
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(xz_loader_dp)
def _decompressor_tar_test_helper(self, expected_files, tar_decompress_dp):
for _file, child_obj in tar_decompress_dp:
for expected_file, tarinfo in zip(expected_files, child_obj):
if not tarinfo.isfile():
continue
extracted_fobj = child_obj.extractfile(tarinfo)
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), extracted_fobj.read())
def _decompressor_xz_test_helper(self, xz_decompress_dp):
for xz_file_name, xz_stream in xz_decompress_dp:
expected_file = xz_file_name[:-3]
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), xz_stream.read())
def _write_single_gz_file(self):
import gzip
with gzip.open(f"{self.temp_dir.name}/temp.gz", "wb") as k:
with open(self.temp_files[0], "rb") as f:
k.write(f.read())
def test_decompressor_iterdatapipe(self):
self._write_test_tar_files()
self._write_test_tar_gz_files()
self._write_single_gz_file()
self._write_test_zip_files()
self._write_test_xz_files()
# Functional Test: work with .tar files
tar_file_dp = FileLister(self.temp_dir.name, "*.tar")
tar_load_dp = FileOpener(tar_file_dp, mode="b")
tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional test: work with .tar.gz files
tar_gz_file_dp = FileLister(self.temp_dir.name, "*.tar.gz")
tar_gz_load_dp = FileOpener(tar_gz_file_dp, mode="b")
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: work with .gz files
gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"])
gz_load_dp = FileOpener(gz_file_dp, mode="b")
gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip")
for _, gz_stream in gz_decompress_dp:
with open(self.temp_files[0], "rb") as f:
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar files
tar_decompress_dp = Decompressor(tar_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: Compression Type is works for both upper and lower case strings
tar_decompress_dp = Decompressor(tar_load_dp, file_type="TAr")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: Compression Type throws error for invalid file type
with self.assertRaisesRegex(ValueError, "not a valid CompressionType"):
Decompressor(tar_load_dp, file_type="ABC")
# Reset Test: Ensure the order is consistent between iterations
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_decompress_dp, n_elements_before_reset)
self._decompressor_xz_test_helper(res_before_reset)
self._decompressor_xz_test_helper(res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "has no len"):
len(tar_decompress_dp)
def _write_text_files(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
list(saver_dp)
# TODO(120): this test currently only covers reading from local
# filesystem. It needs to be modified once test data can be stored on
# gdrive/s3/onedrive
@skipIfNoIoPath
def test_io_path_file_lister_iterdatapipe(self):
datapipe = IoPathFileLister(root=self.temp_sub_dir.name)
# check all file paths within sub_folder are listed
for path in datapipe:
self.assertTrue(path in self.temp_sub_files)
@skipIfNoIoPath
def test_io_path_file_loader_iterdatapipe(self):
datapipe1 = IoPathFileLister(root=self.temp_sub_dir.name)
datapipe2 = IoPathFileOpener(datapipe1)
# check contents of file match
for _, f in datapipe2:
self.assertEqual(f.read(), "0123456789abcdef")
# Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted
self._write_text_files()
lister_dp = FileLister(self.temp_dir.name, "*.text")
iopath_file_loader_dp = IoPathFileOpener(lister_dp, mode="rb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(iopath_file_loader_dp, n_elements_before_reset)
self.assertEqual(2, len(res_before_reset))
self.assertEqual(3, len(res_after_reset))
for _name, stream in res_before_reset:
self.assertEqual(b"DATA", stream.read())
for _name, stream in res_after_reset:
self.assertEqual(b"DATA", stream.read())
@skipIfNoIoPath
def test_io_path_saver_iterdatapipe(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_by_iopath(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = IoPathSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
def _write_test_rar_files(self):
# `rarfile` can only read but not write .rar archives so we use to system utilities
rar_archive_name = os.path.join(self.temp_dir.name, "test_rar")
subprocess.run(("rar", "a", rar_archive_name + ".rar", *self.temp_files), check=True)
# Nested RAR
subprocess.run(("rar", "a", rar_archive_name + "1.rar", self.temp_files[0]), check=True)
subprocess.run(("rar", "a", rar_archive_name + "2.rar", *self.temp_files[1:]), check=True)
subprocess.run(
("rar", "a", rar_archive_name + "_nested.rar", rar_archive_name + "1.rar", rar_archive_name + "2.rar"),
check=True,
)
# Nested RAR in TAR
with tarfile.open(rar_archive_name + "_nested.tar", "w:tar") as tar:
tar.add(rar_archive_name + "1.rar")
tar.add(rar_archive_name + "2.rar")
@skipIfNoRarTools
def test_rar_archive_loader(self):
self._write_test_rar_files()
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp = RarArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
rar_loader_dp = datapipe2.load_from_rar()
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(rar_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._unordered_compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(rar_loader_dp)
# Nested RAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp_1 = RarArchiveLoader(datapipe2)
rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1)
with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"):
list(rar_loader_dp_2)
# Nested RAR in TAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")])
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
rar_loader_dp = RarArchiveLoader(tar_loader_dp)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
if __name__ == "__main__":
unittest.main()
| [
"torchdata.datapipes.iter.IoPathFileOpener",
"torchdata.datapipes.iter.Decompressor",
"torchdata.datapipes.iter.TarArchiveLoader",
"torchdata.datapipes.iter.FileOpener",
"torchdata.datapipes.iter.RarArchiveLoader",
"torchdata.datapipes.iter.FileLister",
"torchdata.datapipes.iter.Saver",
"torchdata.dat... | [((802, 846), 'unittest.skipIf', 'unittest.skipIf', (['(not HAS_IOPATH)', '"""no iopath"""'], {}), "(not HAS_IOPATH, 'no iopath')\n", (817, 846), False, 'import unittest\n'), ((1177, 1227), 'unittest.skipIf', 'unittest.skipIf', (['(not HAS_RAR_TOOLS)', '"""no rar tools"""'], {}), "(not HAS_RAR_TOOLS, 'no rar tools')\n", (1192, 1227), False, 'import unittest\n'), ((31198, 31213), 'unittest.main', 'unittest.main', ([], {}), '()\n', (31211, 31213), False, 'import unittest\n'), ((915, 935), 'rarfile.tool_setup', 'rarfile.tool_setup', ([], {}), '()\n', (933, 935), False, 'import rarfile\n'), ((944, 985), 'subprocess.run', 'subprocess.run', (["('rar', '-?')"], {'check': '(True)'}), "(('rar', '-?'), check=True)\n", (958, 985), False, 'import subprocess\n'), ((1323, 1340), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', ([], {}), '()\n', (1338, 1340), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1367, 1399), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_dir'], {}), '(self.temp_dir)\n', (1384, 1399), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1428, 1463), '_utils._common_utils_for_test.create_temp_dir', 'create_temp_dir', (['self.temp_dir.name'], {}), '(self.temp_dir.name)\n', (1443, 1463), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((1494, 1540), '_utils._common_utils_for_test.create_temp_files', 'create_temp_files', (['self.temp_sub_dir', '(4)', '(False)'], {}), '(self.temp_sub_dir, 4, False)\n', (1511, 1540), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((2245, 2290), 'itertools.zip_longest', 'itertools.zip_longest', (['result', 'expected_files'], {}), '(result, expected_files)\n', (2266, 2290), False, 'import itertools\n'), ((3592, 3623), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (3602, 3623), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((4604, 4642), 'torchdata.datapipes.iter.CSVParser', 'CSVParser', (['datapipe3'], {'return_path': '(True)'}), '(datapipe3, return_path=True)\n', (4613, 4642), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((4723, 4787), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['csv_parser_dp', 'n_elements_before_reset'], {}), '(csv_parser_dp, n_elements_before_reset)\n', (4747, 4787), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((5429, 5468), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.csv"""'], {}), "(self.temp_dir.name, '*.csv')\n", (5439, 5468), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((5489, 5520), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (5499, 5520), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((6585, 6609), 'torchdata.datapipes.iter.CSVDictParser', 'CSVDictParser', (['datapipe3'], {}), '(datapipe3)\n', (6598, 6609), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((6769, 6838), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['csv_dict_parser_dp', 'n_elements_before_reset'], {}), '(csv_dict_parser_dp, n_elements_before_reset)\n', (6793, 6838), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((7604, 7639), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*"""'], {}), "(self.temp_dir.name, '*')\n", (7614, 7639), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((7660, 7691), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (7670, 7691), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((7716, 7749), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', 'hash_dict'], {}), '(datapipe2, hash_dict)\n', (7727, 7749), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8228, 8275), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', 'hash_dict'], {'rewind': '(False)'}), '(datapipe2, hash_dict, rewind=False)\n', (8239, 8275), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8586, 8612), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', '{}'], {}), '(datapipe2, {})\n', (8597, 8612), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((8885, 8918), 'torchdata.datapipes.iter.HashChecker', 'HashChecker', (['datapipe2', 'hash_dict'], {}), '(datapipe2, hash_dict)\n', (8896, 8918), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((9286, 9350), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['hash_check_dp', 'n_elements_before_reset'], {}), '(hash_check_dp, n_elements_before_reset)\n', (9310, 9350), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((10550, 10652), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[f'{self.temp_dir.name}/{fname}' for fname in ['empty.json', '1.json',\n '2.json']]"], {}), "([f'{self.temp_dir.name}/{fname}' for fname in ['empty.json',\n '1.json', '2.json']])\n", (10565, 10652), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((10669, 10700), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (10679, 10700), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((11498, 11527), 'torchdata.datapipes.iter.JsonParser', 'JsonParser', (['datapipe_nonempty'], {}), '(datapipe_nonempty)\n', (11508, 11527), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((11608, 11666), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['json_dp', 'n_elements_before_reset'], {}), '(json_dp, n_elements_before_reset)\n', (11632, 11666), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((12798, 12850), 'torchdata.datapipes.iter.Saver', 'Saver', (['source_dp'], {'filepath_fn': 'filepath_fn', 'mode': '"""wb"""'}), "(source_dp, filepath_fn=filepath_fn, mode='wb')\n", (12803, 12850), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((12931, 12990), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['saver_dp', 'n_elements_before_reset'], {}), '(saver_dp, n_elements_before_reset)\n', (12955, 12990), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((13475, 13523), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_tar.tar"""'], {}), "(self.temp_dir.name, 'test_tar.tar')\n", (13487, 13523), False, 'import os\n'), ((13749, 13799), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_gz.tar.gz"""'], {}), "(self.temp_dir.name, 'test_gz.tar.gz')\n", (13761, 13799), False, 'import os\n'), ((14078, 14117), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar"""'], {}), "(self.temp_dir.name, '*.tar')\n", (14088, 14117), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14138, 14169), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (14148, 14169), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14194, 14221), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (14210, 14221), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14287, 14329), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar.gz"""'], {}), "(self.temp_dir.name, '*.tar.gz')\n", (14297, 14329), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14354, 14389), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe_gz_1'], {'mode': '"""b"""'}), "(datapipe_gz_1, mode='b')\n", (14364, 14389), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((14413, 14444), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe_gz_2'], {}), '(datapipe_gz_2)\n', (14429, 14444), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((15257, 15321), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['tar_loader_dp', 'n_elements_before_reset'], {}), '(tar_loader_dp, n_elements_before_reset)\n', (15281, 15321), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((15831, 15879), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_zip.zip"""'], {}), "(self.temp_dir.name, 'test_zip.zip')\n", (15843, 15879), False, 'import os\n'), ((16310, 16349), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.zip"""'], {}), "(self.temp_dir.name, '*.zip')\n", (16320, 16349), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((16370, 16401), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (16380, 16401), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((16426, 16453), 'torchdata.datapipes.iter.ZipArchiveLoader', 'ZipArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (16442, 16453), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((17043, 17107), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['zip_loader_dp', 'n_elements_before_reset'], {}), '(zip_loader_dp, n_elements_before_reset)\n', (17067, 17107), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((18217, 18255), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.xz"""'], {}), "(self.temp_dir.name, '*.xz')\n", (18227, 18255), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18276, 18307), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (18286, 18307), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18331, 18354), 'torchdata.datapipes.iter.XzFileLoader', 'XzFileLoader', (['datapipe2'], {}), '(datapipe2)\n', (18343, 18354), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((18959, 19022), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['xz_loader_dp', 'n_elements_before_reset'], {}), '(xz_loader_dp, n_elements_before_reset)\n', (18983, 19022), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((20995, 21034), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar"""'], {}), "(self.temp_dir.name, '*.tar')\n", (21005, 21034), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21057, 21090), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['tar_file_dp'], {'mode': '"""b"""'}), "(tar_file_dp, mode='b')\n", (21067, 21090), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21119, 21161), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""tar"""'}), "(tar_load_dp, file_type='tar')\n", (21131, 21161), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21318, 21360), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.tar.gz"""'], {}), "(self.temp_dir.name, '*.tar.gz')\n", (21328, 21360), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21386, 21422), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['tar_gz_file_dp'], {'mode': '"""b"""'}), "(tar_gz_file_dp, mode='b')\n", (21396, 21422), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21454, 21499), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_gz_load_dp'], {'file_type': '"""tar"""'}), "(tar_gz_load_dp, file_type='tar')\n", (21466, 21499), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21651, 21701), 'torchdata.datapipes.iter.IterableWrapper', 'IterableWrapper', (["[f'{self.temp_dir.name}/temp.gz']"], {}), "([f'{self.temp_dir.name}/temp.gz'])\n", (21666, 21701), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21723, 21755), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['gz_file_dp'], {'mode': '"""b"""'}), "(gz_file_dp, mode='b')\n", (21733, 21755), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((21783, 21825), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['gz_load_dp'], {'file_type': '"""gzip"""'}), "(gz_load_dp, file_type='gzip')\n", (21795, 21825), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22058, 22097), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.zip"""'], {}), "(self.temp_dir.name, '*.zip')\n", (22068, 22097), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22120, 22153), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['zip_file_dp'], {'mode': '"""b"""'}), "(zip_file_dp, mode='b')\n", (22130, 22153), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22520, 22558), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.xz"""'], {}), "(self.temp_dir.name, '*.xz')\n", (22530, 22558), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22580, 22612), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['xz_file_dp'], {'mode': '"""b"""'}), "(xz_file_dp, mode='b')\n", (22590, 22612), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22640, 22682), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['xz_load_dp'], {'file_type': '"""lzma"""'}), "(xz_load_dp, file_type='lzma')\n", (22652, 22682), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((22846, 22887), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': 'None'}), '(tar_load_dp, file_type=None)\n', (22858, 22887), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23068, 23092), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['xz_load_dp'], {}), '(xz_load_dp)\n', (23080, 23092), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23262, 23306), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_gz_load_dp'], {'file_type': 'None'}), '(tar_gz_load_dp, file_type=None)\n', (23274, 23306), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23509, 23551), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""TAr"""'}), "(tar_load_dp, file_type='TAr')\n", (23521, 23551), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((23999, 24066), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['xz_decompress_dp', 'n_elements_before_reset'], {}), '(xz_decompress_dp, n_elements_before_reset)\n', (24023, 24066), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((24996, 25041), 'torchdata.datapipes.iter.IoPathFileLister', 'IoPathFileLister', ([], {'root': 'self.temp_sub_dir.name'}), '(root=self.temp_sub_dir.name)\n', (25012, 25041), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25284, 25329), 'torchdata.datapipes.iter.IoPathFileLister', 'IoPathFileLister', ([], {'root': 'self.temp_sub_dir.name'}), '(root=self.temp_sub_dir.name)\n', (25300, 25329), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25350, 25377), 'torchdata.datapipes.iter.IoPathFileOpener', 'IoPathFileOpener', (['datapipe1'], {}), '(datapipe1)\n', (25366, 25377), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25670, 25710), 'torchdata.datapipes.iter.FileLister', 'FileLister', (['self.temp_dir.name', '"""*.text"""'], {}), "(self.temp_dir.name, '*.text')\n", (25680, 25710), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25743, 25781), 'torchdata.datapipes.iter.IoPathFileOpener', 'IoPathFileOpener', (['lister_dp'], {'mode': '"""rb"""'}), "(lister_dp, mode='rb')\n", (25759, 25781), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((25863, 25935), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['iopath_file_loader_dp', 'n_elements_before_reset'], {}), '(iopath_file_loader_dp, n_elements_before_reset)\n', (25887, 25935), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((27089, 27147), 'torchdata.datapipes.iter.IoPathSaver', 'IoPathSaver', (['source_dp'], {'filepath_fn': 'filepath_fn', 'mode': '"""wb"""'}), "(source_dp, filepath_fn=filepath_fn, mode='wb')\n", (27100, 27147), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((27228, 27287), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['saver_dp', 'n_elements_before_reset'], {}), '(saver_dp, n_elements_before_reset)\n', (27252, 27287), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((27876, 27920), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar"""'], {}), "(self.temp_dir.name, 'test_rar')\n", (27888, 27920), False, 'import os\n'), ((27929, 28018), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '.rar', *self.temp_files)"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '.rar', *self.temp_files),\n check=True)\n", (27943, 28018), False, 'import subprocess\n'), ((28045, 28137), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '1.rar', self.temp_files[0])"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '1.rar', self.temp_files[0]),\n check=True)\n", (28059, 28137), False, 'import subprocess\n'), ((28142, 28237), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '2.rar', *self.temp_files[1:])"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '2.rar', *self.temp_files[1:\n ]), check=True)\n", (28156, 28237), False, 'import subprocess\n'), ((28241, 28376), 'subprocess.run', 'subprocess.run', (["('rar', 'a', rar_archive_name + '_nested.rar', rar_archive_name + '1.rar', \n rar_archive_name + '2.rar')"], {'check': '(True)'}), "(('rar', 'a', rar_archive_name + '_nested.rar', \n rar_archive_name + '1.rar', rar_archive_name + '2.rar'), check=True)\n", (28255, 28376), False, 'import subprocess\n'), ((28817, 28848), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (28827, 28848), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((28873, 28900), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (28889, 28900), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((29511, 29575), '_utils._common_utils_for_test.reset_after_n_next_calls', 'reset_after_n_next_calls', (['rar_loader_dp', 'n_elements_before_reset'], {}), '(rar_loader_dp, n_elements_before_reset)\n', (29535, 29575), False, 'from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls\n'), ((30199, 30230), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (30209, 30230), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30257, 30284), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (30273, 30284), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30311, 30344), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['rar_loader_dp_1'], {}), '(rar_loader_dp_1)\n', (30327, 30344), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30612, 30643), 'torchdata.datapipes.iter.FileOpener', 'FileOpener', (['datapipe1'], {'mode': '"""b"""'}), "(datapipe1, mode='b')\n", (30622, 30643), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30668, 30695), 'torchdata.datapipes.iter.TarArchiveLoader', 'TarArchiveLoader', (['datapipe2'], {}), '(datapipe2)\n', (30684, 30695), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((30720, 30751), 'torchdata.datapipes.iter.RarArchiveLoader', 'RarArchiveLoader', (['tar_loader_dp'], {}), '(tar_loader_dp)\n', (30736, 30751), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((1898, 1937), 'os.path.join', 'os.path.join', (['self.temp_dir.name', 'fname'], {}), '(self.temp_dir.name, fname)\n', (1910, 1937), False, 'import os\n'), ((2737, 2756), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2753, 2756), False, 'import os\n'), ((2920, 2944), 'os.path.basename', 'os.path.basename', (['res[0]'], {}), '(res[0])\n', (2936, 2944), False, 'import os\n'), ((13537, 13564), 'tarfile.open', 'tarfile.open', (['path', '"""w:tar"""'], {}), "(path, 'w:tar')\n", (13549, 13564), False, 'import tarfile\n'), ((13813, 13839), 'tarfile.open', 'tarfile.open', (['path', '"""w:gz"""'], {}), "(path, 'w:gz')\n", (13825, 13839), False, 'import tarfile\n'), ((15893, 15919), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path', '"""w"""'], {}), "(path, 'w')\n", (15908, 15919), False, 'import zipfile\n'), ((17668, 17690), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (17684, 17690), False, 'import os\n'), ((17726, 17773), 'os.path.join', 'os.path.join', (['self.temp_dir.name', 'f"""{fname}.xz"""'], {}), "(self.temp_dir.name, f'{fname}.xz')\n", (17738, 17773), False, 'import os\n'), ((20547, 20595), 'gzip.open', 'gzip.open', (['f"""{self.temp_dir.name}/temp.gz"""', '"""wb"""'], {}), "(f'{self.temp_dir.name}/temp.gz', 'wb')\n", (20556, 20595), False, 'import gzip\n'), ((23803, 23845), 'torchdata.datapipes.iter.Decompressor', 'Decompressor', (['tar_load_dp'], {'file_type': '"""ABC"""'}), "(tar_load_dp, file_type='ABC')\n", (23815, 23845), False, 'from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader\n'), ((28449, 28504), 'tarfile.open', 'tarfile.open', (["(rar_archive_name + '_nested.tar')", '"""w:tar"""'], {}), "(rar_archive_name + '_nested.tar', 'w:tar')\n", (28461, 28504), False, 'import tarfile\n'), ((1698, 1784), 'warnings.warn', 'warnings.warn', (['f"""TestDataPipeLocalIO was not able to cleanup temp dir due to {e}"""'], {}), "(\n f'TestDataPipeLocalIO was not able to cleanup temp dir due to {e}')\n", (1711, 1784), False, 'import warnings\n'), ((2396, 2420), 'os.path.basename', 'os.path.basename', (['res[0]'], {}), '(res[0])\n', (2412, 2420), False, 'import os\n'), ((2422, 2453), 'os.path.basename', 'os.path.basename', (['expected_file'], {}), '(expected_file)\n', (2438, 2453), False, 'import os\n'), ((5217, 5253), 'os.path.basename', 'os.path.basename', (['path_and_stream[0]'], {}), '(path_and_stream[0])\n', (5233, 5253), False, 'import os\n'), ((12110, 12132), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (12126, 12132), False, 'import os\n'), ((24463, 24485), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (24479, 24485), False, 'import os\n'), ((26399, 26421), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (26415, 26421), False, 'import os\n'), ((28746, 28794), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar.rar"""'], {}), "(self.temp_dir.name, 'test_rar.rar')\n", (28758, 28794), False, 'import os\n'), ((30121, 30176), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar_nested.rar"""'], {}), "(self.temp_dir.name, 'test_rar_nested.rar')\n", (30133, 30176), False, 'import os\n'), ((30534, 30589), 'os.path.join', 'os.path.join', (['self.temp_dir.name', '"""test_rar_nested.tar"""'], {}), "(self.temp_dir.name, 'test_rar_nested.tar')\n", (30546, 30589), False, 'import os\n'), ((7379, 7395), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (7393, 7395), False, 'import hashlib\n'), ((15982, 16018), 'os.path.basename', 'os.path.basename', (['self.temp_files[0]'], {}), '(self.temp_files[0])\n', (15998, 16018), False, 'import os\n'), ((16072, 16108), 'os.path.basename', 'os.path.basename', (['self.temp_files[1]'], {}), '(self.temp_files[1])\n', (16088, 16108), False, 'import os\n'), ((16162, 16198), 'os.path.basename', 'os.path.basename', (['self.temp_files[2]'], {}), '(self.temp_files[2])\n', (16178, 16198), False, 'import os\n'), ((17829, 17865), 'lzma.open', 'lzma.open', (['temp_xzfile_pathname', '"""w"""'], {}), "(temp_xzfile_pathname, 'w')\n", (17838, 17865), False, 'import lzma\n'), ((22425, 22448), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (22441, 22448), False, 'import os\n')] |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 5