| seed | seed_api | index |
|---|---|---|
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._SPLIT_NAME_MAPPER[config.split]))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| torchdata.datapipes.iter.Mapper | 0 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
@_wrap_split_argument(('train', 'valid', 'test'))
def WikiText2(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
# cache data on-disk
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL)): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split]))
# Extract zip and filter the appropriate split file
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").read_from_zip().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode='b')
return data_dp.readlines(strip_newline=False, decode=True, return_path=False)
| torchdata.datapipes.iter.FileLister | 1 |
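The snippet above shows torchdata's two-level on-disk caching idiom: the first `on_disk_cache` keeps the downloaded archive (verified by MD5), the second keeps the member extracted from it. Below is a minimal sketch of the same pattern with a hypothetical URL, hash, and member name; note that newer torchdata releases register the zip loader as `load_from_zip` (`read_from_zip` above is the older name).

```python
import os
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper

URL = "https://example.com/data.zip"      # hypothetical
MD5 = "0123456789abcdef0123456789abcdef"  # hypothetical
root = os.path.expanduser("~/.cache/demo")

url_dp = IterableWrapper([URL])
# Level 1: cache the compressed archive on disk, verified by MD5
archive_dp = url_dp.on_disk_cache(
    filepath_fn=lambda url: os.path.join(root, os.path.basename(url)),
    hash_dict={os.path.join(root, os.path.basename(URL)): MD5},
    hash_type="md5",
)
archive_dp = HttpReader(archive_dp).end_caching(mode="wb", same_filepath_fn=True)
# Level 2: cache the extracted member so the archive is only decompressed once
member_dp = archive_dp.on_disk_cache(filepath_fn=lambda _: os.path.join(root, "train.txt"))
member_dp = FileOpener(member_dp, mode="b").load_from_zip().filter(lambda x: "train.txt" in x[0])
member_dp = member_dp.end_caching(mode="wb", same_filepath_fn=True)
```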
from torchdata.datapipes.utils import StreamWrapper
def _get_response_from_http(url: str, *, timeout: Optional[float]) -> Tuple[str, StreamWrapper]:
try:
with requests.Session() as session:
if timeout is None:
r = session.get(url, stream=True)
else:
r = session.get(url, timeout=timeout, stream=True)
return url, StreamWrapper(r.raw)
except HTTPError as e:
raise Exception(f"Could not get the file. [HTTP Error] {e.response}.")
except RequestException as e:
raise Exception(f"Could not get the file at {url}. [RequestException] {e.response}.")
except Exception:
raise
| torchdata.datapipes.iter.ParagraphAggregator | 2 |
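The helper above streams the HTTP body (`stream=True`) and hands the raw response to `StreamWrapper`, which is what `HttpReader` does internally. A minimal sketch of consuming it lazily, with a hypothetical URL:

```python
from torchdata.datapipes.iter import HttpReader, IterableWrapper

url_dp = IterableWrapper(["https://example.com/file.txt"])  # hypothetical URL
# Each element is (url, StreamWrapper); the body is only fetched as it is read
for url, stream in HttpReader(url_dp, timeout=30):
    head = stream.read(16)  # read just the first bytes; the rest stays unread
```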
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
return cache_dp.parse_json_files().read_squad()
| torchdata.datapipes.iter.FileLister | 3 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
for _, gz_stream in gz_decompress_dp:
with open(self.temp_files[0], "rb") as f:
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar files
tar_decompress_dp = Decompressor(tar_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
| torchdata.datapipes.iter.Demultiplexer | 4 |
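A minimal self-contained sketch of the `Decompressor` being exercised above, writing a small .gz file first so the pipeline has something to decompress:

```python
import gzip
import os
import tempfile

from torchdata.datapipes.iter import Decompressor, FileOpener, IterableWrapper

tmp = tempfile.mkdtemp()
path = os.path.join(tmp, "demo.gz")
with gzip.open(path, "wb") as f:
    f.write(b"hello")

dp = FileOpener(IterableWrapper([path]), mode="b")
# file_type may be given explicitly ("gzip", "zip", "tar", "lzma", ...) or left
# as None to be inferred from the file extension
for name, stream in Decompressor(dp, file_type="gzip"):
    assert stream.read() == b"hello"
```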
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
def test_rar_archive_loader(self):
self._write_test_rar_files()
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp = RarArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
| torchdata.datapipes.iter.IterableWrapper | 5 |
from torchdata.dataloader2 import DataLoader2
expected_batch = 0
for batch in iter(data_loader):
self.assertEqual(batch, expected_batch)
expected_batch += 1
def test_dataloader2_shutdown(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader = DataLoader2(datapipe=test_data_pipe)
data_loader.shutdown()
def test_dataloader2_state_dict(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader = DataLoader2(datapipe=test_data_pipe)
state = data_loader.state_dict()
self.assertIsNotNone(state)
self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME])
self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME])
data_loader.shutdown()
| torchdata.datapipes.iter.Mapper | 6 |
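A minimal sketch of the `DataLoader2` lifecycle these tests exercise: iterate, snapshot the state, shut down.

```python
from torchdata.dataloader2 import DataLoader2
from torchdata.datapipes.iter import IterableWrapper

# With no reading service, DataLoader2 runs the pipeline in-process
dl = DataLoader2(datapipe=IterableWrapper(range(3)))
assert list(dl) == [0, 1, 2]
state = dl.state_dict()  # serialized datapipe plus reading-service state (None here)
dl.shutdown()
```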
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
class TestGraph(expecttest.TestCase):
def _get_datapipes(self) -> Tuple[IterDataPipe, IterDataPipe, IterDataPipe]:
src_dp = IterableWrapper(range(20))
m1 = src_dp.map(_x_and_x_plus_5)
ub = m1.unbatch()
c1, c2 = ub.demux(2, _x_mod_2)
dm = c1.main_datapipe
m2 = c1.map(_x_mult_2)
dp = m2.zip(c2)
return traverse(dp, only_datapipe=True), (src_dp, m1, ub, dm, c1, c2, m2, dp)
def test_find_dps(self) -> None:
graph, (_, m1, *_, m2, _) = self._get_datapipes() # pyre-ignore
dps = find_dps(graph, Mapper)
expected_dps = {m1, m2}
for dp in dps:
self.assertTrue(dp in expected_dps)
def test_replace_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
| torchdata.datapipes.iter.HttpReader | 7 |
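A minimal sketch of the graph utilities used above: `traverse` builds the graph and `find_dps` locates every instance of a given DataPipe class in it.

```python
from torchdata.dataloader2.graph import find_dps, traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper

pipe = IterableWrapper(range(5)).map(lambda x: x + 1).map(str)
graph = traverse(pipe, only_datapipe=True)
# Both .map() stages are Mapper instances, so both are found
assert len(find_dps(graph, Mapper)) == 2
```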
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar files
| torchdata.datapipes.utils.StreamWrapper | 8 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
DATASET_NAME = "AG_NEWS"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=4)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL[split]])
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, split + ".csv"),
hash_dict={os.path.join(root, split + ".csv"): MD5[split]},
hash_type="md5"
)
cache_dp = HttpReader(cache_dp)
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="r")
return cache_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| torchdata.datapipes.iter.FileOpener | 9 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Nested RAR in TAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")])
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
rar_loader_dp = RarArchiveLoader(tar_loader_dp)
# Functional Test: read extracted files before reaching the end of the rarfile
| torchdata.datapipes.iter.HttpReader | 10 |
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchrec.datasets.utils import (
LoadFiles,
ReadLinesFromCSV)
from torch.utils.data import IterDataPipe
from torchrec.datasets.criteo import _default_row_mapper
s3_prefixes = ['s3://criteo-dataset/day_0']
dp_s3_urls = S3FileLister(s3_prefixes)
dp_s3_files = S3FileLoader(dp_s3_urls) # outputs in (url, BytesIO)
# more datapipes to convert loaded bytes, e.g.
class LoadWithTextIOWrapper(IterDataPipe):
def __init__(self, paths, **open_kw):
| torchdata.datapipes.iter.Mapper | 11 |
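A minimal sketch of the S3 pair used above; it assumes torchdata built with the AWSSDK extension and valid AWS credentials, and the bucket name is hypothetical.

```python
from torchdata.datapipes.iter import IterableWrapper, S3FileLister, S3FileLoader

prefixes = IterableWrapper(["s3://my-bucket/criteo/day_0"])  # hypothetical bucket
urls = S3FileLister(prefixes)  # expands each prefix to the matching object URLs
files = S3FileLoader(urls)     # yields (url, stream) pairs for each object
```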
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: Compression Type works for both upper- and lower-case strings
tar_decompress_dp = Decompressor(tar_load_dp, file_type="TAr")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: Compression Type throws error for invalid file type
with self.assertRaisesRegex(ValueError, "not a valid CompressionType"):
Decompressor(tar_load_dp, file_type="ABC")
# Reset Test: Ensure the order is consistent between iterations
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_decompress_dp, n_elements_before_reset)
| torchdata.datapipes.iter.HttpReader | 12 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
return cache_dp.parse_json_files().read_squad()
| torchdata.datapipes.iter.Decompressor | 13 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
def fill_hash_dict():
for path in self.temp_files:
with open(path) as f:
hash_func = hashlib.sha256()
content = f.read().encode("utf-8")
hash_func.update(content)
hash_dict[path] = hash_func.hexdigest()
fill_hash_dict()
datapipe1 = FileLister(self.temp_dir.name, "*")
datapipe2 = FileOpener(datapipe1, mode="b")
hash_check_dp = HashChecker(datapipe2, hash_dict)
# Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# Functional Test: Ensure the rewind option works, and the stream is empty when there is no rewind
hash_check_dp_no_reset = HashChecker(datapipe2, hash_dict, rewind=False)
for (expected_path, _), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp_no_reset):
| torchdata.datapipes.iter.FileOpener | 14 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
config = self.info.make_config(split="val")
resources = self.resources(config)
devkit_dp = resources[1].load(root)
meta_dp = Filter(devkit_dp, path_comparator("name", "meta.mat"))
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
categories_and_wnids = cast(List[Tuple[str, ...]], next(iter(meta_dp)))
categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
return categories_and_wnids
| torchdata.datapipes.iter.FileOpener | 15 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dp = IterKeyZipper(
label_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor(self._val_test_image_key),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._prepare_val_data)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 1_281_167,
"val": 50_000,
| torchdata.datapipes.iter.S3FileLoader | 16 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
graph = traverse(datapipe, only_datapipe=True)
dps = find_dps(graph, Mapper)
| torchdata.datapipes.iter.FileOpener | 17 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional Test: Compression Type throws error for invalid file type
with self.assertRaisesRegex(ValueError, "not a valid CompressionType"):
Decompressor(tar_load_dp, file_type="ABC")
# Reset Test: Ensure the order is consistent between iterations
| torchdata.datapipes.iter.IterableWrapper | 18 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
| torchdata.datapipes.iter.IterableWrapper | 19 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dp = Mapper(dp, self._prepare_train_data if config.split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
_, wnids = zip(*next(iter(meta_dp)))
label_dp = LineReader(label_dp, decode=True, return_path=False)
label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
dp = IterKeyZipper(
label_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._val_test_image_key,
| torchdata.datapipes.iter.Mapper | 20 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
| torchdata.datapipes.iter.Shuffler | 21 |
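The `key_fn=getitem(*[0] * level, 1)` above compensates for the nesting each `IterKeyZipper` pass adds on the left of the sample. A minimal sketch of the same two-pass join with plain lambdas and toy data:

```python
from torchdata.datapipes.iter import IterableWrapper, IterKeyZipper

ids = IterableWrapper([("split.txt", "a"), ("split.txt", "b")])  # (path, id)
images = IterableWrapper([("a", "img_a"), ("b", "img_b")])
anns = IterableWrapper([("a", "ann_a"), ("b", "ann_b")])

dp = ids
for level, data_dp in enumerate((images, anns)):
    dp = IterKeyZipper(
        dp,
        data_dp,
        # pass 0 keys on sample[1]; pass 1 on sample[0][1], one level deeper
        key_fn=lambda t, level=level: t[0][1] if level else t[1],
        ref_key_fn=lambda t: t[0],
    )
# each sample is now ((("split.txt", id), (id, img)), (id, ann))
print(list(dp))
```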
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
def test_header_iterdatapipe(self) -> None:
# Functional Test: ensure the limit is enforced
source_dp = IterableWrapper(range(20))
header_dp = source_dp.header(5)
self.assertEqual(list(range(5)), list(header_dp))
# Functional Test: ensure it works when the source has less elements than the limit
source_dp = IterableWrapper(range(5))
header_dp = source_dp.header(100)
self.assertEqual(list(range(5)), list(header_dp))
# Reset Test:
source_dp = IterableWrapper(range(20))
header_dp = Header(source_dp, 5)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(header_dp, n_elements_before_reset)
self.assertEqual(list(range(2)), res_before_reset)
self.assertEqual(list(range(5)), res_after_reset)
self.assertEqual(list(range(5)), list(header_dp))
# __len__ Test: returns the limit when it is less than the length of source
self.assertEqual(5, len(header_dp))
# __len__ Test: returns the length of source when it is less than the limit
header_dp = source_dp.header(30)
self.assertEqual(20, len(header_dp))
| torchdata.dataloader2.graph.replace_dp | 22 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: Compression Type works for both upper- and lower-case strings
| torchdata.datapipes.iter.IterableWrapper | 23 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self._write_test_xz_files()
datapipe1 = FileLister(self.temp_dir.name, "*.xz")
datapipe2 = FileOpener(datapipe1, mode="b")
xz_loader_dp = XzFileLoader(datapipe2)
| torchdata.datapipes.functional_datapipe | 24 |
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
# Yes, we had to scan the files twice. Alternatively, it is possible to use the
# `fork` DataPipe, but it would require a buffer equal to the size of all
# full file names
# TODO(125): Make sure that `fork` complains when buffer becomes
# too large
list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
else:
list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
list_files_1 = list_files_1.sharding_filter()
| torchdata.datapipes.iter.Enumerator | 25 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional Test: work with .gz files
gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"])
gz_load_dp = FileOpener(gz_file_dp, mode="b")
gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip")
| torchdata.datapipes.iter.Mapper | 26 |
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
category = cat_to_dp[self.parse_category_fn(data)]
yield (data, category)
def MyImageFolder(root=IMAGES_ROOT, transform=None):
if not USE_FORK_DATAPIPE:
# Yes, we had to scan the files twice. Alternatively, it is possible to use the
# `fork` DataPipe, but it would require a buffer equal to the size of all
# full file names
# TODO(125): Make sure that `fork` complains when buffer becomes
# too large
list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
else:
list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
list_files_1 = list_files_1.sharding_filter()
categories = ObtainCategories(list_files_0)
with_categories = AttributeCategories(list_files_1, categories)
using_default_loader = with_categories.map(lambda x: (torchvision.datasets.folder.default_loader(x[0]), x[1]))
transformed = using_default_loader.map(lambda x: (transform(x[0]), x[1]))
return transformed
| torchdata.datapipes.iter.Mapper | 27 |
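A minimal sketch of the `fork` alternative mentioned in the comment: both branches share one buffer, which holds items one branch has produced but the other has not yet consumed, hence the size concern for long file lists.

```python
from torchdata.datapipes.iter import IterableWrapper

a, b = IterableWrapper(range(4)).fork(2)
# Draining `a` first forces the shared buffer to hold all items for `b`
assert list(a) == [0, 1, 2, 3]
assert list(b) == [0, 1, 2, 3]
```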
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
myzip.write(self.temp_files[2], arcname=os.path.basename(self.temp_files[2]))
def test_zip_archive_reader_iterdatapipe(self):
self._write_test_zip_files()
datapipe1 = FileLister(self.temp_dir.name, "*.zip")
datapipe2 = FileOpener(datapipe1, mode="b")
zip_loader_dp = ZipArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the zipfile
self._compressed_files_comparison_helper(self.temp_files, zip_loader_dp, check_length=False)
| torchdata.datapipes.iter.FileLister | 28 |
from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = FileOpener(dp, mode="rb")
return (
Mapper(dp, functools.partial(_collate_and_decode_data, root=root, categories=categories, decoder=decoder)),
categories,
)
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
| torchdata.datapipes.iter.FileOpener | 29 |
from torchdata.datapipes.utils import StreamWrapper
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.pathmgr = g_pathmgr
self.mode: str = mode
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_uri in self.source_datapipe:
with self.pathmgr.open(file_uri, self.mode) as file:
yield file_uri, StreamWrapper(file)
def __len__(self) -> int:
return len(self.source_datapipe)
| torchdata.datapipes.iter.LineReader | 30 |
from torchdata.datapipes.iter import FileOpener, IterableWrapper
:returns: DataPipe that yields rows from QQP dataset (label (int), question1 (str), question2 (str))
:rtype: (int, str, str)
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root): MD5},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
# some context stored at the top of the file needs to be removed
parsed_data = cache_dp.parse_csv(skip_lines=1, delimiter="\t").map(_modify_res)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
| torchdata.datapipes.iter.FileLister | 31 |
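The trailing `shuffle().set_shuffle(False).sharding_filter()` idiom attaches a Shuffler that stays disabled by default (so the loader can switch shuffling on per epoch) plus a sharding point where each worker keeps only its slice. A minimal sketch:

```python
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(8)).shuffle().set_shuffle(False).sharding_filter()
# With shuffling off and no sharding applied, order and content are unchanged
assert list(dp) == list(range(8))
```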
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = IoPathSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
| torchdata.datapipes.iter.Header | 32 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
enum_dp = source_dp.enumerate(starting_index=10)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], list(enum_dp))
# Reset Test:
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(enum_dp, n_elements_before_reset)
self.assertEqual([(10, "a"), (11, "b")], res_before_reset)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(5, len(enum_dp))
def test_index_adder_iterdatapipe(self) -> None:
letters = "abcdefg"
source_dp = IterableWrapper([{i: i} for i in letters])
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
def dict_content_test_helper(iterator):
for i, curr_dict in enumerate(iterator):
self.assertEqual(i, curr_dict["index"])
self.assertTrue(letters[i] in curr_dict)
# Functional Test: ensure that the correct index value is added to each element (dict)
dict_content_test_helper(it)
# Functional Test: raises error when the elements of source_dp is not of type Dict
source_dp = IterableWrapper(range(10))
| torchdata.datapipes.iter.FileLister | 33 |
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
)
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode="b")
return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| torchdata.datapipes.iter.IoPathFileOpener | 34 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
("file2", "Line2,1\r\n"),
("file2", "Line2,2\r\n"),
("file2", "Line2,3"),
]
self.assertEqual(expected_result, list(line_reader_dp))
# Reset Test:
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = LineReader(source_dp, strip_newline=False)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(line_reader_dp, n_elements_before_reset)
self.assertEqual(expected_result[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_result, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
| torchdata.datapipes.iter.IterableWrapper | 35 |
from torchdata.datapipes.iter import HttpReader, IterableWrapper
# Reset Test: http_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = http_reader_dp.check_hash({file_url: expected_MD5_hash}, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(hasattr(stream, "read"))  # the stream should be a readable file-like object
# __len__ Test: returns the length of source DataPipe
source_dp = IterableWrapper([file_url])
http_dp = HttpReader(source_dp)
self.assertEqual(1, len(http_dp))
if __name__ == "__main__":
unittest.main()
| torchdata.datapipes.iter.FileLister | 36 |
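A minimal self-contained sketch of `check_hash` on a local file; a mismatching hash raises during iteration, before the item is yielded.

```python
import hashlib
import os
import tempfile

from torchdata.datapipes.iter import FileOpener, IterableWrapper

tmp = tempfile.mkdtemp()
path = os.path.join(tmp, "demo.txt")
with open(path, "wb") as f:
    f.write(b"hello")
md5 = hashlib.md5(b"hello").hexdigest()

dp = FileOpener(IterableWrapper([path]), mode="b").check_hash({path: md5}, "md5")
for p, stream in dp:  # rewind=True (default) leaves the stream readable
    assert stream.read() == b"hello"
```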
from torchdata.datapipes.iter import FileOpener, IterableWrapper, HttpReader
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
| torchdata.datapipes.iter.FileLister | 37 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader
def _generate_categories(self, root: pathlib.Path) -> Tuple[str, ...]:
dp = self.resources(self.default_config)[0].load(pathlib.Path(root) / self.name)
dp = Filter(dp, path_comparator("name", "category_names.m"))
dp = LineReader(dp)
dp = Mapper(dp, bytes.decode, input_col=1)
lines = tuple(zip(*iter(dp)))[1]
| torchdata.datapipes.iter.FileOpener | 38 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
hash_dict={os.path.join(root, split + ".csv"): MD5[split]},
hash_type="md5"
)
cache_dp = HttpReader(cache_dp)
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="r")
| torchdata.datapipes.iter.FileOpener | 39 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def test_tar_archive_reader_iterdatapipe(self):
self._write_test_tar_files()
datapipe1 = FileLister(self.temp_dir.name, "*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
self._write_test_tar_gz_files()
| torchdata.datapipes.iter.CSVDictParser | 40 |
from torchdata.datapipes import functional_datapipe
| torchdata.datapipes.iter.LineReader | 41 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
exp_g1 = {
dp: {
m2: {c1: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}}},
c2: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}},
}
}
self.assertEqual(graph, exp_g1)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g1)
graph = replace_dp(graph, m2, new_dp2)
exp_g2 = {
dp: {
new_dp2: {m2: {c1: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}}}},
c2: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}},
}
}
self.assertEqual(graph, exp_g2)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
| torchdata.datapipes.iter.FileLister | 42 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar files
tar_decompress_dp = Decompressor(tar_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
| torchdata.datapipes.iter.Mapper | 43 |
from torchdata.datapipes.iter import IterableWrapper
),
(iterdp.SampleMultiplexer, {IterableWrapper([0] * 10): 0.5, IterableWrapper([1] * 10): 0.5}, (), {}),
| torchdata.datapipes.iter.FileOpener | 44 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Reset Test
source_dp = IterableWrapper([{i: i} for i in "abcdefg"])
index_adder_dp = IndexAdder(source_dp)
n_elements_before_reset = 2
| torchdata.dataloader2.graph.remove_dp | 45 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, CSVDictParser, Zipper, Demultiplexer
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
# The order of the image files in the .zip archives perfectly matches the order of the entries in the
# (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, self._prepare_sample)
| torchdata.datapipes.iter.FileOpener | 46 |
from torchdata.datapipes.iter import FileOpener, IterableWrapper
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root, URL): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").load_from_zip().filter(partial(_filter_fn, split))
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
parsed_data = (
data_dp.parse_csv(skip_lines=1, delimiter="\t", quoting=csv.QUOTE_NONE).filter(_filter_res).map(_modify_res)
)
return parsed_data.shuffle().set_shuffle(False).sharding_filter()
| torchdata.datapipes.iter.Mapper | 47 |
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
s3_urls = list(filter(is_final_day, s3_urls))
rank = (
dist.get_rank()
if stage == "val"
else dist.get_rank() + dist.get_world_size()
)
world_size = dist.get_world_size() * 2
s3_urls_buffers = S3FileLoader(s3_urls)
def row_mapper(row: List[str]) -> Tuple[List[int], List[int], int]:
label = safe_cast(row[0], int, 0)
dense = [safe_cast(row[i], int, 0) for i in range(1, 1 + INT_FEATURE_COUNT)]
sparse = [
int(safe_cast(row[i], str, "0") or "0", 16)
for i in range(
| torchdata.datapipes.iter.Mapper | 48 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
self.assertEqual(expected_res, list(result_dp))
# Functional Test: ensure that a custom merge function works
def custom_merge(a, b):
return f"{a} is a {b} number."
result_dp = source_dp.zip_with_map(map_dp, odd_even, custom_merge)
expected_res2 = [f"{i} is a {odd_even_string(i)} number." for i in range(10)]
self.assertEqual(expected_res2, list(result_dp))
# Functional Test: raises error when key is invalid
def odd_even_bug(i: int) -> int:
return 2 if i == 0 else i % 2
result_dp = MapKeyZipper(source_dp, map_dp, odd_even_bug)
it = iter(result_dp)
with self.assertRaisesRegex(KeyError, "is not a valid key in the given MapDataPipe"):
next(it)
# Reset Test:
n_elements_before_reset = 4
result_dp = source_dp.zip_with_map(map_dp, odd_even)
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: returns the length of source DataPipe
result_dp = source_dp.zip_with_map(map_dp, odd_even)
| torchdata.dataloader2.graph.find_dps | 49 |
from torchdata.datapipes.iter import FileOpener, IterableWrapper
- train: 87599
- dev: 10570
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
:returns: DataPipe that yields data points from SQuAD1 dataset which consist of context, question, list of answers and corresponding index in context
:rtype: (str, str, list(str), list(int))
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root, split),
hash_dict={_filepath_fn(root, split): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
return cache_dp.parse_json_files().read_squad().shuffle().set_shuffle(False).sharding_filter()
| torchdata.datapipes.iter.TFRecordLoader | 50 |
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface
mp.set_start_method("fork")
rs = MultiProcessingReadingService(2, persistent_workers=True)
dl = DataLoader2(dp, reading_service=rs)
d1 = list(dl)
d2 = list(dl)
self.assertEqual(d1, d2)
| torchdata.datapipes.iter.Mapper | 51 |
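A minimal sketch of the reading service used above: two worker processes, kept alive across epochs by `persistent_workers`, which is why two full passes over the loader compare equal.

```python
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper

if __name__ == "__main__":  # guard required on spawn-based platforms
    dp = IterableWrapper(range(10)).sharding_filter()
    rs = MultiProcessingReadingService(num_workers=2, persistent_workers=True)
    dl = DataLoader2(dp, reading_service=rs)
    assert list(dl) == list(dl)
    dl.shutdown()
```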
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split]))
# Extract zip and filter the appropriate split file
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").read_from_zip().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
| torchdata.datapipes.iter.IoPathFileOpener | 52 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL[split]])
cache_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, split + ".csv"),
hash_dict={os.path.join(root, split + ".csv"): MD5[split]},
hash_type="md5"
)
cache_dp = HttpReader(cache_dp)
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="r")
return cache_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| torchdata.datapipes.iter.Decompressor | 53 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
| torchdata.datapipes.functional_datapipe | 54 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(rar_loader_dp)
# Nested RAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp_1 = RarArchiveLoader(datapipe2)
rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1)
with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"):
| torchdata.datapipes.iter.Mapper | 55 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if self._split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
_, wnids = zip(*next(iter(meta_dp)))
label_dp = LineReader(label_dp, decode=True, return_path=False)
# We cannot use self._wnids here, since we use a different order than the dataset
label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
| torchdata.datapipes.iter.S3FileLister | 56 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
| torchdata.datapipes.iter.IoPathFileLister | 57 |
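The `Mapper` above yields whole (images, labels) batches, which `UnBatcher` then flattens into individual samples. A minimal sketch of that flattening step with toy data:

```python
from torchdata.datapipes.iter import IterableWrapper, UnBatcher

batched = IterableWrapper([[("img0", 0), ("img1", 1)], [("img2", 2)]])
# One level of batching is removed: each inner element becomes its own sample
assert list(UnBatcher(batched)) == [("img0", 0), ("img1", 1), ("img2", 2)]
```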
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
def _write_test_xz_files(self):
for path in self.temp_files:
fname = os.path.basename(path)
temp_xzfile_pathname = os.path.join(self.temp_dir.name, f"{fname}.xz")
with open(path) as f:
with lzma.open(temp_xzfile_pathname, "w") as xz:
xz.write(f.read().encode("utf-8"))
def test_xz_archive_reader_iterdatapipe(self):
# Worth noting that the .tar and .zip tests write multiple files into the same compressed file,
# whereas we create multiple .xz files in the same directory below.
self._write_test_xz_files()
datapipe1 = FileLister(self.temp_dir.name, "*.xz")
datapipe2 = FileOpener(datapipe1, mode="b")
xz_loader_dp = XzFileLoader(datapipe2)
# Functional Test: Read extracted files before reaching the end of the xzfile
self._unordered_compressed_files_comparison_helper(self.temp_files, xz_loader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the xzfile
data_refs = list(xz_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
xz_loader_dp = datapipe2.load_from_xz()
| torchdata.datapipes.iter.Demultiplexer | 58 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, IterKeyZipper, Demultiplexer, JsonParser, UnBatcher
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
| torchdata.datapipes.iter.HttpReader | 59 |
from torchdata.datapipes.iter import IterDataPipe, Filter, Mapper
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, functools.partial(self._is_data_file, split=config.split))
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, functools.partial(self._collate_and_decode, decoder=decoder))
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
| torchdata.datapipes.iter.FileOpener | 60 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Functional Test: aggregate lines correctly with different joiner
para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls))
self.assertEqual([("file1", "Line1 Line2"), ("file2", "Line2,1 Line2,2 Line2,3")], list(para_agg_dp))
# Reset Test: each yield is for a single file
para_agg_dp = ParagraphAggregator(source_dp)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(para_agg_dp, n_elements_before_reset)
self.assertEqual([("file1", "Line1\nLine2")], res_before_reset)
self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], res_after_reset)
| torchdata.dataloader2.graph.remove_dp | 61 |
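A minimal sketch of the round trip the test exercises: `LineReader` splits a file into (path, line) pairs and `ParagraphAggregator` joins them back per file with a custom joiner.

```python
import io

from torchdata.datapipes.iter import IterableWrapper, LineReader, ParagraphAggregator

files = IterableWrapper([("file1", io.StringIO("Line1\nLine2"))])
lines = LineReader(files)
paras = ParagraphAggregator(lines, joiner=lambda ls: " ".join(ls))
assert list(paras) == [("file1", "Line1 Line2")]
```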
from torchdata.datapipes.iter import IterableWrapper
(iterdp.BatchMapper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), (_fake_batch_fn, 2, 1), {}),
(iterdp.BucketBatcher, IterableWrapper([0, 0, 0, 0, 0, 0, 0]), (5,), {}),
| torchdata.datapipes.iter.HashChecker | 62 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
hash_func = hashlib.sha256()
content = f.read().encode("utf-8")
hash_func.update(content)
hash_dict[path] = hash_func.hexdigest()
fill_hash_dict()
datapipe1 = FileLister(self.temp_dir.name, "*")
datapipe2 = FileOpener(datapipe1, mode="b")
hash_check_dp = HashChecker(datapipe2, hash_dict)
# Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, hash_check_dp):
self.assertEqual(expected_path, actual_path)
| torchdata.datapipes.iter.Shuffler | 63 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
self.assertEqual(30, len(header_dp))
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"length of this HeaderIterDataPipe is inferred to be equal to its limit"
)
# __len__ Test: returns the true number of elements once a source without a length has been iterated through
for _ in header_dp:
pass
self.assertEqual(20, len(header_dp))
def test_enumerator_iterdatapipe(self) -> None:
letters = "abcde"
source_dp = IterableWrapper(letters)
enum_dp = source_dp.enumerate()
# Functional Test: ensure that the correct index value is added to each element (tuple)
self.assertEqual([(0, "a"), (1, "b"), (2, "c"), (3, "d"), (4, "e")], list(enum_dp))
# Functional Test: start index from non-zero
enum_dp = source_dp.enumerate(starting_index=10)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], list(enum_dp))
# Reset Test:
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(enum_dp, n_elements_before_reset)
| torchdata.datapipes.iter.XzFileLoader | 64 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
graph = remove_dp(graph, m1)
exp_g1 = {dp: {m2: {c1: {dm: {ub: {src_dp: {}}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g1)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g1)
graph = remove_dp(graph, m2)
exp_g2 = {dp: {c1: {dm: {ub: {src_dp: {}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g2)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
with self.assertRaisesRegex(
Exception,
"Cannot remove source DataPipe that is the first DataPipe in the pipeline",
):
remove_dp(graph, src_dp)
| torchdata.dataloader2.graph.remove_dp | 65 |
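A minimal sketch of `remove_dp` rewiring a live pipeline, as the test above verifies via `traverse`: dropping the middle Mapper reconnects its consumer directly to the source.

```python
from torchdata.dataloader2.graph import remove_dp, traverse
from torchdata.datapipes.iter import IterableWrapper

src = IterableWrapper(range(3))
mid = src.map(lambda x: x + 1)
tail = mid.map(str)
graph = traverse(tail, only_datapipe=True)
graph = remove_dp(graph, mid)
# `tail` now consumes `src` directly, so the +1 step no longer applies
assert list(tail) == ["0", "1", "2"]
```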
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
lister_dp = FileLister(self.temp_dir.name, "*.text")
iopath_file_loader_dp = IoPathFileOpener(lister_dp, mode="rb")
| torchdata.datapipes.iter.Cycler | 66 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Functional Test: raises error when the elements of source_dp is not of type Dict
source_dp = IterableWrapper(range(10))
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
with self.assertRaisesRegex(NotImplementedError, "We only support adding index to row or batch in dict type"):
next(it)
# Reset Test
source_dp = IterableWrapper([{i: i} for i in "abcdefg"])
index_adder_dp = IndexAdder(source_dp)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(index_adder_dp, n_elements_before_reset)
dict_content_test_helper(iter(res_before_reset))
dict_content_test_helper(iter(res_after_reset))
# __len__ Test: returns length of source DataPipe
self.assertEqual(7, len(index_adder_dp))
| torchdata.datapipes.iter.FileOpener | 67 |
from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter
def from_image_folder(
root: Union[str, pathlib.Path],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]] = pil,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, decoder=decoder, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
| torchdata.datapipes.iter.Filter | 68 |
from torchdata.datapipes import functional_datapipe
def __iter__(self) -> Iterator[str]:
if self.pathmgr.isfile(self.root):
yield self.root
else:
for file_name in self.pathmgr.ls(self.root):
yield os.path.join(self.root, file_name)
@functional_datapipe("load_file_by_iopath")
class IoPathFileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r""":class:`IoPathFileLoaderIterDataPipe`.
Iterable DataPipe that loads files from an input DataPipe containing
URIs. It yields a tuple of pathname and an opened file stream.
Args:
source_datapipe: Iterable DataPipe that provides the pathname
| torchdata.datapipes.iter.Mapper | 69 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
tar_gz_load_dp = FileOpener(tar_gz_file_dp, mode="b")
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: work with .gz files
gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"])
gz_load_dp = FileOpener(gz_file_dp, mode="b")
gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip")
for _, gz_stream in gz_decompress_dp:
with open(self.temp_files[0], "rb") as f:
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
| torchdata.datapipes.iter.Mapper | 70 |
from torchdata.mpii import MPII_Joint_Names, MpiiData
def run_gui(preds, subset, model=None):
mpii_data = MpiiData('/datasets/mpii')
| torchdata.datapipes.iter.ZipArchiveReader | 71 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dict(zip(("label", "wnid"), label_data if label_data else (None, None))),
path=path,
image=EncodedImage.from_file(buffer),
)
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
if config.split in {"train", "test"}:
dp = resource_dps[0]
# the train archive is a tar of tars
if config.split == "train":
dp = TarArchiveLoader(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if config.split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
| torchdata.datapipes.iter.CSVDictParser | 72 |
from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader
}
@torch.no_grad()
def test_tfrecord_loader_example_iterdatapipe(self):
filename = f"{self.temp_dir}/example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
| torchdata.datapipes.iter.RarArchiveLoader | 73 |
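A minimal sketch of the loader under test, assuming an already-written TFRecord file at a hypothetical path; each record comes back as a dict mapping feature names to tensors.

```python
from torchdata.datapipes.iter import FileOpener, IterableWrapper

dp = FileOpener(IterableWrapper(["example.tfrecord"]), mode="b")  # hypothetical file
records = dp.load_from_tfrecord()  # yields one feature-name -> tensor dict per example
```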
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
key_fn=getitem(0),
ref_key_fn=self._val_test_image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._prepare_val_data)
return Mapper(dp, self._prepare_sample)
# Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
# and n03126707 are labeled 'crane', but the former means the bird and the latter the construction equipment
_WNID_MAP = {
"n03126707": "construction crane",
| torchdata.datapipes.iter.HttpReader | 74 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{config.split}{config.fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
| torchdata.datapipes.iter.IterableWrapper | 75 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
self.assertEqual(2, len(result_dp))
def test_sample_multiplexer_iterdatapipe(self) -> None:
# Functional Test: yields all values from the sources
source_dp1 = IterableWrapper([0] * 10)
source_dp2 = IterableWrapper([1] * 10)
d: Dict[IterDataPipe, float] = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
result = list(sample_mul_dp)
self.assertEqual([0] * 10 + [1] * 10, result)
# Functional Test: raises error for empty dict
with self.assertRaisesRegex(ValueError, "Empty dictionary"):
SampleMultiplexer(pipes_to_weights_dict={}, seed=0) # type: ignore[arg-type]
# Functional Test: raises error for negative or zero weight
d = {source_dp1: 99999999, source_dp2: 0}
with self.assertRaisesRegex(ValueError, "Expecting a positive and non-zero weight"):
SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
# Reset Test
d = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(sample_mul_dp, n_elements_before_reset)
self.assertEqual([0] * n_elements_before_reset, res_before_reset)
| torchdata.dataloader2.graph.replace_dp | 76 |
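The weighted-sampling behavior tested above in a few lines; once one pipe is exhausted, the remaining pipes are drained:

from torchdata.datapipes.iter import IterableWrapper, SampleMultiplexer

a = IterableWrapper(["a"] * 5)
b = IterableWrapper(["b"] * 5)
# weights are relative sampling probabilities, not counts
dp = SampleMultiplexer(pipes_to_weights_dict={a: 0.9, b: 0.1}, seed=0)
print(list(dp))  # 10 elements total, 'a' heavily favored early on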
from torchdata.datapipes.iter import IterDataPipe, FileLister, FileOpener, Mapper, Shuffler, Filter
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = FileOpener(dp, mode="rb")
return (
| torchdata.datapipes.iter.IterableWrapper | 77 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(rar_loader_dp)
# Nested RAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp_1 = RarArchiveLoader(datapipe2)
rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1)
with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"):
list(rar_loader_dp_2)
| torchdata.datapipes.iter.IterableWrapper | 78 |
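A minimal sketch of the single-level case this test builds on, assuming a placeholder archive.rar and the rarfile package, which RarArchiveLoader requires:

from torchdata.datapipes.iter import FileOpener, IterableWrapper

dp = IterableWrapper(["archive.rar"])  # placeholder path
dp = FileOpener(dp, mode="b")
dp = dp.load_from_rar()  # functional form of RarArchiveLoader
for path, stream in dp:
    print(path)  # path of each member inside the archive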
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def _write_test_tar_gz_files(self):
path = os.path.join(self.temp_dir.name, "test_gz.tar.gz")
with tarfile.open(path, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def test_tar_archive_reader_iterdatapipe(self):
self._write_test_tar_files()
datapipe1 = FileLister(self.temp_dir.name, "*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
self._write_test_tar_gz_files()
datapipe_gz_1 = FileLister(self.temp_dir.name, "*.tar.gz")
datapipe_gz_2 = FileOpener(datapipe_gz_1, mode="b")
gz_reader_dp = TarArchiveLoader(datapipe_gz_2)
# Functional Test: Read extracted files before reaching the end of the tarfile
self._compressed_files_comparison_helper(self.temp_files, tar_loader_dp, check_length=False)
self._compressed_files_comparison_helper(self.temp_files, gz_reader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the tarfile
data_refs = list(tar_loader_dp)
| torchdata.datapipes.iter.RarArchiveLoader | 79 |
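The pattern under test, reduced to its core; TarArchiveLoader's default "r:*" mode auto-detects compression, which is why the same loader handles both .tar and .tar.gz above:

from torchdata.datapipes.iter import FileLister, FileOpener

dp = FileLister(".", "*.tar*")  # placeholder directory and mask
dp = FileOpener(dp, mode="b")
dp = dp.load_from_tar()  # functional form of TarArchiveLoader
for path, stream in dp:
    print(path, stream.read()[:16])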
from torchdata.datapipes.iter import IterableWrapper
),
(),
{},
),
(iterdp.Cycler, None, (2,), {}),
(iterdp.DataFrameMaker, IterableWrapper([(i,) for i in range(3)]), (), {"dtype": DTYPE}),
(iterdp.Decompressor, None, (), {}),
(iterdp.Enumerator, None, (2,), {}),
(iterdp.FlatMapper, None, (_fake_fn_ls,), {}),
(iterdp.FSSpecFileLister, ".", (), {}),
(iterdp.FSSpecFileOpener, None, (), {}),
(
iterdp.FSSpecSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.GDriveReader, None, (), {}),
(iterdp.HashChecker, None, ({},), {}),
(iterdp.Header, None, (3,), {}),
(iterdp.HttpReader, None, (), {}),
# TODO (ejguan): Deterministic serialization is required
# (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}),
(iterdp.InMemoryCacheHolder, None, (), {}),
(iterdp.IndexAdder, IterableWrapper([{"a": 1, "b": 2}, {"c": 3, "a": 1}]), ("label",), {}),
(iterdp.IoPathFileLister, ".", (), {}),
| torchdata.datapipes.iter.HashChecker | 80 |
from torchdata.datapipes.iter import HttpReader
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root, split):
"""Demonstrating streaming use case
This might be useful when we do not want to cache or download the data.
The limitation is that we do not have any checking mechanism or data sanity check.
"""
# Stack CSV Parser directly on top of web-stream
return HttpReader([URL[split]]).parse_csv().map(lambda t: (int(t[0]), " ".join(t[1:])))
| torchdata.datapipes.iter.FileOpener | 81 |
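The same stream-and-parse idea with a generic endpoint; the URL is hypothetical, and each parsed row arrives as a list of string fields:

from torchdata.datapipes.iter import HttpReader, IterableWrapper

url_dp = IterableWrapper(["https://example.com/data.csv"])  # hypothetical URL
dp = HttpReader(url_dp).parse_csv(skip_lines=1)  # skip a header row
for row in dp:
    print(row)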
from torchdata.datapipes.iter import Mapper
train_dp = train_img_dp.zip(train_height_dp).zip(train_label_dp)
val_dp = val_img_dp.zip(val_height_dp).zip(val_label_dp)
test_dp = test_img_dp.zip(test_height_dp).zip(test_label_dp)
'''tfs = transforms.Compose(transforms.Resize((256,256)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomResizedCrop((224, 224), scale=[0.5, 1]))'''
ndp = {'train': train_dp, 'val': val_dp, 'test': test_dp}[self._split]
ndp = hint_shuffling(ndp)
ndp = hint_sharding(ndp)
ndp = Mapper(ndp, self._prepare_sample)
#ndp = ndp.map(tfs)
return ndp
def __len__(self) -> int:
return {
'train': _TRAIN_LEN,
'val': _VAL_LEN,
'test': _TEST_LEN
}[self._split]
| torchdata.datapipes.iter.GDriveReader | 82 |
from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float_unknown": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
self.assertEqual(len(expected_res), len(res_after_reset))
for true_data, loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
| torchdata.datapipes.iter.IterableWrapper | 83 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
_EXTRACTED_FILES = {
'train': os.path.join('wikitext-2', 'wiki.train.tokens'),
'test': os.path.join('wikitext-2', 'wiki.test.tokens'),
'valid': os.path.join('wikitext-2', 'wiki.valid.tokens'),
}
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'valid', 'test'))
def WikiText2(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")
url_dp = IterableWrapper([URL])
# cache data on-disk
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, os.path.basename(x)),
hash_dict={os.path.join(root, os.path.basename(URL)): MD5},
hash_type="md5",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split]))
# Extract zip and filter the appropriate split file
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").read_from_zip().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, mode='b')
| torchdata.datapipes.iter.JsonParser | 84 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
("2.json", {"__complex__": True, "real": 1, "imag": 2}),
]
self.assertEqual(expected_res, list(json_dp))
# Reset Test:
json_dp = JsonParser(datapipe_nonempty)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(json_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
| torchdata.datapipes.iter.IterableWrapper | 85 |
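The parser exercised above, as a standalone sketch over placeholder *.json files; it yields (filename, deserialized object) pairs, matching the expected_res entries in the test:

from torchdata.datapipes.iter import FileLister, FileOpener

dp = FileLister(".", "*.json")  # placeholder directory and mask
dp = FileOpener(dp, mode="b")
dp = dp.parse_json_files()  # functional form of JsonParser
for path, obj in dp:
    print(path, obj)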
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
try: # TODO: Create dependency on TorchArrow?
import pyarrow.parquet as parquet
import torcharrow
except ImportError:
torcharrow = None
parquet = None
T_co = TypeVar("T_co")
@functional_datapipe("dataframe")
class DataFrameMakerIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]]
r"""
Takes rows of data, batches a number of them together and creates `TorchArrow`
DataFrames (functional name: ``dataframe``).
Note:
There is a trade-off between having a large number of rows within a DataFrame and usage of memory. Please
choose a value carefully.
Args:
source_dp: IterDataPipe containing rows of data
dataframe_size: number of rows of data within each DataFrame
| torchdata.datapipes.iter.IterableWrapper | 86 |
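A minimal sketch of the functional form registered above, assuming torcharrow is installed; the column name "i" and the dtype are illustrative:

import torcharrow.dtypes as dt
from torchdata.datapipes.iter import IterableWrapper

source = IterableWrapper([(i,) for i in range(6)])
DTYPE = dt.Struct([dt.Field("i", dt.int64)])  # one int64 column
df_dp = source.dataframe(dataframe_size=3, dtype=DTYPE)
for df in df_dp:
    print(len(df))  # 3 rows per TorchArrow DataFrame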
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Functional Test: using sort_key, without in_batch_shuffle
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=2, in_batch_shuffle=False, sort_key=_return_self
)
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Reset Test:
batch_dp = BucketBatcher(
source_dp,
batch_size=3,
drop_last=True,
batch_num=100,
bucket_num=2,
in_batch_shuffle=False,
| torchdata.dataloader2.graph.traverse | 87 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
def test_remove_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
graph = remove_dp(graph, m1)
exp_g1 = {dp: {m2: {c1: {dm: {ub: {src_dp: {}}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g1)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g1)
graph = remove_dp(graph, m2)
exp_g2 = {dp: {c1: {dm: {ub: {src_dp: {}}}}, c2: {dm: {ub: {src_dp: {}}}}}}
self.assertEqual(graph, exp_g2)
self.assertEqual(traverse(dp, only_datapipe=True), exp_g2)
with self.assertRaisesRegex(
Exception,
"Cannot remove source DataPipe that is the first DataPipe in the pipeline",
):
remove_dp(graph, src_dp)
| torchdata.datapipes.iter.FileLister | 88 |
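A sketch of the same graph utilities outside the test harness, assuming a torchdata version where traverse still accepts only_datapipe (as used above):

from torchdata.dataloader2.graph import find_dps, replace_dp, traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper

src = IterableWrapper(range(5))
dp = Mapper(src, lambda x: x + 1).shuffle()
graph = traverse(dp, only_datapipe=True)
# locate every Mapper in the graph and swap it for a pass-through Mapper
for mapper in find_dps(graph, Mapper):
    graph = replace_dp(graph, mapper, Mapper(src, lambda x: x))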
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader
)
return Mapper(dp, self._prepare_sample)
| torchdata.datapipes.iter.Demultiplexer | 89 |
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
) -> DataLoader:
s3_urls = S3FileLister([args.s3_criteo_prefix])
| torchdata.datapipes.iter.Mapper | 90 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Functional Test: Cache DP should just return the data without changing the values
res1 = list(cache_dp)
self.assertEqual(list(range(10)), res1)
# Functional Test: Ensure the objects are the same ones from source DataPipe
res1 = list(cache_dp)
res2 = list(cache_dp)
self.assertTrue(id(source) == id(cache) for source, cache in zip(source_dp, res1))
self.assertTrue(id(source) == id(cache) for source, cache in zip(source_dp, res2))
# TODO(122): Figure out a way to consistently test caching when size is in megabytes
# Reset Test: reset the DataPipe after reading part of it
cache_dp = InMemoryCacheHolder(source_dp, size=5)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(cache_dp, n_elements_before_reset)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(list(range(10)), res_after_reset)
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(cache_dp))
# __len__ Test: source_dp has no len and cache is not yet loaded
source_dp_no_len = IDP_NoLen(range(10))
cache_dp = InMemoryCacheHolder(source_dp_no_len, size=5)
with self.assertRaisesRegex(TypeError, "doesn't have valid length until the cache is loaded"):
| torchdata.datapipes.iter.S3FileLister | 91 |
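The caching behavior the test checks, in functional form; size is a budget in megabytes, and repeated passes serve the same objects from memory:

from torchdata.datapipes.iter import IterableWrapper

source = IterableWrapper(range(10))
cache_dp = source.in_memory_cache(size=5)  # cache up to 5 MB
print(list(cache_dp))  # first pass fills the cache
print(list(cache_dp))  # second pass reads back cached objects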
from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler
def _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe[Dict[str, Any]]:
dp = IoPathFileLister(root=root)
dp = SharderDataPipe(dp)
dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE)
| torchdata.datapipes.iter.IterKeyZipper | 92 |
from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader
@torch.no_grad()
def test_tfrecord_loader_example_iterdatapipe(self):
filename = f"{self.temp_dir}/example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
| torchdata.datapipes.iter.CSVParser | 93 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dict(zip(("label", "wnid"), label_data if label_data else (None, None))),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split in {"train", "test"}:
dp = resource_dps[0]
# the train archive is a tar of tars
if self._split == "train":
dp = TarArchiveLoader(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if self._split == "train" else self._prepare_test_data)
else: # self._split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
devkit_dp, 2, self._classify_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_and_wnids)
_, wnids = zip(*next(iter(meta_dp)))
label_dp = LineReader(label_dp, decode=True, return_path=False)
# We cannot use self._wnids here, since we use a different order than the dataset
label_dp = Mapper(label_dp, functools.partial(self._imagenet_label_to_wnid, wnids=wnids))
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
| torchdata.maps.Flatten | 94 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_parser_dp)
def test_csv_dict_parser_iterdatapipe(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at a time as dict, with the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
expected_res1 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
self.assertEqual(expected_res1, list(csv_dict_parser_dp))
| torchdata.datapipes.iter.ShardingFilter | 95 |
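The dict parser from this test in isolation, over placeholder *.csv files whose first row is the header:

from torchdata.datapipes.iter import FileLister, FileOpener

dp = FileLister(".", "*.csv")  # placeholder directory and mask
dp = FileOpener(dp, mode="b")
dp = dp.parse_csv_as_dict()  # functional form of CSVDictParser
for row in dp:
    print(row)  # e.g. {"key": "a", "item": "1"}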
from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self.categories) if label_id is not None else None,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
| torchdata.datapipes.iter.FileOpener | 96 |
from torchdata.datapipes.iter import IterableWrapper
]
),
(),
{},
),
(
iterdp.LineReader,
IterableWrapper(
[("file1", StringIO("Line1\nLine2")), ("file2", StringIO("Line2,1\r\nLine2,2\r\nLine2,3"))]
),
(),
{},
),
(
iterdp.MaxTokenBucketizer,
IterableWrapper(["1", "22", "1", "4444", "333", "1", "22", "22", "333"]),
(4,),
{},
),
(
iterdp.MapKeyZipper,
IterableWrapper([("a", 1), ("b", 2), ("c", 3)]),
(SequenceWrapper({"a": 100, "b": 200, "c": 300}), itemgetter(0)),
{},
),
(iterdp.OnDiskCacheHolder, None, (), {}),
(iterdp.OnlineReader, None, (), {}),
(
iterdp.ParagraphAggregator,
IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]),
| torchdata.datapipes.iter.FileOpener | 97 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
list(rar_loader_dp_2)
# Nested RAR in TAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")])
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
rar_loader_dp = RarArchiveLoader(tar_loader_dp)
| torchdata.datapipes.iter.Mapper | 98 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Reset Test:
result_dp = Rows2Columnar(source_dp, column_names_list)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual([expected_output[0]], res_before_reset)
self.assertEqual(expected_output, res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(2, len(result_dp))
def test_sample_multiplexer_iterdatapipe(self) -> None:
# Functional Test: yields all values from the sources
source_dp1 = IterableWrapper([0] * 10)
source_dp2 = IterableWrapper([1] * 10)
d: Dict[IterDataPipe, float] = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
result = list(sample_mul_dp)
self.assertEqual([0] * 10 + [1] * 10, result)
# Functional Test: raises error for empty dict
with self.assertRaisesRegex(ValueError, "Empty dictionary"):
SampleMultiplexer(pipes_to_weights_dict={}, seed=0) # type: ignore[arg-type]
# Functional Test: raises error for negative or zero weight
| torchdata.datapipes.iter.FileLister | 99 |
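A compact sketch of the bucketing call above, with the same in_batch_shuffle/sort_key signature; sort_key receives a whole bucket and returns it sorted, so each batch groups similar-length strings:

from torchdata.datapipes.iter import IterableWrapper

source = IterableWrapper(["1", "22", "333", "4444", "55555", "666666"])
batch_dp = source.bucketbatch(
    batch_size=2,
    drop_last=False,
    batch_num=2,
    bucket_num=1,
    in_batch_shuffle=False,
    sort_key=lambda bucket: sorted(bucket, key=len),
)
print(list(batch_dp))  # batches of similar-length strings (batch order may vary)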