| seed (string) | seed_api (string) | index (int64) |
|---|---|---|
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
| torchdata.datapipes.iter.FileOpener | 200 |
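
A minimal sketch of the `FileOpener` pattern that recurs throughout these seeds; the path here is hypothetical:

```python
from torchdata.datapipes.iter import FileOpener, IterableWrapper

# FileOpener consumes an iterable of paths and yields (path, stream) tuples.
dp = IterableWrapper(["data/sample.txt"])  # hypothetical path
dp = FileOpener(dp, mode="b")              # "b" opens each file as a binary stream
for path, stream in dp:
    print(path, stream.read()[:16])
```
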
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self.assertTrue(path in self.temp_sub_fil... | torchdata.datapipes.iter.MapKeyZipper | 201 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
graph = traverse(datapipe, only_datapipe=True)
| torchdata.dataloader2.graph.traverse | 202 |
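
A small sketch of `traverse`, matching the `only_datapipe=True` call in the seed; the pipeline is illustrative:

```python
from torchdata.dataloader2.graph import traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper

src = IterableWrapper(range(4))
pipe = Mapper(src, lambda x: x * 2)
# In this version, traverse returns the pipeline as a nested dict keyed by
# DataPipe instances, e.g. {Mapper: {IterableWrapper: {}}}.
graph = traverse(pipe, only_datapipe=True)
```
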
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
| torchdata.datapipes.iter.IoPathFileOpener | 203 |
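
For reference, `Mapper` and `UnBatcher` as used in this seed compose like this (toy data):

```python
from torchdata.datapipes.iter import IterableWrapper, Mapper, UnBatcher

batched = IterableWrapper([[0, 1, 2], [3, 4]])
flat = UnBatcher(batched)                # yields 0, 1, 2, 3, 4
doubled = Mapper(flat, lambda x: x * 2)  # applies the function element-wise
print(list(doubled))                     # [0, 2, 4, 6, 8]
```
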
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
}
self._custom_files_set_up(json_file... | torchdata.datapipes.iter.FileOpener | 204 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
return path_and_stream[0] == "empty.json"... | torchdata.datapipes.iter.TarArchiveLoader | 205 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
# Reset Test:
result_dp = Rows2Columnar(source_dp, column_names_list)
... | torchdata.datapipes.iter.FileOpener | 206 |
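
A sketch of `Rows2Columnar`, assuming (as the test above suggests) that it consumes batches of rows and emits one column dict per batch:

```python
from torchdata.datapipes.iter import IterableWrapper, Rows2Columnar

# Each element is a batch of rows; the column names are supplied by the caller.
batches = IterableWrapper([[[0, "a"], [1, "b"]]])
columns = Rows2Columnar(batches, ["id", "label"])
print(list(columns))  # [{'id': [0, 1], 'label': ['a', 'b']}]
```
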
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Worth noting that the .tar and .zip tests w... | torchdata.datapipes.iter.Mapper | 207 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self.... | torchdata.datapipes.iter.FileLoader | 208 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional test: work with .tar.gz files
... | torchdata.datapipes.iter.CSVDictParser | 209 |
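
A sketch of `CSVDictParser` on a hypothetical CSV file; it maps each data row to a dict keyed by the header row:

```python
from torchdata.datapipes.iter import CSVDictParser, FileOpener, IterableWrapper

# Assumes "1.csv" contains "key,item\na,1\nb,2", as in the fixtures above.
dp = FileOpener(IterableWrapper(["1.csv"]), mode="b")
rows = CSVDictParser(dp)
print(list(rows))  # [{'key': 'a', 'item': '1'}, {'key': 'b', 'item': '2'}]
```
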
from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
| torchdata.datapipes.iter.FileOpener | 210 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
datapipe_gz_1 = FileLister(self.temp_dir.name... | torchdata.datapipes.iter.Decompressor | 211 |
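
A sketch of `Decompressor` on a hypothetical gzip file; `file_type` can usually be omitted and inferred from the extension:

```python
from torchdata.datapipes.iter import Decompressor, FileOpener, IterableWrapper

dp = FileOpener(IterableWrapper(["data.txt.gz"]), mode="b")  # hypothetical file
dp = Decompressor(dp, file_type="gzip")  # yields (path, decompressed stream)
```
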
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
def test_decompressor_iterdatapipe(self):
... | torchdata.datapipes.iter.Mapper | 212 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
"test": 100_000,
}[self._split]
def _filter_meta(self, data: Tuple[str, Any]) -> bool:
return self._classifiy_devkit(data) == ImageNetDemux.META
... | torchdata.datapipes.iter.IterableWrapper | 213 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader
return (img_info, img, mask)
return (img_info, img)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
self._decompress_dir()
dp = Se... | torchdata.datapipes.iter.IterableWrapper | 214 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
csv_files = {"1.csv": "key,item\na,1\nb,2", "... | torchdata.datapipes.iter.RarArchiveLoader | 215 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self._write_test_xz_files()
# Functi... | torchdata.mpii.MpiiData | 216 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
with zipfile.ZipFile(path, "w") as myzip:
... | torchdata.datapipes.iter.Mapper | 217 |
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
self.assertEqual(list(range(5)) * 2, res_after_reset)
# __len__ Test: retur... | torchdata.dataloader2.graph.replace_dp | 218 |
from torchdata.datapipes.iter import IterableWrapper
(iterdp.FSSpecFileOpener, None, (), {}),
(
iterdp.FSSpecSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn":... | torchdata.datapipes.iter.HashChecker | 219 |
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
def MyImageFolder(root=IMAGES_ROOT, transform=None):
if not USE_FORK_DATAPIPE:
        # Yes, we had to scan files twice. Alternatively it is possible to use
        # `fork` DataPipe, but it will require a buffer equal to the size of all
... | torchdata.datapipes.iter.TarArchiveLoader | 220 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser
label=Label(self.info.categories.index(category), category=category),
path=path,
image=decoder(buffer) if decoder else buffer,
)
def _make_da... | torchdata.datapipes.functional_datapipe | 221 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self.assertEqual(expected_res, list(csv_parse... | torchdata.datapipes.iter.IterableWrapper | 222 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, IterKeyZipper, Demultiplexer, LineReader, CSVParser
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp... | torchdata.datapipes.iter.CSVParser | 223 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
devkit_dp, 2, self._classifiy_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
meta_dp = Mapper(meta_dp, self._extract_categories_... | torchdata.datapipes.iter.IterableWrapper | 224 |
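
The `Demultiplexer` call above splits one pipe into several by a classifier function; a toy sketch:

```python
from torchdata.datapipes.iter import Demultiplexer, IterableWrapper

source = IterableWrapper(range(6))
# classifier_fn returns the output index per element (None drops the element)
evens, odds = Demultiplexer(source, 2, lambda x: x % 2, drop_none=True)
print(list(evens), list(odds))  # [0, 2, 4] [1, 3, 5]
```
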
from torchdata import load_traindata
return parser.parse_args()
if __name__ == '__main__':
params = parse_args()
train_loader = load_traindata(params)
model = PHMModel(params, trainable=True)
if params.restore:
model.load_model(params.load_file)
print(f'done loading checkpoint: {... | torchdata.datapipes.iter.BucketBatcher | 225 |
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split in {"train", "test"}:
dp = resource_dps[0]
... | torchdata.datapipes.iter.IterableWrapper | 226 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
xz_decompress_dp = Decompressor(xz_load_dp, f... | torchdata.datapipes.iter.Mapper | 227 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
self._write_test_tar_gz_files()
data... | torchdata.datapipes.iter.Zipper | 228 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_shar... | torchdata.datapipes.iter.Mapper | 229 |
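
`Zipper` pairs elements from multiple pipes positionally, which is what joins the image and target streams above; toy data:

```python
from torchdata.datapipes.iter import IterableWrapper, Zipper

images = IterableWrapper(["img0", "img1"])
labels = IterableWrapper([0, 1])
pairs = Zipper(images, labels)  # yields ("img0", 0), ("img1", 1)
```
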
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if config.split == "train" else self._prepare_test_data)
| torchdata.datapipes.iter.Zipper | 230 |
import torchdata
raise ModuleNotFoundError("Torchdata must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
if buffer_size:
self.handler.s... | torchdata.datapipes.iter.IterableWrapper | 231 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, ZipArchiveReader, Zipper, IterKeyZipper
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bboxes_d... | torchdata.datapipes.iter.IterableWrapper | 232 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
csv_dict_parser_dp = datapipe3.parse_csv_as_d... | torchdata.load_traindata | 233 |
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
... | torchdata.datapipes.iter.TarArchiveLoader | 234 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
with self.assertRaisesRegex(
Exception,
"Cannot remove source DataPipe that is the first DataPipe in the pipeline",
):
remove_dp(graph, src_dp)
with self.assertRaisesRegex(
... | torchdata.datapipes.iter.FileOpener | 235 |
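
A sketch of the graph-editing flow these assertions exercise; the pipeline is illustrative (removing a mid-pipeline node is allowed, unlike the source node tested above):

```python
from torchdata.dataloader2.graph import find_dps, remove_dp, traverse
from torchdata.datapipes.iter import IterableWrapper, Mapper, Shuffler

src = IterableWrapper(range(10))
pipe = Mapper(Shuffler(src), lambda x: x + 1)
graph = traverse(pipe, only_datapipe=True)
shuffler = find_dps(graph, Shuffler)[0]  # locate the Shuffler node by type
graph = remove_dp(graph, shuffler)       # splice it out of the graph
```
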
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
# Functional Test: work with .tar files
... | torchdata.datapipes.utils.StreamWrapper | 236 |
from torchdata.datapipes.iter import FileLister
def get_dataloader(
parquet_directory, world_size, rank, num_embeddings=4096, salt=0, batch_size=16
):
source_dp = FileLister(parquet_directory, masks="*.parquet")
# TODO support batch_size for load_parquet_as_df.
# TODO use OSSArrowDataPipe once it is re... | torchdata.datapipes.iter.LineReader | 237 |
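
`FileLister` with a glob mask, as above, expands to matching paths lazily; the directory is hypothetical:

```python
from torchdata.datapipes.iter import FileLister

dp = FileLister("data/", masks="*.parquet")  # hypothetical directory
for path in dp:
    print(path)
```
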
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, Filter, Demultiplexer, TarArchiveLoader, Enumerator
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
... | torchdata.datapipes.iter.HttpReader | 238 |
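
`Enumerator` as called above attaches a running index starting at the given value:

```python
from torchdata.datapipes.iter import Enumerator, IterableWrapper

labels = IterableWrapper(["cat", "dog"])
indexed = Enumerator(labels, 1)  # yields (1, "cat"), (2, "dog")
```
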
from torchdata.datapipes.iter import BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, ParagraphAggregator, Rows2Columnar, SampleMultiplexer
self.assertEqual(expected_result[:n_elements_before_reset], res_before_reset)
... | torchdata.datapipes.iter.Decompressor | 239 |
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
tar_gz_load_dp = FileOpener(tar_gz_file_dp, m... | torchdata.datapipes.utils.StreamWrapper | 240 |
from torchdata.datapipes.iter import FileOpener, IterableWrapper
hash_dict={_filepath_fn(root, split): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, encoding="utf-8")
return cache_dp.parse_js... | torchdata.datapipes.iter.Decompressor | 241 |
from torchdata.dataloader2.graph import find_dps, remove_dp, replace_dp, traverse
new_dp2 = Adaptor(m2)
graph = replace_dp(graph, m1, new_dp1)
exp_g1 = {
dp: {
m2: {c1: {dm: {ub: {new_dp1: {m1: {src_dp: {}}}}}}},
c2: {dm: {ub: {new_dp1: {m1: {src_dp:... | torchdata.dataloader2.graph.traverse | 242 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, Demultiplexer, Filter, IterKeyZipper, LineReader
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, config=config, decoder=decoder))
def _generate_categories(self, r... | torchdata.datapipes.iter.IterableWrapper | 243 |
from torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffl... | torchdata.dataloader2.DataLoader2 | 244 |
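
A minimal `DataLoader2` sketch with a multiprocessing reading service; the pipeline is illustrative:

```python
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(100)).shuffle().sharding_filter()
dl = DataLoader2(dp, reading_service=MultiProcessingReadingService(num_workers=2))
for item in dl:
    pass
dl.shutdown()  # release worker processes
```
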
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
def MyHTTPImageFolder(transform=None):
# HTTP Protocol doesn't support listing files, so we had to provide it explicitly
list_files = ExpandURLPatternDataPipe(HTTP_PATH_CAT) + ExpandURLPatternDataPipe(HTTP_PATH_DOG)
list_files_0, l... | torchdata.datapipes.iter.Decompressor | 245 |
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService, ReadingServiceInterface
@unittest.skipIf(IS_WINDOWS, "Fork is required for lambda")
def test_multiprocessing_reading_service(self) -> None:
_, (*_, dp) = self._get_datapipes() # pyre-ignore
import torch.multiprocess... | torchdata.datapipes.iter.Mapper | 246 |
from torchdata.datapipes.iter import HttpReader, IterableWrapper
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
# Reset Test: http_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = http_reader_dp.check_hash({f... | torchdata.datapipes.iter.Decompressor | 247 |
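
`check_hash` (the functional form of `HashChecker`) re-yields each (path, stream) pair only when its digest matches; the file and digest here are hypothetical:

```python
from torchdata.datapipes.iter import FileOpener, IterableWrapper

dp = FileOpener(IterableWrapper(["data/file.txt"]), mode="b")  # hypothetical
dp = dp.check_hash({"data/file.txt": "d41d8cd98f00b204e9800998ecf8427e"},
                   hash_type="md5")  # raises on mismatch
```
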
from torchdata.datapipes.iter import IterDataPipe, Mapper, Shuffler, Filter, ZipArchiveReader, Zipper, IterKeyZipper
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
images_dp = ZipArchiveReader(images_dp)
| torchdata.datapipes.iter.IterableWrapper | 248 |
from torchdata.datapipes.iter import IterDataPipe, Filter, Mapper
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, functools.partial(self._collate_and_decode, decoder=decoder))
def _generate_categories(self, root... | torchdata.mpii.MpiiData | 249 |
from torchdata.datapipes.iter import FileOpener, IterableWrapper
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(partial(_filter_fn, split))
... | torchdata.datapipes.iter.Mapper | 250 |
from torchdata.datapipes.iter import IterableWrapper
(iterdp.Header, None, (3,), {}),
(iterdp.HttpReader, None, (), {}),
# TODO (ejguan): Deterministic serialization is required
# (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}),
(iterdp... | torchdata.datapipes.iter.HashChecker | 251 |
from torchdata.datapipes.utils import StreamWrapper
else:
response = session.get(url, timeout=timeout, stream=True)
if "content-disposition" not in response.headers:
raise RuntimeError("Internal error: headers don't contain content-disposition.")
filename = re.findall(... | torchdata.datapipes.iter.IterableWrapper | 252 |
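
`StreamWrapper` proxies a file-like object so DataPipes can track and auto-close it; a minimal sketch with a hypothetical file:

```python
from torchdata.datapipes.utils import StreamWrapper

stream = StreamWrapper(open("data/file.txt", "rb"))  # hypothetical file
print(stream.read(4))  # attribute access is forwarded to the wrapped stream
stream.close()
```
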
from torchdata.datapipes.iter import FileOpener, IterableWrapper
:rtype: Tuple[int, str, str]
"""
# TODO Remove this after removing conditional dependency
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following ins... | torchdata.datapipes.iter.FileLister | 253 |
from torchdata.datapipes.iter import IterableWrapper
(iterdp.OnDiskCacheHolder, None, (), {}),
(iterdp.OnlineReader, None, (), {}),
(
iterdp.ParagraphAggregator,
IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]),
... | torchdata.datapipes.iter.FileLister | 254 |
from torchdata.datapipes.iter import FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader
}
for x, z in self._ground_truth_seq_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(se... | torchdata.datapipes.map.SequenceWrapper | 255 |
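
`SequenceWrapper` is the map-style counterpart of `IterableWrapper`; it supports indexing and `len()`:

```python
from torchdata.datapipes.map import SequenceWrapper

dp = SequenceWrapper(["a", "b", "c"])
print(len(dp), dp[1])  # 3 b
```
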
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
| torchdata.datapipes.iter.LineReader | 256 |
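
`LineReader` consumes the (path, stream) pairs produced by `FileOpener` above and yields one pair per line; the file is hypothetical:

```python
from torchdata.datapipes.iter import FileOpener, IterableWrapper, LineReader

dp = FileOpener(IterableWrapper(["notes.txt"]), mode="b")  # hypothetical
lines = LineReader(dp)  # yields (path, line) for every line of every file
```
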
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `dev`)
:returns: DataPipe that... | torchdata.datapipes.iter.Demultiplexer | 257 |
from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler
def hint_shuffling(datapipe: IterDataPipe[D]) -> Shuffler[D]:
return Shuffler(datapipe, default=False, buffer_size=INFINITE_BUFFER_SIZE)
| torchdata.datapipes.iter.IterableWrapper | 258 |
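
`hint_shuffling` (and the matching `hint_sharding`) mark where a reading service may enable shuffling and sharding; functionally they correspond to the `.shuffle()` and `.sharding_filter()` chain:

```python
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10)).shuffle().sharding_filter()
```
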
from torchdata.datapipes.iter import CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, JsonParser, RarArchiveLoader, Saver, TarArchiveLoader, XzFileLoader, ZipArchiveLoader
hash_dict[path] = hash_func.hexdi... | torchdata.dataloader2.DataLoader2 | 259 |