Datasets:
Edward J. Schwartz
committed on
Commit
·
a4aefde
1
Parent(s):
73b8481
Switch to arrow builder
Browse files- oo-method-test.py +31 -15
oo-method-test.py
CHANGED
|
@@ -2,6 +2,9 @@
|
|
| 2 |
|
| 3 |
import datasets
|
| 4 |
|
|
|
|
|
|
|
|
|
|
| 5 |
_DATA_FILES = ['data/combined-00009-of-00013-97a88bccf4215954.parquet',
|
| 6 |
'data/combined-00004-of-00013-119d653561443d7b.parquet',
|
| 7 |
'data/combined-00007-of-00013-ab54cce4ee6331d0.parquet',
|
|
@@ -16,25 +19,21 @@ _DATA_FILES = ['data/combined-00009-of-00013-97a88bccf4215954.parquet',
|
|
| 16 |
'data/combined-00011-of-00013-4c21766cedd5a4a0.parquet',
|
| 17 |
'data/combined-00008-of-00013-674f74b6f2288c61.parquet']
|
| 18 |
|
| 19 |
-
class OOMethodTestDataset(datasets.
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
kwargs['data_files'] = _DATA_FILES
|
| 23 |
-
super().__init__(*args, **kwargs)
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
downloaded_files = dl_manager.download(files)
|
| 32 |
-
downloaded_files = [dl_manager.iter_files(file) for file in files]
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
| 36 |
|
| 37 |
-
|
| 38 |
datasets.SplitGenerator(
|
| 39 |
name="combined",
|
| 40 |
gen_kwargs={
|
|
@@ -42,4 +41,21 @@ class OOMethodTestDataset(datasets.packaged_modules.parquet.Parquet):
|
|
| 42 |
},
|
| 43 |
),
|
| 44 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
|
|
|
|
| 2 |
|
| 3 |
import datasets
|
| 4 |
|
| 5 |
+
import pyarrow as pa
|
| 6 |
+
import pyarrow.parquet as pq
|
| 7 |
+
|
| 8 |
_DATA_FILES = ['data/combined-00009-of-00013-97a88bccf4215954.parquet',
|
| 9 |
'data/combined-00004-of-00013-119d653561443d7b.parquet',
|
| 10 |
'data/combined-00007-of-00013-ab54cce4ee6331d0.parquet',
|
|
|
|
| 19 |
'data/combined-00011-of-00013-4c21766cedd5a4a0.parquet',
|
| 20 |
'data/combined-00008-of-00013-674f74b6f2288c61.parquet']
|
| 21 |
|
| 22 |
+
class OOMethodTestDataset(datasets.ArrowBasedBuilder):
|
| 23 |
+
def __init__(self, *args, **kwargs):
    """Forward every positional and keyword argument unchanged to the
    parent ArrowBasedBuilder constructor; no extra setup happens here."""
    super().__init__(*args, **kwargs)
|
|
|
|
|
|
|
| 25 |
|
| 26 |
+
def _info(self):
    """Return the dataset's metadata.

    An empty ``datasets.DatasetInfo`` is returned — no explicit features,
    description, or citation are declared here.
    """
    info = datasets.DatasetInfo()
    return info
|
| 28 |
|
| 29 |
+
def _split_generators(self, dl_manager):
|
| 30 |
+
files = _DATA_FILES
|
| 31 |
+
downloaded_files = dl_manager.download(files)
|
|
|
|
|
|
|
| 32 |
|
| 33 |
+
#print(files)
|
| 34 |
+
#print(downloaded_files)
|
| 35 |
|
| 36 |
+
return [
|
| 37 |
datasets.SplitGenerator(
|
| 38 |
name="combined",
|
| 39 |
gen_kwargs={
|
|
|
|
| 41 |
},
|
| 42 |
),
|
| 43 |
]
|
| 44 |
+
|
| 45 |
+
def _generate_tables(self, files):
    """Yield Arrow tables read from the given parquet files.

    Args:
        files: iterable of local parquet file paths (presumably the
            downloaded files passed via ``gen_kwargs`` from
            ``_split_generators`` — confirm against that method).

    Yields:
        ``(key, pyarrow.Table)`` pairs, where ``key`` is the string
        ``"{file_idx}_{batch_idx}"`` so every yielded table has a
        unique identifier.

    Raises:
        ValueError: re-raised unchanged if pyarrow fails while decoding
            a record batch.
    """
    for file_idx, path in enumerate(files):
        with open(path, "rb") as f:
            parquet_file = pq.ParquetFile(f)
            try:
                # Stream bounded batches so a large parquet file is never
                # materialized in memory all at once.
                for batch_idx, record_batch in enumerate(
                    parquet_file.iter_batches(batch_size=10_000)
                ):
                    pa_table = pa.Table.from_batches([record_batch])
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield f"{file_idx}_{batch_idx}", pa_table
            except ValueError:
                # Fix: the original bound the exception as `e` but never
                # used it (the logging call was commented out). Re-raise
                # without the dead binding; behavior is unchanged.
                # logger.error(f"Failed to read file '{path}' with error: ...")
                raise
|
| 61 |
|