Datasets: Update dataset.py
dataset.py  +10 -25
@@ -4,7 +4,6 @@ import datasets
 import csv
 import pyarrow as pa
 
-# Default data files for the Hub viewer (when no data_files passed)
 DEFAULT_DATA_FILES = {
     "Thresholding": {
         "train": "train/train.csv",
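The DEFAULT_DATA_FILES mapping is keyed by config name and supplies per-split paths when a caller passes no data_files. A minimal usage sketch, assuming the script is hosted in a Hub repo (the repo id below is hypothetical):

from datasets import load_dataset

# "Thresholding" is the config name from DEFAULT_DATA_FILES above;
# with no data_files argument, the script falls back to its defaults.
ds = load_dataset("user/my-dataset", "Thresholding")  # hypothetical repo id
print(ds["train"][0])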
@@ -37,32 +36,18 @@ class MyDataset(GeneratorBasedBuilder):
     ]
 
     def _info(self):
-
-        return DatasetInfo()
+        return DatasetInfo()  # no schema assumptions
 
     def _split_generators(self, dl_manager):
-        # Try
-
-
-
-        if not data_files:
-            if self.config.name not in DEFAULT_DATA_FILES:
-                raise ValueError(f"No default files for config {self.config.name}")
-            data_files = [
-                {"split": k, "path": v}
-                for k, v in DEFAULT_DATA_FILES[self.config.name].items()
-            ]
-
-        def get_path(split_name, fallback=None):
-            for entry in data_files:
-                if entry["split"] == split_name:
-                    return entry["path"]
-            return fallback
+        # Try default files if config has no data_files
+        file_dict = DEFAULT_DATA_FILES.get(self.config.name)
+        if not file_dict:
+            raise ValueError(f"No default data files defined for config: {self.config.name}")
 
         return [
-            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath":
-            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath":
-            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath":
+            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": file_dict["train"]}),
+            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath": file_dict.get("val") or file_dict.get("dev")}),
+            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": file_dict["test"]}),
         ]
 
     def _generate_examples(self, filepath):
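The new split resolution tolerates either "val" or "dev" as the validation key. A standalone sketch of that fallback, using a hypothetical helper name:

def resolve_split_paths(file_dict):
    # Mirrors the diff: "val" wins over "dev" for validation;
    # "train" and "test" are required keys and raise KeyError if absent.
    return {
        "train": file_dict["train"],
        "validation": file_dict.get("val") or file_dict.get("dev"),
        "test": file_dict["test"],
    }

paths = resolve_split_paths({"train": "t.csv", "dev": "d.csv", "test": "x.csv"})
assert paths["validation"] == "d.csv"

Note that if neither key is present, the validation filepath resolves to None, which the code as committed does not guard against.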
@@ -70,8 +55,8 @@ class MyDataset(GeneratorBasedBuilder):
         try:
             with open(filepath, "rb") as f:
                 table = pa.ipc.RecordBatchFileReader(f).read_all()
-        except pa.lib.ArrowInvalid:
-            raise ValueError(f"
+        except pa.lib.ArrowInvalid as e:
+            raise ValueError(f"Invalid Arrow file at {filepath}: {e}")
         data = table.to_pydict()
         for i in range(len(next(iter(data.values())))):
             yield i, {k: data[k][i] for k in data}
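_generate_examples reads the Arrow IPC file format; a file that is not valid IPC (a plain CSV, for instance) makes pa.ipc.RecordBatchFileReader raise ArrowInvalid, which the new except clause converts into a ValueError. A self-contained sketch (hypothetical filename) that writes a file this reader accepts and iterates it the same way:

import pyarrow as pa

# Write a tiny Arrow IPC file, then read it back exactly as _generate_examples does.
table = pa.table({"text": ["a", "b"], "label": [0, 1]})
with pa.OSFile("sample.arrow", "wb") as sink:
    with pa.ipc.new_file(sink, table.schema) as writer:
        writer.write_table(table)

with open("sample.arrow", "rb") as f:
    loaded = pa.ipc.RecordBatchFileReader(f).read_all()

data = loaded.to_pydict()
for i in range(len(next(iter(data.values())))):
    print(i, {k: data[k][i] for k in data})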
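_info still returns a bare DatasetInfo(), leaving the schema to be inferred. If the columns were fixed, it could declare them explicitly; a sketch with assumed column names (the real schema is not stated in this diff):

from datasets import DatasetInfo, Features, Value

def _info(self):
    # Assumed columns for illustration only.
    return DatasetInfo(
        features=Features({"text": Value("string"), "label": Value("int64")})
    )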