Modalities: Text · Formats: arrow · Languages: English · Libraries: Datasets
AnnaWegmann committed (verified) · commit a519e21 · parent 635cc57

Update dataset.py

Files changed (1): dataset.py (+42 −11)
dataset.py CHANGED
@@ -1,26 +1,57 @@
+""" generated with GPT-4o """
 from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, BuilderConfig
 import datasets
 import csv
 import pyarrow as pa
 
+# Default data files for the Hub viewer (when no data_files passed)
+DEFAULT_DATA_FILES = {
+    "Thresholding": {
+        "train": "train/train.csv",
+        "val": "validation/validation.csv",
+        "test": "test/test.csv",
+    },
+    "Contrastive Learning": {
+        "train": "train/data-00000-of-00001.arrow",
+        "dev": "validation/data-00000-of-00001.arrow",
+        "test": "test/data-00000-of-00001.arrow",
+    }
+}
+
 class CustomConfig(BuilderConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
 class MyDataset(GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
-        CustomConfig(name="Contrastive Learning", version=datasets.Version("1.0.0"),
-                     description="Loads Arrow files for contrastive learning"),
-        CustomConfig(name="Thresholding", version=datasets.Version("1.0.0"),
-                     description="Loads CSV files for thresholding"),
+        CustomConfig(
+            name="Contrastive Learning",
+            version=datasets.Version("1.0.0"),
+            description="Loads Arrow files for contrastive learning"
+        ),
+        CustomConfig(
+            name="Thresholding",
+            version=datasets.Version("1.0.0"),
+            description="Loads CSV files for thresholding task"
+        ),
     ]
 
     def _info(self):
-        # No features defined schema will be inferred
+        # Let Hugging Face infer features from data (no hard-coded schema)
        return DatasetInfo()
 
     def _split_generators(self, dl_manager):
-        data_files = self.config.data_files
+        # Try to get data_files passed in the config
+        data_files = getattr(self.config, "data_files", None)
+
+        # Fallback to default if not passed
+        if not data_files:
+            if self.config.name not in DEFAULT_DATA_FILES:
+                raise ValueError(f"No default files for config {self.config.name}")
+            data_files = [
+                {"split": k, "path": v}
+                for k, v in DEFAULT_DATA_FILES[self.config.name].items()
+            ]
 
         def get_path(split_name, fallback=None):
            for entry in data_files:
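With no data_files supplied, the fallback above normalizes DEFAULT_DATA_FILES into a list of split/path records that get_path can scan. A quick, self-contained check of what that comprehension produces for the "Thresholding" config:

# Mirrors the fallback logic in _split_generators for one config.
DEFAULT_DATA_FILES = {
    "Thresholding": {
        "train": "train/train.csv",
        "val": "validation/validation.csv",
        "test": "test/test.csv",
    },
}

data_files = [
    {"split": k, "path": v}
    for k, v in DEFAULT_DATA_FILES["Thresholding"].items()
]
for entry in data_files:
    print(entry)
# {'split': 'train', 'path': 'train/train.csv'}
# {'split': 'val', 'path': 'validation/validation.csv'}
# {'split': 'test', 'path': 'test/test.csv'}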
@@ -37,14 +68,14 @@ class MyDataset(GeneratorBasedBuilder):
     def _generate_examples(self, filepath):
         if filepath.endswith(".arrow"):
             table = pa.ipc.RecordBatchFileReader(filepath).read_all()
-            records = table.to_pydict()
-            keys = list(records.keys())
-            for i in range(len(records[keys[0]])):
-                yield i, {k: records[k][i] for k in keys}
+            data = table.to_pydict()
+            keys = list(data.keys())
+            for i in range(len(data[keys[0]])):
+                yield i, {k: data[k][i] for k in keys}
         elif filepath.endswith(".csv"):
             with open(filepath, encoding="utf-8") as f:
                 reader = csv.DictReader(f)
                 for i, row in enumerate(reader):
                     yield i, row
         else:
-            raise ValueError(f"Unsupported file format for file: {filepath}")
+            raise ValueError(f"Unsupported file format: {filepath}")
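For reference, a minimal self-contained sketch of the row iteration _generate_examples performs on the Arrow branch. It writes a tiny IPC file first so it runs on its own (the file name demo.arrow is illustrative), and it reads via pa.ipc.open_file, the functional equivalent of constructing a RecordBatchFileReader:

import pyarrow as pa

# Write a tiny Arrow IPC file so the sketch is runnable as-is.
table = pa.table({"text": ["a", "b"], "label": [0, 1]})
with pa.OSFile("demo.arrow", "wb") as sink:
    with pa.ipc.new_file(sink, table.schema) as writer:
        writer.write_table(table)

# Read it back and iterate rows column-wise, as the builder does.
with pa.OSFile("demo.arrow", "rb") as source:
    loaded = pa.ipc.open_file(source).read_all()

data = loaded.to_pydict()
keys = list(data.keys())
for i in range(len(data[keys[0]])):
    print(i, {k: data[k][i] for k in keys})
# 0 {'text': 'a', 'label': 0}
# 1 {'text': 'b', 'label': 1}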
 
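With the default file mapping in place, either config should load without an explicit data_files argument, which is what the Hub viewer relies on. A usage sketch: the repo id below is a placeholder (not taken from this page), and trust_remote_code=True may be required for script-based loaders on recent versions of datasets:

from datasets import load_dataset

REPO_ID = "AnnaWegmann/my-dataset"  # placeholder -- substitute the real repo id

# CSV-backed config; falls back to DEFAULT_DATA_FILES["Thresholding"]
thresholding = load_dataset(REPO_ID, name="Thresholding", trust_remote_code=True)

# Arrow-backed config; falls back to DEFAULT_DATA_FILES["Contrastive Learning"]
contrastive = load_dataset(REPO_ID, name="Contrastive Learning", trust_remote_code=True)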