shaffei committed on
Commit
b951534
·
verified ·
1 Parent(s): f3f8630

add random splits to the filtered data

Browse files
Files changed (1) hide show
  1. ppb_affinity.py +62 -32
ppb_affinity.py CHANGED
@@ -1,8 +1,9 @@
1
  import datasets
2
  import csv
 
3
 
4
  class ppb_affinity(datasets.GeneratorBasedBuilder):
5
- VERSION = datasets.Version("1.0.1")
6
 
7
  BUILDER_CONFIGS = [
8
  datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
@@ -11,38 +12,67 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
11
 
12
  def _info(self):
13
  return datasets.DatasetInfo()
14
-
15
  def _split_generators(self, dl_manager):
16
- if self.config.name == "raw":
17
- filepath = dl_manager.download_and_extract("raw.csv")
18
- return [datasets.SplitGenerator(
 
 
19
  name=datasets.Split.TRAIN,
20
- gen_kwargs={"filepath": filepath}
21
- )]
22
- else:
23
- filepath = dl_manager.download_and_extract("filtered.csv")
24
- return [
25
- datasets.SplitGenerator(
26
- name=datasets.Split.TRAIN,
27
- gen_kwargs={"filepath": filepath, "split": "train"},
28
- ),
29
- datasets.SplitGenerator(
30
- name=datasets.Split.VALIDATION,
31
- gen_kwargs={"filepath": filepath, "split": "val"},
32
- ),
33
- datasets.SplitGenerator(
34
- name=datasets.Split.TEST,
35
- gen_kwargs={"filepath": filepath, "split": "test"},
36
- ),
37
- ]
38
-
39
- def _generate_examples(self, filepath, split=None):
 
 
 
 
 
 
 
40
  with open(filepath, encoding="utf-8") as f:
41
  reader = csv.DictReader(f)
42
- for idx, row in enumerate(reader):
43
- if self.config.name == "raw":
44
- yield idx, row
45
- else:
46
- if row["split"] == split:
47
- del row["split"] # Remove split column from examples
48
- yield idx, row
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import datasets
2
  import csv
3
+ import random
4
 
5
  class ppb_affinity(datasets.GeneratorBasedBuilder):
6
+ VERSION = datasets.Version("1.0.2")
7
 
8
  BUILDER_CONFIGS = [
9
  datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
 
12
 
13
    def _info(self):
        """Return the dataset metadata.

        No explicit features or description are declared; the datasets
        library infers the schema from the generated examples.
        """
        return datasets.DatasetInfo()
15
+
16
  def _split_generators(self, dl_manager):
17
+ """Downloads and defines dataset splits"""
18
+ filepath = dl_manager.download_and_extract("filtered.csv")
19
+
20
+ return [
21
+ datasets.SplitGenerator(
22
  name=datasets.Split.TRAIN,
23
+ gen_kwargs={"filepath": filepath, "split": "train"},
24
+ ),
25
+ datasets.SplitGenerator(
26
+ name=datasets.Split.VALIDATION,
27
+ gen_kwargs={"filepath": filepath, "split": "val"},
28
+ ),
29
+ datasets.SplitGenerator(
30
+ name=datasets.Split.TEST,
31
+ gen_kwargs={"filepath": filepath, "split": "test"},
32
+ ),
33
+
34
+ datasets.SplitGenerator(
35
+ name="train_rand",
36
+ gen_kwargs={"filepath": filepath, "split": "train_rand", "random_split": True},
37
+ ),
38
+ datasets.SplitGenerator(
39
+ name="val_rand",
40
+ gen_kwargs={"filepath": filepath, "split": "val_rand", "random_split": True},
41
+ ),
42
+ datasets.SplitGenerator(
43
+ name="test_rand",
44
+ gen_kwargs={"filepath": filepath, "split": "test_rand", "random_split": True},
45
+ ),
46
+ ]
47
+
48
+ def _generate_examples(self, filepath, split=None, random_split=False):
49
+ """Generates examples, either using predefined splits or random splits"""
50
  with open(filepath, encoding="utf-8") as f:
51
  reader = csv.DictReader(f)
52
+ data = list(reader)
53
+
54
+ if random_split:
55
+ return self._generate_examples_rand(data, split)
56
+
57
+ for idx, row in enumerate(data):
58
+ if row["split"] == split:
59
+ del row["split"] # Remove split column from examples
60
+ yield idx, row
61
+
62
+ def _generate_examples_rand(self, data, split):
63
+ """Randomly splits the dataset into 80% train, 10% val, 10% test with a fixed seed"""
64
+ random.seed(42)
65
+ random.shuffle(data)
66
+
67
+ total = len(data)
68
+ train_end = int(0.8 * total)
69
+ val_end = train_end + int(0.1 * total)
70
+
71
+ split_map = {
72
+ "train_rand": data[:train_end],
73
+ "val_rand": data[train_end:val_end],
74
+ "test_rand": data[val_end:]
75
+ }
76
+
77
+ for idx, row in enumerate(split_map[split]):
78
+ yield idx, row