Update ppb_affinity.py
Browse files — ppb_affinity.py (+9 −2)
ppb_affinity.py
CHANGED
|
@@ -3,10 +3,11 @@ import csv
|
|
| 3 |
import random
|
| 4 |
|
| 5 |
class ppb_affinity(datasets.GeneratorBasedBuilder):
|
| 6 |
-
VERSION = datasets.Version("1.0.0")
|
| 7 |
|
| 8 |
BUILDER_CONFIGS = [
|
| 9 |
datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
|
|
|
|
| 10 |
datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
|
| 11 |
datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
|
| 12 |
]
|
|
@@ -21,6 +22,12 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
|
|
| 21 |
name=datasets.Split.TRAIN,
|
| 22 |
gen_kwargs={"filepath": filepath}
|
| 23 |
)]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
elif self.config.name == "filtered":
|
| 25 |
filepath = dl_manager.download_and_extract("filtered.csv")
|
| 26 |
return [
|
|
@@ -83,7 +90,7 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
|
|
| 83 |
with open(filepath, encoding="utf-8") as f:
|
| 84 |
reader = csv.DictReader(f)
|
| 85 |
rows = list(reader)
|
| 86 |
-
if self.config.name == "raw":
|
| 87 |
for idx, row in enumerate(rows):
|
| 88 |
yield idx, row
|
| 89 |
elif self.config.name == "filtered":
|
|
|
|
| 3 |
import random
|
| 4 |
|
| 5 |
class ppb_affinity(datasets.GeneratorBasedBuilder):
|
| 6 |
+
VERSION = datasets.Version("1.0.0")
|
| 7 |
|
| 8 |
BUILDER_CONFIGS = [
|
| 9 |
datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
|
| 10 |
+
datasets.BuilderConfig(name="raw_rec", description="Raw parsed PDBs dataset with critical filtrations and missing residues recovered."),
|
| 11 |
datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
|
| 12 |
datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
|
| 13 |
]
|
|
|
|
| 22 |
name=datasets.Split.TRAIN,
|
| 23 |
gen_kwargs={"filepath": filepath}
|
| 24 |
)]
|
| 25 |
+
elif self.config.name == "raw_rec":
|
| 26 |
+
filepath = dl_manager.download_and_extract("raw_recover_missing_res.csv")
|
| 27 |
+
return [datasets.SplitGenerator(
|
| 28 |
+
name=datasets.Split.TRAIN,
|
| 29 |
+
gen_kwargs={"filepath": filepath}
|
| 30 |
+
)]
|
| 31 |
elif self.config.name == "filtered":
|
| 32 |
filepath = dl_manager.download_and_extract("filtered.csv")
|
| 33 |
return [
|
|
|
|
| 90 |
with open(filepath, encoding="utf-8") as f:
|
| 91 |
reader = csv.DictReader(f)
|
| 92 |
rows = list(reader)
|
| 93 |
+
if self.config.name in ["raw", "raw_rec"]:
|
| 94 |
for idx, row in enumerate(rows):
|
| 95 |
yield idx, row
|
| 96 |
elif self.config.name == "filtered":
|