drake463 committed on
Commit
d445159
·
1 Parent(s): ae74a72

Updated pipeline and subsets

Browse files
data/fireprotdb_20251015-164116.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43dc3e44a2908346fe69ad5178a3d82d55508284a314a943d179de7df355fc0d
3
+ size 1775499132
fireprotdb.py DELETED
@@ -1,339 +0,0 @@
1
- # fireprotdb.py
2
- from __future__ import annotations
3
-
4
- import hashlib
5
- from dataclasses import dataclass
6
- from typing import Dict, List, Optional
7
-
8
- import datasets
9
-
10
-
11
- # Change these when publishing:
12
- _CITATION = """\
13
- @misc{fireprotdb2,
14
- title = {FireProtDB 2.0},
15
- note = {See original FireProtDB 2.0 publication and ProTherm sources}
16
- }
17
- """
18
-
19
- _DESCRIPTION = """\
20
- ML-ready views of FireProtDB 2.0 derived from the raw CSV:
21
- - mutation-level regression/classification (ddg, dtm, stabilizing)
22
- - protein-level aggregated landscape view
23
- - mutation language modeling view
24
-
25
- This dataset is intended for Rosetta Commons / protein ML benchmarking.
26
- """
27
-
28
- _HOMEPAGE = "https://huggingface.co/datasets/drake463/FireProtDB" # update
29
- _LICENSE = "cc-by-4.0" # update to correct license if different
30
-
31
-
32
- # If you publish to HF, include the cleaned parquet in the repo and set this relative path.
33
- # For local testing, replace with your local path.
34
- _DEFAULT_DATA_FILE = "data/fireportdb_cleaned.parquet"
35
-
36
-
37
class FireProtDBConfig(datasets.BuilderConfig):
    """BuilderConfig that records which ML-ready view ("task") this config selects.

    The ``task`` string is read by ``FireProtDB._info`` and
    ``FireProtDB._generate_examples`` to pick the schema and row shaping.
    """

    def __init__(self, task: str, **kwargs):
        """Store *task* and forward everything else to datasets.BuilderConfig."""
        super().__init__(**kwargs)
        self.task = task
43
# Every config uses version 1.0.0 and has task == name, so the five configs are
# generated from (name, description) pairs instead of being spelled out longhand.
_BUILDER_CONFIGS = [
    FireProtDBConfig(
        name=task_name,
        version=datasets.Version("1.0.0"),
        description=task_description,
        task=task_name,
    )
    for task_name, task_description in [
        (
            "mutation_ddg",
            "Mutation-level ΔΔG regression (row-per-experiment where ddg present).",
        ),
        (
            "mutation_dtm",
            "Mutation-level ΔTm regression (row-per-experiment where dtm present).",
        ),
        (
            "mutation_binary",
            "Mutation-level binary stability classification (explicit stabilizing or ddg-sign-derived).",
        ),
        (
            "mutation_lm",
            "Mutation language-modeling view: (sequence, mutation, position, target_aa).",
        ),
        (
            "protein_landscape",
            "Protein-level aggregated landscapes: one row per protein with list of variants.",
        ),
    ]
]
75
-
76
-
77
- def _stable_hash(s: str) -> int:
78
- h = hashlib.sha256(s.encode("utf-8")).hexdigest()
79
- return int(h[:8], 16)
80
-
81
-
82
- def _split_by_protein(uniprot: Optional[str], sequence_id: Optional[str], ratios=(0.8, 0.1, 0.1)) -> str:
83
- """
84
- Deterministic protein-level split using (uniprotkb if present else sequence_id).
85
- """
86
- key = (uniprot or "").strip()
87
- if not key:
88
- key = f"seqid:{(sequence_id or '').strip()}"
89
- if not key.strip():
90
- key = "unknown"
91
- r = _stable_hash(key) / 0xFFFFFFFF
92
- if r < ratios[0]:
93
- return "train"
94
- if r < ratios[0] + ratios[1]:
95
- return "validation"
96
- return "test"
97
-
98
-
99
class FireProtDB(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing several ML-ready views of FireProtDB 2.0.

    The active ``FireProtDBConfig.task`` selects both the feature schema
    (``_info``) and the row shaping/filtering (``_generate_examples``).
    """

    BUILDER_CONFIGS = _BUILDER_CONFIGS
    DEFAULT_CONFIG_NAME = "mutation_ddg"

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo (feature schema) for the active config."""
        # Base schema for mutation-level records, shared by the mutation_ddg,
        # mutation_dtm and mutation_binary tasks.
        # NOTE(review): the mutation_binary task filters rows on a
        # "stabilizing" column in _generate_examples, but that label is
        # neither declared here nor emitted in any example — confirm whether
        # the classification target should be added to this schema.
        mutation_features = datasets.Features(
            {
                "experiment_id": datasets.Value("string"),
                "sequence_id": datasets.Value("string"),
                "uniprotkb": datasets.Value("string"),
                "protein_name": datasets.Value("string"),
                "organism": datasets.Value("string"),
                "sequence_length": datasets.Value("int32"),
                "mutation": datasets.Value("string"),
                "wt_residue": datasets.Value("string"),
                "position": datasets.Value("int32"),
                "mut_residue": datasets.Value("string"),
                "ddg": datasets.Value("float32"),
                "dtm": datasets.Value("float32"),
                "tm": datasets.Value("float32"),
                "ph": datasets.Value("float32"),
                "buffer": datasets.Value("string"),
                "method": datasets.Value("string"),
                "measure": datasets.Value("string"),
                "pmid": datasets.Value("string"),
                "doi": datasets.Value("string"),
                "publication_year": datasets.Value("int32"),
                "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                "pdb_id": datasets.Value("string"),
                "pdb_ids": datasets.Sequence(datasets.Value("string")),
            }
        )

        if self.config.task == "mutation_lm":
            features = datasets.Features(
                {
                    "experiment_id": datasets.Value("string"),
                    "sequence_id": datasets.Value("string"),
                    "uniprotkb": datasets.Value("string"),
                    "sequence": datasets.Value("string"),  # optional if you later join sequences
                    "mutation": datasets.Value("string"),
                    "position": datasets.Value("int32"),
                    "target_aa": datasets.Value("string"),
                    "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                }
            )
        elif self.config.task == "protein_landscape":
            features = datasets.Features(
                {
                    "protein_key": datasets.Value("string"),
                    "uniprotkb": datasets.Value("string"),
                    "sequence_id": datasets.Value("string"),
                    "protein_name": datasets.Value("string"),
                    "organism": datasets.Value("string"),
                    "sequence_length": datasets.Value("int32"),
                    "variants": datasets.Sequence(
                        {
                            "experiment_id": datasets.Value("string"),
                            "mutation": datasets.Value("string"),
                            "position": datasets.Value("int32"),
                            "wt_residue": datasets.Value("string"),
                            "mut_residue": datasets.Value("string"),
                            "ddg": datasets.Value("float32"),
                            "dtm": datasets.Value("float32"),
                            "ph": datasets.Value("float32"),
                            "buffer": datasets.Value("string"),
                            "method": datasets.Value("string"),
                            "pdb_id": datasets.Value("string"),
                            "pdb_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ),
                    "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                }
            )
        else:
            features = mutation_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Declare train/validation/test generators over the same source file."""
        # You can also host the cleaned file and put a URL here.
        data_path = dl_manager.download(_DEFAULT_DATA_FILE)

        # Each split re-reads the full table; _generate_examples keeps only the
        # rows whose deterministic per-protein split label matches wanted_split.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": data_path, "wanted_split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"path": data_path, "wanted_split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"path": data_path, "wanted_split": "test"}),
        ]

    def _generate_examples(self, path: str, wanted_split: str):
        """Yield (key, example) pairs for the active task restricted to *wanted_split*."""
        import pandas as pd

        # Read cleaned canonical table (parquet preferred, CSV fallback).
        if path.endswith(".parquet"):
            df = pd.read_parquet(path)
        else:
            df = pd.read_csv(path)

        # Safe scalar casts: pandas nullable ints / NaN floats / strings may
        # appear in any column, so failures map to None instead of raising.
        def _to_int(x):
            try:
                return int(x)
            except Exception:
                return None

        def _to_float(x):
            try:
                return float(x)
            except Exception:
                return None

        # Task-specific shaping. FIX: the original if/elif chain had three
        # identical branches — every known task starts from rows that have a
        # mutation string, so the branches are collapsed into one membership test.
        known_tasks = (
            "mutation_ddg",
            "mutation_dtm",
            "mutation_binary",
            "mutation_lm",
            "protein_landscape",
        )
        if self.config.task in known_tasks:
            df_task = df[df["mutation"].notna()].copy()
        else:
            df_task = df.copy()

        # Apply label availability filters.
        if self.config.task == "mutation_ddg":
            df_task = df_task[df_task["ddg"].notna()]
        elif self.config.task == "mutation_dtm":
            df_task = df_task[df_task["dtm"].notna()]
        elif self.config.task == "mutation_binary":
            df_task = df_task[df_task["stabilizing"].notna()]
        elif self.config.task == "mutation_lm":
            # Needs position and mut_residue for target_aa.
            df_task = df_task[df_task["position"].notna() & df_task["mut_residue"].notna()]

        # Assign protein-level split deterministically (empty strings -> None
        # so _split_by_protein applies its fallback logic).
        def protein_split(row) -> str:
            return _split_by_protein(
                uniprot=str(row.get("uniprotkb") or "").strip() or None,
                sequence_id=str(row.get("sequence_id") or "").strip() or None,
            )

        df_task["split_name"] = df_task.apply(protein_split, axis=1)
        df_task = df_task[df_task["split_name"] == wanted_split]

        if self.config.task == "protein_landscape":
            # Aggregate into one row per protein_key (uniprot preferred).
            def protein_key(row) -> str:
                u = str(row.get("uniprotkb") or "").strip()
                if u:
                    return u
                sid = str(row.get("sequence_id") or "").strip()
                return f"seqid:{sid}" if sid else "unknown"

            df_task["protein_key"] = df_task.apply(protein_key, axis=1)

            grouped = df_task.groupby("protein_key", dropna=False)

            idx = 0
            for pk, g in grouped:
                # Representative metadata comes from the group's first row.
                first = g.iloc[0]
                record = {
                    "protein_key": str(pk),
                    "uniprotkb": str(first.get("uniprotkb") or ""),
                    "sequence_id": str(first.get("sequence_id") or ""),
                    "protein_name": str(first.get("protein_name") or ""),
                    "organism": str(first.get("organism") or ""),
                    "sequence_length": _to_int(first.get("sequence_length")) or 0,
                    "variants": [],
                    "split": wanted_split,
                }
                for _, r in g.iterrows():
                    record["variants"].append(
                        {
                            "experiment_id": str(r.get("experiment_id") or ""),
                            "mutation": str(r.get("mutation") or ""),
                            "position": _to_int(r.get("position")) or -1,
                            "wt_residue": str(r.get("wt_residue") or ""),
                            "mut_residue": str(r.get("mut_residue") or ""),
                            "ddg": _to_float(r.get("ddg")),
                            "dtm": _to_float(r.get("dtm")),
                            "ph": _to_float(r.get("ph")),
                            "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
                            "method": str(r.get("method_norm") or ""),
                            "pdb_id": str(r.get("pdb_id") or ""),
                            "pdb_ids": list(r.get("pdb_ids") or []),
                        }
                    )
                yield idx, record
                idx += 1
            return

        # Mutation LM view.
        if self.config.task == "mutation_lm":
            for i, r in df_task.reset_index(drop=True).iterrows():
                yield i, {
                    "experiment_id": str(r.get("experiment_id") or ""),
                    "sequence_id": str(r.get("sequence_id") or ""),
                    "uniprotkb": str(r.get("uniprotkb") or ""),
                    "sequence": "",  # left blank unless you join real sequences elsewhere
                    "mutation": str(r.get("mutation") or ""),
                    "position": _to_int(r.get("position")) or -1,
                    "target_aa": str(r.get("mut_residue") or ""),
                    "split": wanted_split,
                }
            return

        # Standard mutation-level views.
        for i, r in df_task.reset_index(drop=True).iterrows():
            # FIX: the original used str(...).isdigit() to validate the year,
            # which silently mapped numeric float years (e.g. 2001.0, common
            # after a pandas read) to 0. _to_int handles both ints and floats
            # and still falls back to 0 for NaN/garbage.
            pub_year = _to_int(r.get("publication_year"))
            yield i, {
                "experiment_id": str(r.get("experiment_id") or ""),
                "sequence_id": str(r.get("sequence_id") or ""),
                "uniprotkb": str(r.get("uniprotkb") or ""),
                "protein_name": str(r.get("protein_name") or ""),
                "organism": str(r.get("organism") or ""),
                "sequence_length": _to_int(r.get("sequence_length")) or 0,
                "mutation": str(r.get("mutation") or ""),
                "wt_residue": str(r.get("wt_residue") or ""),
                "position": _to_int(r.get("position")) or -1,
                "mut_residue": str(r.get("mut_residue") or ""),
                "ddg": _to_float(r.get("ddg")),
                "dtm": _to_float(r.get("dtm")),
                "tm": _to_float(r.get("tm")),
                "ph": _to_float(r.get("ph")),
                "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
                "method": str(r.get("method_norm") or ""),
                "measure": str(r.get("measure_norm") or ""),
                "pmid": str(r.get("pmid") or ""),
                "doi": str(r.get("doi") or ""),
                "pdb_id": str(r.get("pdb_id") or ""),
                "pdb_ids": list(r.get("pdb_ids") or []),
                "publication_year": pub_year if pub_year is not None else 0,
                "split": wanted_split,
            }