drake463 committed on
Commit
39cb273
·
1 Parent(s): cb7d5e5

initial push

Browse files
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ data/cleaned_fireprotdb.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ data/fireprotdb_20251015-164116.csv
2
+ intermediate/*
data/cleaned_fireprotdb.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c12a2d0a63b9b4f3aabb51b578d2ec05767b7793a34429332d54f09b3410d81
3
+ size 1645128818
fireprot.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# fireprotdb.py
from __future__ import annotations

import hashlib
from dataclasses import dataclass
from typing import Dict, List, Optional

import datasets


# Change these when publishing:
_CITATION = """\
@misc{fireprotdb2,
title = {FireProtDB 2.0},
note = {See original FireProtDB 2.0 publication and ProTherm sources}
}
"""

_DESCRIPTION = """\
ML-ready views of FireProtDB 2.0 derived from the raw CSV:
- mutation-level regression/classification (ddg, dtm, stabilizing)
- protein-level aggregated landscape view
- mutation language modeling view

This dataset is intended for Rosetta Commons / protein ML benchmarking.
"""

_HOMEPAGE = "https://github.com/drake463/FireProtDB"  # update
_LICENSE = "cc-by-4.0"  # update to correct license if different


# If you publish to HF, include the cleaned parquet in the repo and set this relative path.
# For local testing, replace with your local path.
# FIX: the repo ships data/cleaned_fireprotdb.csv (see .gitattributes / the LFS
# pointer); the previous value "cleaned_fireportdb.csv" transposed "prot" ->
# "port", so dl_manager.download() could never resolve the file.
_DEFAULT_DATA_FILE = "../data/cleaned_fireprotdb.csv"
35
+
36
+
37
class FireProtDBConfig(datasets.BuilderConfig):
    """BuilderConfig that carries a ``task`` tag selecting the dataset view.

    ``task`` is one of: "mutation_ddg", "mutation_dtm", "mutation_binary",
    "mutation_lm", "protein_landscape"; it drives both the feature schema in
    ``_info()`` and the row filtering in ``_generate_examples()``.
    """

    def __init__(self, task: str, **kwargs):
        super().__init__(**kwargs)
        self.task = task
41
+
42
+
43
# One BuilderConfig per ML view. The `task` value selects the feature schema
# and the row filtering applied by the builder below.
_BUILDER_CONFIGS = [
    FireProtDBConfig(
        name="mutation_ddg",
        version=datasets.Version("1.0.0"),
        description="Mutation-level ΔΔG regression (row-per-experiment where ddg present).",
        task="mutation_ddg",
    ),
    FireProtDBConfig(
        name="mutation_dtm",
        version=datasets.Version("1.0.0"),
        description="Mutation-level ΔTm regression (row-per-experiment where dtm present).",
        task="mutation_dtm",
    ),
    FireProtDBConfig(
        name="mutation_binary",
        version=datasets.Version("1.0.0"),
        description="Mutation-level binary stability classification (explicit stabilizing or ddg-sign-derived).",
        task="mutation_binary",
    ),
    FireProtDBConfig(
        name="mutation_lm",
        version=datasets.Version("1.0.0"),
        description="Mutation language-modeling view: (sequence, mutation, position, target_aa).",
        task="mutation_lm",
    ),
    FireProtDBConfig(
        name="protein_landscape",
        version=datasets.Version("1.0.0"),
        description="Protein-level aggregated landscapes: one row per protein with list of variants.",
        task="protein_landscape",
    ),
]
75
+
76
+
77
def _stable_hash(s: str) -> int:
    """Deterministic 32-bit hash of *s* (first four bytes of its SHA-256)."""
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    # First 4 bytes big-endian == int of the first 8 hex characters.
    return int.from_bytes(digest[:4], "big")
80
+
81
+
82
def _split_by_protein(uniprot: Optional[str], sequence_id: Optional[str], ratios=(0.8, 0.1, 0.1)) -> str:
    """
    Deterministic protein-level split using (uniprotkb if present else sequence_id).

    Rows sharing a protein key always land in the same split, so no protein
    leaks across train/validation/test. ``ratios`` are the (train, validation,
    test) fractions; the remainder after train+validation goes to test.
    """
    key = (uniprot or "").strip()
    if not key:
        # FIX: check the sequence id BEFORE prefixing. Previously the empty
        # check ran on the already-prefixed "seqid:..." string, which is never
        # empty, so the "unknown" fallback was unreachable and id-less rows
        # all hashed the literal "seqid:".
        sid = (sequence_id or "").strip()
        key = f"seqid:{sid}" if sid else "unknown"
    r = _stable_hash(key) / 0xFFFFFFFF
    if r < ratios[0]:
        return "train"
    if r < ratios[0] + ratios[1]:
        return "validation"
    return "test"
97
+
98
+
99
class FireProtDB(datasets.GeneratorBasedBuilder):
    """HF ``datasets`` builder exposing several ML views of FireProtDB 2.0.

    The active ``FireProtDBConfig.task`` selects both the feature schema
    (``_info``) and the row filtering/shaping (``_generate_examples``).
    Splits are assigned deterministically per protein via
    ``_split_by_protein`` so no protein spans train/validation/test.
    """

    BUILDER_CONFIGS = _BUILDER_CONFIGS
    DEFAULT_CONFIG_NAME = "mutation_ddg"

    def _info(self) -> datasets.DatasetInfo:
        """Return the per-task feature schema plus dataset metadata."""
        # Base schema for mutation-level records
        # NOTE(review): the mutation_binary task filters on a "stabilizing"
        # column but this schema exposes no "stabilizing" feature, so the
        # classification label is never emitted — confirm whether the label
        # should be added to the schema and records.
        mutation_features = datasets.Features(
            {
                "experiment_id": datasets.Value("string"),
                "sequence_id": datasets.Value("string"),
                "uniprotkb": datasets.Value("string"),
                "protein_name": datasets.Value("string"),
                "organism": datasets.Value("string"),
                "sequence_length": datasets.Value("int32"),
                "mutation": datasets.Value("string"),
                "wt_residue": datasets.Value("string"),
                "position": datasets.Value("int32"),
                "mut_residue": datasets.Value("string"),
                "ddg": datasets.Value("float32"),
                "dtm": datasets.Value("float32"),
                "tm": datasets.Value("float32"),
                "ph": datasets.Value("float32"),
                "buffer": datasets.Value("string"),
                "method": datasets.Value("string"),
                "measure": datasets.Value("string"),
                "pmid": datasets.Value("string"),
                "doi": datasets.Value("string"),
                "publication_year": datasets.Value("int32"),
                "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                "pdb_id": datasets.Value("string"),
                "pdb_ids": datasets.Sequence(datasets.Value("string")),
            }
        )

        if self.config.task == "mutation_lm":
            features = datasets.Features(
                {
                    "experiment_id": datasets.Value("string"),
                    "sequence_id": datasets.Value("string"),
                    "uniprotkb": datasets.Value("string"),
                    "sequence": datasets.Value("string"),  # optional if you later join sequences
                    "mutation": datasets.Value("string"),
                    "position": datasets.Value("int32"),
                    "target_aa": datasets.Value("string"),
                    "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                }
            )
        elif self.config.task == "protein_landscape":
            features = datasets.Features(
                {
                    "protein_key": datasets.Value("string"),
                    "uniprotkb": datasets.Value("string"),
                    "sequence_id": datasets.Value("string"),
                    "protein_name": datasets.Value("string"),
                    "organism": datasets.Value("string"),
                    "sequence_length": datasets.Value("int32"),
                    "variants": datasets.Sequence(
                        {
                            "experiment_id": datasets.Value("string"),
                            "mutation": datasets.Value("string"),
                            "position": datasets.Value("int32"),
                            "wt_residue": datasets.Value("string"),
                            "mut_residue": datasets.Value("string"),
                            "ddg": datasets.Value("float32"),
                            "dtm": datasets.Value("float32"),
                            "ph": datasets.Value("float32"),
                            "buffer": datasets.Value("string"),
                            "method": datasets.Value("string"),
                            "pdb_id": datasets.Value("string"),
                            "pdb_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ),
                    "split": datasets.ClassLabel(names=["train", "validation", "test"]),
                }
            )
        else:
            features = mutation_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Resolve the data file and emit one generator per split."""
        # You can also host the cleaned file and put a URL here.
        data_path = dl_manager.download(_DEFAULT_DATA_FILE)

        # We'll generate ALL examples in one pass and assign split label per protein_key.
        # NOTE(review): each split re-reads and re-processes the full table in
        # _generate_examples (3x total work) — acceptable for this size, but
        # worth caching if the CSV grows.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": data_path, "wanted_split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"path": data_path, "wanted_split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"path": data_path, "wanted_split": "test"}),
        ]

    def _generate_examples(self, path: str, wanted_split: str):
        """Yield (key, record) pairs for ``wanted_split`` of the active task.

        Reads the cleaned canonical table (CSV or Parquet), applies
        task-specific label-availability filters, assigns each row its
        deterministic protein-level split, and yields only rows whose split
        matches ``wanted_split``.
        """
        import pandas as pd

        # Read cleaned canonical table
        if path.endswith(".parquet"):
            df = pd.read_parquet(path)
        else:
            df = pd.read_csv(path)

        # Ensure types
        # (pandas nullable ints may appear; keep safe casting below)
        def _to_int(x):
            # Lenient int cast: any failure (NA, bad string) becomes None.
            try:
                return int(x)
            except Exception:
                return None

        def _to_float(x):
            # Lenient float cast: any failure (NA, bad string) becomes None.
            try:
                return float(x)
            except Exception:
                return None

        # Task-specific filtering and shaping: every current task requires a
        # parsed single-point mutation; only an unknown task keeps all rows.
        if self.config.task in ("mutation_ddg", "mutation_binary", "mutation_lm"):
            df_task = df[df["mutation"].notna()].copy()
        elif self.config.task == "mutation_dtm":
            df_task = df[df["mutation"].notna()].copy()
        elif self.config.task == "protein_landscape":
            df_task = df[df["mutation"].notna()].copy()
        else:
            df_task = df.copy()

        # Apply label availability filters
        if self.config.task == "mutation_ddg":
            df_task = df_task[df_task["ddg"].notna()]
        elif self.config.task == "mutation_dtm":
            df_task = df_task[df_task["dtm"].notna()]
        elif self.config.task == "mutation_binary":
            df_task = df_task[df_task["stabilizing"].notna()]
        elif self.config.task == "mutation_lm":
            # Needs position and mut_residue for target_aa
            df_task = df_task[df_task["position"].notna() & df_task["mut_residue"].notna()]

        # Assign protein-level split deterministically
        def protein_split(row) -> str:
            return _split_by_protein(
                uniprot=str(row.get("uniprotkb") or "").strip() or None,
                sequence_id=str(row.get("sequence_id") or "").strip() or None,
            )

        df_task["split_name"] = df_task.apply(protein_split, axis=1)
        df_task = df_task[df_task["split_name"] == wanted_split]

        if self.config.task == "protein_landscape":
            # Aggregate into one row per protein_key (uniprot preferred)
            def protein_key(row) -> str:
                u = str(row.get("uniprotkb") or "").strip()
                if u:
                    return u
                sid = str(row.get("sequence_id") or "").strip()
                return f"seqid:{sid}" if sid else "unknown"

            df_task["protein_key"] = df_task.apply(protein_key, axis=1)

            grouped = df_task.groupby("protein_key", dropna=False)

            idx = 0
            for pk, g in grouped:
                # Representative metadata
                first = g.iloc[0]
                record = {
                    "protein_key": str(pk),
                    "uniprotkb": str(first.get("uniprotkb") or ""),
                    "sequence_id": str(first.get("sequence_id") or ""),
                    "protein_name": str(first.get("protein_name") or ""),
                    "organism": str(first.get("organism") or ""),
                    "sequence_length": _to_int(first.get("sequence_length")) or 0,
                    "variants": [],
                    "split": wanted_split,
                }
                for _, r in g.iterrows():
                    record["variants"].append(
                        {
                            "experiment_id": str(r.get("experiment_id") or ""),
                            "mutation": str(r.get("mutation") or ""),
                            # -1 is the sentinel for "no parseable position"
                            "position": _to_int(r.get("position")) or -1,
                            "wt_residue": str(r.get("wt_residue") or ""),
                            "mut_residue": str(r.get("mut_residue") or ""),
                            "ddg": _to_float(r.get("ddg")),
                            "dtm": _to_float(r.get("dtm")),
                            "ph": _to_float(r.get("ph")),
                            "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
                            "method": str(r.get("method_norm") or ""),
                            "pdb_id": str(r.get("pdb_id") or ""),
                            "pdb_ids": list(r.get("pdb_ids") or []),
                        }
                    )
                yield idx, record
                idx += 1
            return

        # Mutation LM view
        if self.config.task == "mutation_lm":
            for i, r in df_task.reset_index(drop=True).iterrows():
                yield i, {
                    "experiment_id": str(r.get("experiment_id") or ""),
                    "sequence_id": str(r.get("sequence_id") or ""),
                    "uniprotkb": str(r.get("uniprotkb") or ""),
                    "sequence": "",  # left blank unless you join real sequences elsewhere
                    "mutation": str(r.get("mutation") or ""),
                    "position": _to_int(r.get("position")) or -1,
                    "target_aa": str(r.get("mut_residue") or ""),
                    "split": wanted_split,
                }
            return

        # Standard mutation-level views
        for i, r in df_task.reset_index(drop=True).iterrows():
            yield i, {
                "experiment_id": str(r.get("experiment_id") or ""),
                "sequence_id": str(r.get("sequence_id") or ""),
                "uniprotkb": str(r.get("uniprotkb") or ""),
                "protein_name": str(r.get("protein_name") or ""),
                "organism": str(r.get("organism") or ""),
                "sequence_length": _to_int(r.get("sequence_length")) or 0,
                "mutation": str(r.get("mutation") or ""),
                "wt_residue": str(r.get("wt_residue") or ""),
                "position": _to_int(r.get("position")) or -1,
                "mut_residue": str(r.get("mut_residue") or ""),
                "ddg": _to_float(r.get("ddg")),
                "dtm": _to_float(r.get("dtm")),
                "tm": _to_float(r.get("tm")),
                "ph": _to_float(r.get("ph")),
                "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
                "method": str(r.get("method_norm") or ""),
                "measure": str(r.get("measure_norm") or ""),
                "pmid": str(r.get("pmid") or ""),
                "doi": str(r.get("doi") or ""),
                "pdb_id": str(r.get("pdb_id") or ""),
                "pdb_ids": list(r.get("pdb_ids") or []),
                "publication_year": int(r.get("publication_year")) if str(r.get("publication_year") or "").isdigit() else 0,
                "split": wanted_split,
            }
src/01.1_process_csv.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Clean FireProtDB 2.0 CSV into ML-ready canonical table.

Outputs:
- A canonical row-per-experiment table with parsed mutation fields and normalized columns.
- Optionally writes Parquet for speed.

Usage:
python src/01.1_process_csv.py \
--input fireprotdb_2_0.csv \
--output data/fireprotdb_clean.parquet

Notes:
- This script is conservative: it does NOT impute missing ddg/dtm.
- It standardizes a few categorical fields; extend mappings as needed.
"""

from __future__ import annotations

import argparse
import math
import re
from typing import Optional, Tuple, Dict

import pandas as pd
27
# --- PDB parsing ---
_PDB_SPLIT = re.compile(r"[;,| ]+")
_PDB_ID = re.compile(r"^[0-9][A-Za-z0-9]{3}$")  # 4-char PDB id, first char numeric

def parse_pdb_ids(x: object):
    """Extract PDB identifiers from a free-text field.

    Returns (pdb_id, pdb_ids):
      pdb_id  -- first valid 4-char PDB id (lowercase), or None
      pdb_ids -- sorted, de-duplicated list of valid ids (lowercase)
    """
    if not isinstance(x, str) or not x.strip():
        return None, []

    found = set()
    for token in _PDB_SPLIT.split(x.strip()):
        # Entries sometimes carry a chain suffix, e.g. "1ABC:A" or "1ABC_A";
        # keep only the id portion before the separator.
        candidate = re.split(r"[:_]", token.strip())[0].strip()
        if candidate and _PDB_ID.match(candidate):
            found.add(candidate.lower())

    ordered = sorted(found)
    return (ordered[0] if ordered else None), ordered
54
+
55
# --- Mutation parsing ---
# Accept common patterns:
#   A123V
#   123A>V (rare)
# (p.Ala123Val-style three-letter notation is NOT handled and falls through.)
_MUT_A123V = re.compile(r"^(?P<wt>[ACDEFGHIKLMNPQRSTVWY])(?P<pos>\d+)(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")
_MUT_123A_GT_V = re.compile(r"^(?P<pos>\d+)(?P<wt>[ACDEFGHIKLMNPQRSTVWY])>(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")


def parse_substitution(s: str) -> Tuple[Optional[str], Optional[int], Optional[str], Optional[str]]:
    """Parse a single-point substitution string.

    Returns (wt_residue, position, mut_residue, normalized_mutation), or a
    4-tuple of Nones when the input is absent or not a recognized
    single-substitution pattern (multi-mutations, indels, etc. are kept
    upstream in the raw column only).
    """
    if not isinstance(s, str) or not s.strip():
        return None, None, None, None

    text = s.strip()
    for pattern in (_MUT_A123V, _MUT_123A_GT_V):
        match = pattern.match(text)
        if match:
            wt = match.group("wt")
            pos = int(match.group("pos"))
            mut = match.group("mut")
            # Normalize both notations to the canonical A123V form.
            return wt, pos, mut, f"{wt}{pos}{mut}"

    return None, None, None, None
89
+
90
+
91
# --- Categorical normalization ---
def norm_str(x: object) -> Optional[str]:
    """Return *x* stripped, or None for non-strings and blank strings."""
    if isinstance(x, str):
        stripped = x.strip()
        if stripped:
            return stripped
    return None
97
+
98
+
99
# Canonical spellings for a few frequently-varying categorical values.
# Keys are lowercase; extend these mappings as needed.
BUFFER_MAP: Dict[str, str] = {
    "sodium tetraborate": "Sodium tetraborate",
    "tetra-borate": "Sodium tetraborate",
    "tetraborate": "Sodium tetraborate",
    "sodium phosphate": "Sodium phosphate",
}


METHOD_MAP: Dict[str, str] = {
    "dsc": "DSC",
    "cd": "CD",
}


MEASURE_MAP: Dict[str, str] = {
    "thermal": "Thermal",
}


def normalize_categoricals(df: pd.DataFrame) -> pd.DataFrame:
    """Add buffer_norm / method_norm / measure_norm columns.

    Each *_norm column maps the lowercased, stripped raw value through its
    table and falls back to the stripped original when unmapped. When the
    source column is absent, the *_norm column is created as all-NA so the
    output schema stays stable.
    """

    def _normalized(series: pd.Series, mapping: Dict[str, str]) -> pd.Series:
        raw = series.astype("string")
        mapped = raw.str.lower().str.strip().map(mapping)
        return mapped.fillna(raw.str.strip())

    for source, target, table in (
        ("BUFFER", "buffer_norm", BUFFER_MAP),
        ("METHOD", "method_norm", METHOD_MAP),
        ("MEASURE", "measure_norm", MEASURE_MAP),
    ):
        if source in df.columns:
            df[target] = _normalized(df[source], table)
        else:
            df[target] = pd.NA

    return df
140
+
141
+
142
# --- Numeric cleanup ---
def to_float(x: object) -> Optional[float]:
    """Best-effort float conversion.

    Returns None for None/NaN/blank/unparseable values. Strings that fail a
    direct float() parse fall back to their first float-looking substring
    (so "1mM" -> 1.0, "pH 7.4" -> 7.4).
    """
    if x is None or (isinstance(x, float) and math.isnan(x)):
        return None
    if isinstance(x, (int, float)):
        return float(x)
    if not isinstance(x, str):
        return None

    text = x.strip()
    if not text:
        return None
    try:
        return float(text)
    except ValueError:
        pass

    # Direct parse failed (likely a value with units): extract the first
    # numeric substring, if any.
    found = re.search(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?", text)
    if found is None:
        return None
    try:
        return float(found.group(0))
    except ValueError:
        return None
165
+
166
+
167
def clean_numeric_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Parse raw uppercase numeric columns into lowercase float columns.

    Covers the energy-like (DDG, DOMAINOME_DDG, DG, DH, DHVH),
    temperature-like (TM, DTM, EXP_TEMPERATURE) and PH columns. Every target
    column is created even when its source is absent (as all-NA), so
    downstream code can rely on the schema.
    """
    for source, target in (
        ("DDG", "ddg"),
        ("DOMAINOME_DDG", "domainome_ddg"),
        ("DG", "dg"),
        ("DH", "dh"),
        ("DHVH", "dhvh"),
        ("TM", "tm"),
        ("DTM", "dtm"),
        ("EXP_TEMPERATURE", "exp_temperature"),
        ("PH", "ph"),
    ):
        if source in df.columns:
            df[target] = df[source].map(to_float)
        else:
            df[target] = pd.NA
    return df
189
+
190
+
191
def derive_labels(df: pd.DataFrame) -> pd.DataFrame:
    """Derive the binary ``stabilizing`` label.

    Preference order: explicit STABILIZING yes/no annotation, then the sign
    of ddg (negative ddg treated as stabilizing, the common convention).
    Rows with neither source stay NA.
    """
    # Explicit annotation, when the raw column exists.
    if "STABILIZING" in df.columns:
        raw = df["STABILIZING"].astype("string").str.lower().str.strip()
        df["stabilizing_explicit"] = raw.map({"yes": True, "no": False})
    else:
        df["stabilizing_explicit"] = pd.NA

    # Sign-derived label; non-float and NaN ddg values yield NA.
    def _from_ddg(v):
        if isinstance(v, float) and v < 0:
            return True
        if isinstance(v, float) and v > 0:
            return False
        return pd.NA

    df["stabilizing_ddg"] = df["ddg"].apply(_from_ddg)

    # Unified label: explicit wins, the ddg-based label fills the gaps.
    df["stabilizing"] = df["stabilizing_explicit"]
    missing = df["stabilizing"].isna()
    df.loc[missing, "stabilizing"] = df.loc[missing, "stabilizing_ddg"]

    return df
208
+
209
+
210
def select_and_rename(df: pd.DataFrame) -> pd.DataFrame:
    """Project the working frame onto the canonical output schema.

    Missing source columns become all-NA output columns, so the schema is
    stable regardless of which columns the raw CSV actually carried.
    """
    # Raw uppercase column -> canonical lowercase name.
    rename_map = {
        "EXPERIMENT_ID": "experiment_id",
        "SEQUENCE_ID": "sequence_id",
        "MUTANT_ID": "mutant_id",
        "SOURCE_SEQUENCE_ID": "source_sequence_id",
        "TARGET_SEQUENCE_ID": "target_sequence_id",
        "SEQUENCE_LENGTH": "sequence_length",
        "SUBSTITUTION": "substitution_raw",
        "INSERTION": "insertion_raw",
        "DELETION": "deletion_raw",
        "PROTEIN": "protein_name",
        "ORGANISM": "organism",
        "UNIPROTKB": "uniprotkb",
        "EC_NUMBER": "ec_number",
        "INTERPRO": "interpro",
        "PUBLICATION_PMID": "pmid",
        "PUBLICATION_DOI": "doi",
        "PUBLICATION_YEAR": "publication_year",
        "SOURCE_DATASET": "source_dataset",
        "REFERENCING_DATASET": "referencing_dataset",
        "WWPDB": "wwpdb_raw",
    }

    result = pd.DataFrame()
    for raw_name, canonical in rename_map.items():
        result[canonical] = df[raw_name] if raw_name in df.columns else pd.NA

    # Numeric and normalized categorical columns added by earlier passes.
    for name in (
        "ddg", "domainome_ddg", "dg", "dh", "dhvh",
        "tm", "dtm", "exp_temperature",
        "ph",
        "buffer_norm", "method_norm", "measure_norm",
        "stabilizing",
    ):
        result[name] = df[name] if name in df.columns else pd.NA

    # Raw condition fields worth keeping verbatim (optional).
    for raw_name, canonical in (
        ("BUFFER", "buffer_raw"),
        ("BUFFER_CONC", "buffer_conc_raw"),
        ("ION", "ion_raw"),
        ("ION_CONC", "ion_conc_raw"),
        ("STATE", "state"),
    ):
        result[canonical] = df[raw_name] if raw_name in df.columns else pd.NA

    result["pdb_id"] = df["pdb_id"] if "pdb_id" in df.columns else pd.NA
    result["pdb_ids"] = df["pdb_ids"] if "pdb_ids" in df.columns else [[] for _ in range(len(df))]
    return result
256
+
257
+
258
def main():
    """CLI entry point: read the raw CSV, clean/normalize, write the canonical table."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", required=True, help="Path to raw FireProtDB 2.0 CSV")
    ap.add_argument("--output", required=True, help="Path to output .parquet or .csv")
    ap.add_argument("--min_seq_len", type=int, default=1, help="Drop sequences shorter than this")
    ap.add_argument("--drop_no_label", action="store_true", help="Drop rows with neither ddg nor dtm")
    args = ap.parse_args()

    # Load as strings to avoid pandas guessing mixed types
    df = pd.read_csv(args.input, dtype="string", keep_default_na=False, na_values=["", "NA", "NaN", "nan"])
    df = df.replace({"": pd.NA})

    # Basic trimming
    for c in df.columns:
        if pd.api.types.is_string_dtype(df[c]):
            df[c] = df[c].astype("string").str.strip()

    # Normalize & parse
    df = normalize_categoricals(df)
    df = clean_numeric_columns(df)

    # Parse substitution into structured columns.
    # FIX: the column-existence check must happen BEFORE df["SUBSTITUTION"] is
    # evaluated; previously the guard lived inside the applied lambda, so a CSV
    # without a SUBSTITUTION column raised KeyError before the lambda ever ran.
    if "SUBSTITUTION" in df.columns:
        parsed = df["SUBSTITUTION"].apply(parse_substitution)
    else:
        parsed = pd.Series([(None, None, None, None)] * len(df), index=df.index)
    df["wt_residue"] = parsed.map(lambda t: t[0])
    df["position"] = parsed.map(lambda t: t[1]).astype("Int64")
    df["mut_residue"] = parsed.map(lambda t: t[2])
    df["mutation"] = parsed.map(lambda t: t[3])

    df = derive_labels(df)

    # Structured PDB ids from the free-text WWPDB field.
    if "WWPDB" in df.columns:
        parsed_pdb = df["WWPDB"].astype("string").fillna("").apply(lambda v: parse_pdb_ids(str(v)))
        df["pdb_id"] = parsed_pdb.map(lambda t: t[0])
        df["pdb_ids"] = parsed_pdb.map(lambda t: t[1])
    else:
        df["pdb_id"] = pd.NA
        df["pdb_ids"] = [[] for _ in range(len(df))]

    # Filter by sequence length
    if "SEQUENCE_LENGTH" in df.columns:
        seq_len = df["SEQUENCE_LENGTH"].map(to_float)
        df["sequence_length_num"] = seq_len
        # NOTE(review): rows with a missing/unparseable length count as 0 here
        # and are dropped whenever --min_seq_len >= 1 — confirm this is intended.
        df = df[df["sequence_length_num"].fillna(0) >= args.min_seq_len]

    if args.drop_no_label:
        df = df[~(df["ddg"].isna() & df["dtm"].isna())]

    # Select final schema
    out = select_and_rename(df)

    # Add parsed mutation columns
    out["wt_residue"] = df["wt_residue"]
    out["position"] = df["position"]
    out["mut_residue"] = df["mut_residue"]
    out["mutation"] = df["mutation"]

    # De-dupe obvious duplicates (same experiment id)
    if "experiment_id" in out.columns:
        out = out.drop_duplicates(subset=["experiment_id"])

    # Write
    if args.output.lower().endswith(".parquet"):
        out.to_parquet(args.output, index=False)
    elif args.output.lower().endswith(".csv"):
        out.to_csv(args.output, index=False)
    else:
        raise ValueError("Output must end with .parquet or .csv")

    print(f"Wrote {len(out):,} rows to {args.output}")


if __name__ == "__main__":
    main()
src/01.2_gen_datasets.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# fireprotdb.py
from __future__ import annotations

import hashlib
from dataclasses import dataclass
from typing import Dict, List, Optional

import datasets


# Change these when publishing:
_CITATION = """\
@misc{fireprotdb2,
title = {FireProtDB 2.0},
note = {See original FireProtDB 2.0 publication and ProTherm sources}
}
"""

_DESCRIPTION = """\
ML-ready views of FireProtDB 2.0 derived from the raw CSV:
- mutation-level regression/classification (ddg, dtm, stabilizing)
- protein-level aggregated landscape view
- mutation language modeling view

This dataset is intended for Rosetta Commons / protein ML benchmarking.
"""

_HOMEPAGE = "https://github.com/drake463/FireProtDB"  # update
_LICENSE = "cc-by-4.0"  # update to correct license if different


# If you publish to HF, include the cleaned parquet in the repo and set this relative path.
# For local testing, replace with your local path.
# FIX: the repo ships data/cleaned_fireprotdb.csv (see .gitattributes / the LFS
# pointer); the previous value "cleaned_fireportdb.csv" transposed "prot" ->
# "port", so dl_manager.download() could never resolve the file.
_DEFAULT_DATA_FILE = "../data/cleaned_fireprotdb.csv"
35
+
36
+
37
class FireProtDBConfig(datasets.BuilderConfig):
    """BuilderConfig that carries a ``task`` tag selecting the dataset view.

    ``task`` is one of: "mutation_ddg", "mutation_dtm", "mutation_binary",
    "mutation_lm", "protein_landscape"; it drives the feature schema and the
    row filtering performed by the builder.
    """

    def __init__(self, task: str, **kwargs):
        super().__init__(**kwargs)
        self.task = task
41
+
42
+
43
# One BuilderConfig per ML view. The `task` value selects the feature schema
# and the row filtering applied by the builder below.
_BUILDER_CONFIGS = [
    FireProtDBConfig(
        name="mutation_ddg",
        version=datasets.Version("1.0.0"),
        description="Mutation-level ΔΔG regression (row-per-experiment where ddg present).",
        task="mutation_ddg",
    ),
    FireProtDBConfig(
        name="mutation_dtm",
        version=datasets.Version("1.0.0"),
        description="Mutation-level ΔTm regression (row-per-experiment where dtm present).",
        task="mutation_dtm",
    ),
    FireProtDBConfig(
        name="mutation_binary",
        version=datasets.Version("1.0.0"),
        description="Mutation-level binary stability classification (explicit stabilizing or ddg-sign-derived).",
        task="mutation_binary",
    ),
    FireProtDBConfig(
        name="mutation_lm",
        version=datasets.Version("1.0.0"),
        description="Mutation language-modeling view: (sequence, mutation, position, target_aa).",
        task="mutation_lm",
    ),
    FireProtDBConfig(
        name="protein_landscape",
        version=datasets.Version("1.0.0"),
        description="Protein-level aggregated landscapes: one row per protein with list of variants.",
        task="protein_landscape",
    ),
]
75
+
76
+
77
def _stable_hash(s: str) -> int:
    """Deterministic 32-bit hash of *s*: first 8 hex chars of its SHA-256."""
    h = hashlib.sha256(s.encode("utf-8")).hexdigest()
    return int(h[:8], 16)
80
+
81
+
82
def _split_by_protein(uniprot: Optional[str], sequence_id: Optional[str], ratios=(0.8, 0.1, 0.1)) -> str:
    """
    Deterministic protein-level split using (uniprotkb if present else sequence_id).

    Rows sharing a protein key always land in the same split, so no protein
    leaks across train/validation/test. ``ratios`` are the (train, validation,
    test) fractions; the remainder after train+validation goes to test.
    """
    key = (uniprot or "").strip()
    if not key:
        # FIX: check the sequence id BEFORE prefixing. Previously the empty
        # check ran on the already-prefixed "seqid:..." string, which is never
        # empty, so the "unknown" fallback was unreachable and id-less rows
        # all hashed the literal "seqid:".
        sid = (sequence_id or "").strip()
        key = f"seqid:{sid}" if sid else "unknown"
    r = _stable_hash(key) / 0xFFFFFFFF
    if r < ratios[0]:
        return "train"
    if r < ratios[0] + ratios[1]:
        return "validation"
    return "test"
97
+
98
+
99
+ class FireProtDB(datasets.GeneratorBasedBuilder):
100
+ BUILDER_CONFIGS = _BUILDER_CONFIGS
101
+ DEFAULT_CONFIG_NAME = "mutation_ddg"
102
+
103
+ def _info(self) -> datasets.DatasetInfo:
104
+ # Base schema for mutation-level records
105
+ mutation_features = datasets.Features(
106
+ {
107
+ "experiment_id": datasets.Value("string"),
108
+ "sequence_id": datasets.Value("string"),
109
+ "uniprotkb": datasets.Value("string"),
110
+ "protein_name": datasets.Value("string"),
111
+ "organism": datasets.Value("string"),
112
+ "sequence_length": datasets.Value("int32"),
113
+ "mutation": datasets.Value("string"),
114
+ "wt_residue": datasets.Value("string"),
115
+ "position": datasets.Value("int32"),
116
+ "mut_residue": datasets.Value("string"),
117
+ "ddg": datasets.Value("float32"),
118
+ "dtm": datasets.Value("float32"),
119
+ "tm": datasets.Value("float32"),
120
+ "ph": datasets.Value("float32"),
121
+ "buffer": datasets.Value("string"),
122
+ "method": datasets.Value("string"),
123
+ "measure": datasets.Value("string"),
124
+ "pmid": datasets.Value("string"),
125
+ "doi": datasets.Value("string"),
126
+ "publication_year": datasets.Value("int32"),
127
+ "split": datasets.ClassLabel(names=["train", "validation", "test"]),
128
+ "pdb_id": datasets.Value("string"),
129
+ "pdb_ids": datasets.Sequence(datasets.Value("string")),
130
+ }
131
+ )
132
+
133
+ if self.config.task == "mutation_lm":
134
+ features = datasets.Features(
135
+ {
136
+ "experiment_id": datasets.Value("string"),
137
+ "sequence_id": datasets.Value("string"),
138
+ "uniprotkb": datasets.Value("string"),
139
+ "sequence": datasets.Value("string"), # optional if you later join sequences
140
+ "mutation": datasets.Value("string"),
141
+ "position": datasets.Value("int32"),
142
+ "target_aa": datasets.Value("string"),
143
+ "split": datasets.ClassLabel(names=["train", "validation", "test"]),
144
+ }
145
+ )
146
+ elif self.config.task == "protein_landscape":
147
+ features = datasets.Features(
148
+ {
149
+ "protein_key": datasets.Value("string"),
150
+ "uniprotkb": datasets.Value("string"),
151
+ "sequence_id": datasets.Value("string"),
152
+ "protein_name": datasets.Value("string"),
153
+ "organism": datasets.Value("string"),
154
+ "sequence_length": datasets.Value("int32"),
155
+ "variants": datasets.Sequence(
156
+ {
157
+ "experiment_id": datasets.Value("string"),
158
+ "mutation": datasets.Value("string"),
159
+ "position": datasets.Value("int32"),
160
+ "wt_residue": datasets.Value("string"),
161
+ "mut_residue": datasets.Value("string"),
162
+ "ddg": datasets.Value("float32"),
163
+ "dtm": datasets.Value("float32"),
164
+ "ph": datasets.Value("float32"),
165
+ "buffer": datasets.Value("string"),
166
+ "method": datasets.Value("string"),
167
+ "pdb_id": datasets.Value("string"),
168
+ "pdb_ids": datasets.Sequence(datasets.Value("string")),
169
+ }
170
+ ),
171
+ "split": datasets.ClassLabel(names=["train", "validation", "test"]),
172
+ }
173
+ )
174
+ else:
175
+ features = mutation_features
176
+
177
+ return datasets.DatasetInfo(
178
+ description=_DESCRIPTION,
179
+ features=features,
180
+ homepage=_HOMEPAGE,
181
+ license=_LICENSE,
182
+ citation=_CITATION,
183
+ )
184
+
185
+ def _split_generators(self, dl_manager: datasets.DownloadManager):
186
+ # You can also host the cleaned file and put a URL here.
187
+ data_path = dl_manager.download(_DEFAULT_DATA_FILE)
188
+
189
+ # We'll generate ALL examples in one pass and assign split label per protein_key.
190
+ return [
191
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": data_path, "wanted_split": "train"}),
192
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"path": data_path, "wanted_split": "validation"}),
193
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"path": data_path, "wanted_split": "test"}),
194
+ ]
195
+
196
+ def _generate_examples(self, path: str, wanted_split: str):
197
+ import pandas as pd
198
+
199
+ # Read cleaned canonical table
200
+ if path.endswith(".parquet"):
201
+ df = pd.read_parquet(path)
202
+ else:
203
+ df = pd.read_csv(path)
204
+
205
+ # Ensure types
206
+ # (pandas nullable ints may appear; keep safe casting below)
207
+ def _to_int(x):
208
+ try:
209
+ return int(x)
210
+ except Exception:
211
+ return None
212
+
213
+ def _to_float(x):
214
+ try:
215
+ return float(x)
216
+ except Exception:
217
+ return None
218
+
219
+ # Task-specific filtering and shaping
220
+ if self.config.task in ("mutation_ddg", "mutation_binary", "mutation_lm"):
221
+ df_task = df[df["mutation"].notna()].copy()
222
+ elif self.config.task == "mutation_dtm":
223
+ df_task = df[df["mutation"].notna()].copy()
224
+ elif self.config.task == "protein_landscape":
225
+ df_task = df[df["mutation"].notna()].copy()
226
+ else:
227
+ df_task = df.copy()
228
+
229
+ # Apply label availability filters
230
+ if self.config.task == "mutation_ddg":
231
+ df_task = df_task[df_task["ddg"].notna()]
232
+ elif self.config.task == "mutation_dtm":
233
+ df_task = df_task[df_task["dtm"].notna()]
234
+ elif self.config.task == "mutation_binary":
235
+ df_task = df_task[df_task["stabilizing"].notna()]
236
+ elif self.config.task == "mutation_lm":
237
+ # Needs position and mut_residue for target_aa
238
+ df_task = df_task[df_task["position"].notna() & df_task["mut_residue"].notna()]
239
+
240
+ # Assign protein-level split deterministically
241
+ def protein_split(row) -> str:
242
+ return _split_by_protein(
243
+ uniprot=str(row.get("uniprotkb") or "").strip() or None,
244
+ sequence_id=str(row.get("sequence_id") or "").strip() or None,
245
+ )
246
+
247
+ df_task["split_name"] = df_task.apply(protein_split, axis=1)
248
+ df_task = df_task[df_task["split_name"] == wanted_split]
249
+
250
+ if self.config.task == "protein_landscape":
251
+ # Aggregate into one row per protein_key (uniprot preferred)
252
+ def protein_key(row) -> str:
253
+ u = str(row.get("uniprotkb") or "").strip()
254
+ if u:
255
+ return u
256
+ sid = str(row.get("sequence_id") or "").strip()
257
+ return f"seqid:{sid}" if sid else "unknown"
258
+
259
+ df_task["protein_key"] = df_task.apply(protein_key, axis=1)
260
+
261
+ grouped = df_task.groupby("protein_key", dropna=False)
262
+
263
+ idx = 0
264
+ for pk, g in grouped:
265
+ # Representative metadata
266
+ first = g.iloc[0]
267
+ record = {
268
+ "protein_key": str(pk),
269
+ "uniprotkb": str(first.get("uniprotkb") or ""),
270
+ "sequence_id": str(first.get("sequence_id") or ""),
271
+ "protein_name": str(first.get("protein_name") or ""),
272
+ "organism": str(first.get("organism") or ""),
273
+ "sequence_length": _to_int(first.get("sequence_length")) or 0,
274
+ "variants": [],
275
+ "split": wanted_split,
276
+ }
277
+ for _, r in g.iterrows():
278
+ record["variants"].append(
279
+ {
280
+ "experiment_id": str(r.get("experiment_id") or ""),
281
+ "mutation": str(r.get("mutation") or ""),
282
+ "position": _to_int(r.get("position")) or -1,
283
+ "wt_residue": str(r.get("wt_residue") or ""),
284
+ "mut_residue": str(r.get("mut_residue") or ""),
285
+ "ddg": _to_float(r.get("ddg")),
286
+ "dtm": _to_float(r.get("dtm")),
287
+ "ph": _to_float(r.get("ph")),
288
+ "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
289
+ "method": str(r.get("method_norm") or ""),
290
+ "pdb_id": str(r.get("pdb_id") or ""),
291
+ "pdb_ids": list(r.get("pdb_ids") or []),
292
+ }
293
+ )
294
+ yield idx, record
295
+ idx += 1
296
+ return
297
+
298
+ # Mutation LM view
299
+ if self.config.task == "mutation_lm":
300
+ for i, r in df_task.reset_index(drop=True).iterrows():
301
+ yield i, {
302
+ "experiment_id": str(r.get("experiment_id") or ""),
303
+ "sequence_id": str(r.get("sequence_id") or ""),
304
+ "uniprotkb": str(r.get("uniprotkb") or ""),
305
+ "sequence": "", # left blank unless you join real sequences elsewhere
306
+ "mutation": str(r.get("mutation") or ""),
307
+ "position": _to_int(r.get("position")) or -1,
308
+ "target_aa": str(r.get("mut_residue") or ""),
309
+ "split": wanted_split,
310
+ }
311
+ return
312
+
313
+ # Standard mutation-level views
314
+ for i, r in df_task.reset_index(drop=True).iterrows():
315
+ yield i, {
316
+ "experiment_id": str(r.get("experiment_id") or ""),
317
+ "sequence_id": str(r.get("sequence_id") or ""),
318
+ "uniprotkb": str(r.get("uniprotkb") or ""),
319
+ "protein_name": str(r.get("protein_name") or ""),
320
+ "organism": str(r.get("organism") or ""),
321
+ "sequence_length": _to_int(r.get("sequence_length")) or 0,
322
+ "mutation": str(r.get("mutation") or ""),
323
+ "wt_residue": str(r.get("wt_residue") or ""),
324
+ "position": _to_int(r.get("position")) or -1,
325
+ "mut_residue": str(r.get("mut_residue") or ""),
326
+ "ddg": _to_float(r.get("ddg")),
327
+ "dtm": _to_float(r.get("dtm")),
328
+ "tm": _to_float(r.get("tm")),
329
+ "ph": _to_float(r.get("ph")),
330
+ "buffer": str(r.get("buffer_norm") or r.get("buffer_raw") or ""),
331
+ "method": str(r.get("method_norm") or ""),
332
+ "measure": str(r.get("measure_norm") or ""),
333
+ "pmid": str(r.get("pmid") or ""),
334
+ "doi": str(r.get("doi") or ""),
335
+ "pdb_id": str(r.get("pdb_id") or ""),
336
+ "pdb_ids": list(r.get("pdb_ids") or []),
337
+ "publication_year": int(r.get("publication_year")) if str(r.get("publication_year") or "").isdigit() else 0,
338
+ "split": wanted_split,
339
+ }