# BULMA — tests/test_smoke.py
# Author: HarriziSaad — "Update tests/test_smoke.py" (commit fe6d666, verified)
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
sys.path.insert(0, str(Path(__file__).parent.parent))
SEED = 17
@pytest.fixture(scope="session")
def tmp_proc(tmp_path_factory):
    """Session-scoped fixture: build the mock processed-data directory once."""
    out_dir = tmp_path_factory.mktemp("processed")
    _make_mock_data(out_dir, seed=SEED)
    return out_dir
def _make_mock_data(proc: Path, seed: int, n_transporters: int = 6,
n_compounds: int = 20, d_prot: int = 32, d_lig: int = 16):
"""Tiny mock dataset for fast tests."""
rng = np.random.default_rng(seed)
transporters = [f"T{i:02d}" for i in range(n_transporters)]
compounds = [f"C{i:03d}" for i in range(n_compounds)]
P = pd.DataFrame(rng.normal(0, 1, (n_transporters, d_prot)).astype("float32"),
columns=[f"d{i}" for i in range(d_prot)])
P.insert(0, "transporter", transporters)
P.to_csv(proc / "protein.csv", index=False)
L = pd.DataFrame(rng.normal(0, 1, (n_compounds, d_lig)).astype("float32"),
columns=[f"d{i}" for i in range(d_lig)])
L.insert(0, "compound", compounds)
L.to_csv(proc / "ligand.csv", index=False)
rows = [{"transporter": t, "compound": c,
"y": int(rng.random() < 0.1), "assay_id": "A1",
"condition": "YPD", "concentration": "10uM",
"replicate": 1, "media": "YPD"}
for t in transporters for c in compounds]
pd.DataFrame(rows).to_csv(proc / "labels.csv", index=False)
N = 200
C = pd.DataFrame({
"outcome": rng.normal(0, 1, N),
"ethanol_pct": rng.choice([0, 4, 8], N),
"ROS": rng.gamma(2, 0.7, N),
"PDR1_reg": rng.normal(0, 1, N),
"YAP1_reg": rng.normal(0, 1, N),
"H2O2_uM": rng.choice([0, 100], N),
"NaCl_mM": rng.choice([0, 400], N),
"batch": rng.choice(["A", "B"], N),
"accession": rng.choice(["GSE1", "GSE2"], N),
"sample_id": [f"S{i:04d}" for i in range(N)],
})
for t in transporters:
C[f"{t}_expr"] = rng.normal(0, 1, N)
C.to_csv(proc / "causal_table.csv", index=False)
return proc
class TestMockData:
    """Sanity checks on the CSV files emitted by _make_mock_data."""

    def test_protein_csv_shape(self, tmp_proc):
        df = pd.read_csv(tmp_proc / "protein.csv")
        assert "transporter" in df.columns
        assert df.shape[0] == 6
        assert df.select_dtypes(float).shape[1] == 32

    def test_ligand_csv_shape(self, tmp_proc):
        df = pd.read_csv(tmp_proc / "ligand.csv")
        assert "compound" in df.columns
        assert df.shape[0] == 20

    def test_labels_no_nans(self, tmp_proc):
        labels = pd.read_csv(tmp_proc / "labels.csv")
        assert not labels.isna().any().any()
        assert set(labels["y"].unique()).issubset({0, 1})

    def test_causal_table_expr_cols(self, tmp_proc):
        table = pd.read_csv(tmp_proc / "causal_table.csv")
        expr_count = len([c for c in table.columns if c.endswith("_expr")])
        assert expr_count == 6
        assert not table.isna().any().any()

    def test_reproducibility(self, tmp_path):
        """Same seed → identical protein.csv MD5."""
        import hashlib

        def digest(path):
            return hashlib.md5(Path(path).read_bytes()).hexdigest()

        first, second = tmp_path / "run1", tmp_path / "run2"
        first.mkdir()
        second.mkdir()
        _make_mock_data(first, seed=42)
        _make_mock_data(second, seed=42)
        assert digest(first / "protein.csv") == digest(second / "protein.csv")
        assert digest(first / "labels.csv") == digest(second / "labels.csv")

    def test_different_seeds_differ(self, tmp_path):
        """Different seeds must not produce identical protein tables."""
        dir_a = tmp_path / "s1"
        dir_b = tmp_path / "s2"
        dir_a.mkdir()
        dir_b.mkdir()
        _make_mock_data(dir_a, seed=1)
        _make_mock_data(dir_b, seed=2)
        table_a = pd.read_csv(dir_a / "protein.csv")
        table_b = pd.read_csv(dir_b / "protein.csv")
        assert not table_a.equals(table_b)
class TestPairDataset:
    """Integration checks for src.atlas.dataset.PairDataset on the mock data."""

    def test_length(self, tmp_proc):
        # Dataset should enumerate the full dense pair grid:
        # 6 transporters x 20 compounds.
        pytest.importorskip("torch", reason="torch not installed")
        from src.atlas.dataset import PairDataset
        ds = PairDataset(tmp_proc)
        assert len(ds) == 6 * 20

    def test_item_shapes(self, tmp_proc):
        pytest.importorskip("torch", reason="torch not installed")
        from src.atlas.dataset import PairDataset
        ds = PairDataset(tmp_proc)
        # (protein vec, ligand vec, label, transporter idx, compound idx)
        p, l, y, ti, ci = ds[0]
        assert p.shape == (32,)   # protein embedding dim
        assert l.shape == (16,)   # ligand embedding dim
        assert y.shape == (1,)
        assert y.item() in (0.0, 1.0)  # binary label

    def test_split_idx(self, tmp_proc):
        pytest.importorskip("torch", reason="torch not installed")
        from src.atlas.dataset import PairDataset
        # split_idx restricts the dataset to exactly the given indices.
        ds_sub = PairDataset(tmp_proc, split_idx=list(range(10)))
        assert len(ds_sub) == 10

    def test_no_id_leakage_between_splits(self, tmp_proc):
        """Verify train and test pairs share no (transporter, compound) combos."""
        pytest.importorskip("torch", reason="torch not installed")
        from src.atlas.dataset import PairDataset
        from sklearn.model_selection import GroupKFold
        ds = PairDataset(tmp_proc)
        n = len(ds)
        # Group by transporter index so each transporter lands entirely on
        # one side of the split — the leakage mode we guard against.
        groups = np.array([ti for ti, _, _ in ds.pairs])
        gkf = GroupKFold(n_splits=3)
        train_idx, test_idx = next(gkf.split(np.arange(n), groups=groups))
        train_t = {ds.pairs[i][0] for i in train_idx}
        test_t = {ds.pairs[i][0] for i in test_idx}
        assert train_t.isdisjoint(test_t), "Transporter leakage between splits!"
class TestAtlasMLP:
    """Shape, output-range and determinism checks for the AtlasMLP model.

    Each test guards its own imports with try/except so a missing torch
    (or missing project package) skips rather than errors.
    """

    def test_forward_shape(self):
        try:
            import torch
            from src.atlas.model_mlp import AtlasMLP
        except ImportError:
            pytest.skip("torch not installed")
        net = AtlasMLP(d_prot=32, d_lig=16, d_hidden=64, p_drop=0.0)
        prot = torch.randn(4, 32)
        lig = torch.randn(4, 16)
        out = net(prot, lig)
        assert out.shape == (4,), f"Expected (4,), got {out.shape}"

    def test_output_range_after_sigmoid(self):
        try:
            import torch
            from src.atlas.model_mlp import AtlasMLP
        except ImportError:
            pytest.skip("torch not installed")
        net = AtlasMLP(d_prot=32, d_lig=16, d_hidden=64, p_drop=0.0)
        net.eval()
        with torch.no_grad():
            prot = torch.randn(100, 32)
            lig = torch.randn(100, 16)
            probs = torch.sigmoid(net(prot, lig))
        # Sigmoid output must lie in [0, 1].
        assert (probs >= 0).all()
        assert (probs <= 1).all()

    def test_deterministic_at_eval(self):
        try:
            import torch
            from src.atlas.model_mlp import AtlasMLP
        except ImportError:
            pytest.skip("torch not installed")
        torch.manual_seed(SEED)
        net = AtlasMLP(d_prot=32, d_lig=16).eval()
        prot = torch.randn(4, 32)
        lig = torch.randn(4, 16)
        # In eval mode (dropout off) two forward passes must agree.
        with torch.no_grad():
            first = net(prot, lig)
            second = net(prot, lig)
        assert torch.allclose(first, second)
class TestUtils:
    """Checks for src.utils helpers: seeding, config loading, metrics."""

    def test_set_seed(self):
        from src.utils.io import set_seed
        # Reseeding must make the global NumPy stream reproducible.
        set_seed(42)
        first = np.random.randn(10)
        set_seed(42)
        second = np.random.randn(10)
        np.testing.assert_array_equal(first, second)

    def test_load_cfg(self):
        from src.utils.io import load_cfg
        cfg = load_cfg("env/config.yaml")
        for section in ("training", "model"):
            assert section in cfg
        assert cfg["training"]["seed"] == 17
        assert cfg["model"]["d_prot"] == 1280

    def test_auprc_perfect(self):
        from src.utils.metrics import auprc
        # Perfectly separated scores give AUPRC of exactly 1.
        labels = np.array([0, 0, 1, 1])
        scores = np.array([0.1, 0.1, 0.9, 0.9])
        assert auprc(labels, scores) == pytest.approx(1.0)

    def test_bootstrap_ci_width(self):
        from src.utils.metrics import bootstrap_ci, auprc
        rng = np.random.default_rng(42)
        labels = rng.integers(0, 2, 200)
        scores = rng.uniform(0, 1, 200)
        point, lower, upper = bootstrap_ci(labels, scores, auprc, n=200)
        # Point estimate sits inside its own CI, and the CI is not absurdly wide.
        assert lower <= point <= upper
        assert upper - lower < 0.3