File size: 6,839 Bytes
6d1bbc7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
"""Database connection and migration runner for NegBioDB."""

import glob
import os
import sqlite3
from contextlib import contextmanager
from pathlib import Path

_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent
DEFAULT_DB_PATH = _PROJECT_ROOT / "data" / "negbiodb.db"
DEFAULT_MIGRATIONS_DIR = _PROJECT_ROOT / "migrations"


def get_connection(db_path: str | Path) -> sqlite3.Connection:
    """Open a SQLite connection with NegBioDB-standard PRAGMAs.

    Sets WAL journal mode and enables foreign key enforcement.
    The caller is responsible for closing the connection.
    """
    conn = sqlite3.connect(str(db_path))
    conn.execute("PRAGMA journal_mode = WAL")
    conn.execute("PRAGMA foreign_keys = ON")
    return conn


@contextmanager
def connect(db_path: str | Path):
    """Yield a NegBioDB connection, guaranteeing it is closed afterwards.

    Usage:
        with connect("data/negbiodb.db") as conn:
            conn.execute("SELECT ...")
    """
    conn = get_connection(db_path)
    try:
        yield conn
    finally:
        # Always release the handle, even if the body raised.
        conn.close()


def get_applied_versions(conn: sqlite3.Connection) -> set[str]:
    """Return the set of migration versions already applied.

    Yields an empty set when the schema_migrations table does not exist
    yet (i.e. a brand-new database).
    """
    try:
        cursor = conn.execute("SELECT version FROM schema_migrations")
    except sqlite3.OperationalError:
        # Table missing: no migrations have touched this database yet.
        return set()
    return {version for (version,) in cursor}


def run_migrations(db_path: str | Path,
                   migrations_dir: str | Path | None = None) -> list[str]:
    """Apply pending SQL migrations to the database.

    Migrations are .sql files in migrations_dir, sorted by filename prefix.
    Version is extracted from filename: "001_initial_schema.sql" -> "001".
    Already-applied versions (recorded in schema_migrations) are skipped.

    After each script runs, its version is recorded in schema_migrations
    on a best-effort basis (INSERT OR IGNORE), so scripts that already
    insert their own row are unaffected.

    Args:
        db_path: Path to the SQLite database file (parent directories are
            created if missing).
        migrations_dir: Directory containing .sql migration files.
            Defaults to the project-level migrations directory.

    Returns:
        List of version strings applied in this run, in filename order.
    """
    if migrations_dir is None:
        migrations_dir = DEFAULT_MIGRATIONS_DIR

    db_path = Path(db_path)
    migrations_dir = Path(migrations_dir)

    # Ensure the parent directory exists so sqlite3 can create the file.
    db_path.parent.mkdir(parents=True, exist_ok=True)

    conn = get_connection(db_path)
    try:
        applied = get_applied_versions(conn)
        # Sorting by path makes the numeric filename prefix ("001_", "002_",
        # ...) determine application order.
        migration_files = sorted(migrations_dir.glob("*.sql"))
        newly_applied: list[str] = []

        for mf in migration_files:
            version = mf.name.split("_")[0]
            if version in applied:
                continue
            sql = mf.read_text(encoding="utf-8")
            conn.executescript(sql)
            # Record the version so this migration is skipped on the next
            # run.  INSERT OR IGNORE keeps this compatible with scripts
            # that self-record (assuming version is the primary key).
            try:
                conn.execute(
                    "INSERT OR IGNORE INTO schema_migrations (version)"
                    " VALUES (?)",
                    (version,),
                )
                conn.commit()
            except sqlite3.OperationalError:
                # schema_migrations missing or has an incompatible shape;
                # fall back to relying on the script's own bookkeeping.
                pass
            newly_applied.append(version)

        return newly_applied
    finally:
        conn.close()


def refresh_all_pairs(conn: sqlite3.Connection) -> int:
    """Refresh compound_target_pairs aggregation across ALL sources.

    Deletes all existing pairs and re-aggregates from negative_results,
    merging cross-source data with best confidence tier and result type
    selection.  After inserting pairs, computes compound_degree and
    target_degree using temp-table joins for performance.

    Args:
        conn: Open connection to a NegBioDB database.  This function does
            not commit; the caller is responsible for committing (or the
            changes are lost on rollback/close without commit).

    Returns:
        Total number of rows in compound_target_pairs after the refresh.

    Note: median_pchembl uses AVG (SQLite lacks MEDIAN).  This is
    acceptable since the column is informational, not used for
    thresholding.
    """
    # Full rebuild: wipe and re-aggregate rather than incremental update.
    conn.execute("DELETE FROM compound_target_pairs")
    conn.execute(
        """INSERT INTO compound_target_pairs
        (compound_id, target_id, num_assays, num_sources,
         best_confidence, best_result_type, earliest_year,
         median_pchembl, min_activity_value, max_activity_value)
        SELECT
            compound_id,
            target_id,
            COUNT(DISTINCT COALESCE(assay_id, -1)),
            COUNT(DISTINCT source_db),
            CASE MIN(CASE confidence_tier
                WHEN 'gold' THEN 1 WHEN 'silver' THEN 2
                WHEN 'bronze' THEN 3 WHEN 'copper' THEN 4 END)
                WHEN 1 THEN 'gold' WHEN 2 THEN 'silver'
                WHEN 3 THEN 'bronze' WHEN 4 THEN 'copper' END,
            CASE
                WHEN SUM(CASE WHEN result_type = 'hard_negative'
                              THEN 1 ELSE 0 END) > 0
                     THEN 'hard_negative'
                WHEN SUM(CASE WHEN result_type = 'conditional_negative'
                              THEN 1 ELSE 0 END) > 0
                     THEN 'conditional_negative'
                WHEN SUM(CASE WHEN result_type = 'methodological_negative'
                              THEN 1 ELSE 0 END) > 0
                     THEN 'methodological_negative'
                WHEN SUM(CASE WHEN result_type = 'dose_time_negative'
                              THEN 1 ELSE 0 END) > 0
                     THEN 'dose_time_negative'
                ELSE 'hypothesis_negative'
            END,
            MIN(publication_year),
            AVG(pchembl_value),
            MIN(activity_value),
            MAX(activity_value)
        FROM negative_results
        GROUP BY compound_id, target_id"""
    )
    # Notes on the SELECT above:
    # - num_assays: COALESCE(assay_id, -1) folds all NULL assay_ids into a
    #   single -1 bucket so they count as one pseudo-assay (COUNT DISTINCT
    #   would otherwise skip NULLs entirely).
    # - best_confidence: the inner CASE maps tiers to numeric ranks
    #   (gold=1 .. copper=4), MIN picks the best rank, and the outer CASE
    #   maps the rank back to its tier name.
    # - best_result_type: a priority ladder — the first type in the order
    #   hard > conditional > methodological > dose_time that occurs in any
    #   grouped row wins; 'hypothesis_negative' is the fallback.

    # Compute compound_degree (number of distinct targets per compound)
    # via a temp table rather than a correlated COUNT subquery, which
    # would re-scan compound_target_pairs once per updated row.
    conn.execute("DROP TABLE IF EXISTS _cdeg")
    conn.execute(
        """CREATE TEMP TABLE _cdeg (
            compound_id INTEGER PRIMARY KEY, deg INTEGER)"""
    )
    conn.execute(
        """INSERT INTO _cdeg
        SELECT compound_id, COUNT(DISTINCT target_id)
        FROM compound_target_pairs GROUP BY compound_id"""
    )
    conn.execute(
        """UPDATE compound_target_pairs SET compound_degree = (
            SELECT deg FROM _cdeg d
            WHERE d.compound_id = compound_target_pairs.compound_id
        )"""
    )
    conn.execute("DROP TABLE _cdeg")

    # Compute target_degree (number of distinct compounds per target)
    # with the same temp-table pattern, mirrored for the target side.
    conn.execute("DROP TABLE IF EXISTS _tdeg")
    conn.execute(
        """CREATE TEMP TABLE _tdeg (
            target_id INTEGER PRIMARY KEY, deg INTEGER)"""
    )
    conn.execute(
        """INSERT INTO _tdeg
        SELECT target_id, COUNT(DISTINCT compound_id)
        FROM compound_target_pairs GROUP BY target_id"""
    )
    conn.execute(
        """UPDATE compound_target_pairs SET target_degree = (
            SELECT deg FROM _tdeg d
            WHERE d.target_id = compound_target_pairs.target_id
        )"""
    )
    conn.execute("DROP TABLE _tdeg")

    count = conn.execute("SELECT COUNT(*) FROM compound_target_pairs").fetchone()[0]
    return count


def create_database(db_path: str | Path | None = None,
                    migrations_dir: str | Path | None = None) -> Path:
    """Create (or update) a NegBioDB database by running all migrations.

    Convenience wrapper around run_migrations that falls back to the
    project defaults and reports progress on stdout.

    Returns:
        The path of the database as a Path object.
    """
    target = Path(db_path) if db_path is not None else DEFAULT_DB_PATH

    applied = run_migrations(target, migrations_dir)

    if applied:
        print(f"Applied {len(applied)} migration(s): {', '.join(applied)}")
    else:
        print("Database is up to date (no pending migrations).")

    return target