ybornachot committed on
Commit
8849cef
·
1 Parent(s): abf6c6b

feat: fine-tuning notebook prototype

Files changed (1)
  1. notebooks/03_fine_tuning.ipynb +1475 -0
notebooks/03_fine_tuning.ipynb ADDED
@@ -0,0 +1,1475 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Simple PyTorch Tracks Fine-Tuning Pipeline\n",
8
+ "\n",
9
+ "This notebook implements a simple PyTorch-based deep learning pipeline for tracks prediction fine-tuning.\n",
10
+ "\n",
11
+ "## Overview\n",
12
+ "- Loads a HuggingFace model (NTv3) as backbone\n",
13
+ "- Adds a prediction head for bigwig tracks\n",
14
+ "- Fine-tunes on tracks prediction with a simple training loop\n"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {},
21
+ "outputs": [],
22
+ "source": [
23
+ "# Install useful dependencies\n",
24
+ "# !pip install -r requirements.txt"
25
+ ]
26
+ },
27
+ {
28
+ "cell_type": "code",
29
+ "execution_count": null,
30
+ "metadata": {},
31
+ "outputs": [
32
+ {
33
+ "name": "stderr",
34
+ "output_type": "stream",
35
+ "text": [
36
+ "/home/y-bornachot/venvs/ntv3-env/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
37
+ " from .autonotebook import tqdm as notebook_tqdm\n"
38
+ ]
39
+ }
40
+ ],
41
+ "source": [
42
+ "# 0. Imports\n",
43
+ "import random\n",
44
+ "import functools\n",
45
+ "from typing import List, Dict, Optional, Callable\n",
46
+ "import pyBigWig\n",
47
+ "from pyfaidx import Fasta\n",
48
+ "\n",
49
+ "import torch\n",
50
+ "import torch.nn as nn\n",
51
+ "import torch.nn.functional as F\n",
52
+ "from torch.utils.data import Dataset, DataLoader\n",
53
+ "from torch.optim import AdamW\n",
54
+ "from torch.optim.lr_scheduler import LambdaLR\n",
55
+ "from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer\n",
56
+ "import numpy as np\n",
57
+ "from torchmetrics import PearsonCorrCoef"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "markdown",
62
+ "metadata": {},
63
+ "source": [
64
+ "# 1. Configuration setup"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": null,
70
+ "metadata": {},
71
+ "outputs": [
72
+ {
73
+ "name": "stdout",
74
+ "output_type": "stream",
75
+ "text": [
76
+ "Using device: cpu\n"
77
+ ]
78
+ }
79
+ ],
80
+ "source": [
81
+ "config = {\n",
82
+ " # Model\n",
83
+ " \"model_name\": \"InstaDeepAI/ntv3_8M_7downsample_pretrained_le_1mb\", # NTv3 model\n",
84
+ " \"pretrained\": True,\n",
85
+ " \n",
86
+ " # Data\n",
87
+ " \"sequence_length\": 1_024,\n",
88
+ " \"bigwig_file_ids\": [\"ENCFF884LDL\"], # Example track names\n",
89
+ " \"keep_target_center_fraction\": 0.375,\n",
90
+ " \n",
91
+ " # Training\n",
92
+ " \"batch_size\": 2,\n",
93
+ " \"learning_rate\": 1e-5,\n",
94
+ " \"schedule\": True,\n",
95
+ " \"num_tokens_warmup\": 10000,\n",
96
+ " \"end_learning_rate\": 5e-5,\n",
97
+ " \"weight_decay\": 0.01,\n",
98
+ " \n",
99
+ " \"num_tokens_training\": 131_072, # Total training tokens budget\n",
100
+ " \"num_tokens_per_update\": 4_096, # Target tokens per optimizer update (batch_size * seq_len * grad_accum)\n",
101
+ " \"num_tokens_per_log\": 8_192, # Tokens between training logs\n",
102
+ " \"num_tokens_per_validation\": 16_384, # Tokens between validations\n",
103
+ " \n",
104
+ " # Validation\n",
105
+ " \"num_validation_samples\": 10,\n",
106
+ " \n",
107
+ " # Loss\n",
108
+ " \"bigwig_loss_weight\": 1.0,\n",
109
+ " \"bigwig_scalar_loss_function\": \"poisson-multinomial\",\n",
110
+ " \"bigwig_shape_loss_coefficient\": 5.0,\n",
111
+ " \n",
112
+ " # General\n",
113
+ " \"seed\": 42,\n",
114
+ " \"device\": \"cuda\" if torch.cuda.is_available() else \"cpu\",\n",
115
+ " \"num_workers\": 4, # Number of worker processes for DataLoader\n",
116
+ "}\n",
117
+ "\n",
118
+ "# Set random seed\n",
119
+ "torch.manual_seed(config[\"seed\"])\n",
120
+ "np.random.seed(config[\"seed\"])\n",
121
+ "\n",
122
+ "device = torch.device(config[\"device\"])\n",
123
+ "print(f\"Using device: {device}\")"
124
+ ]
125
+ },
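The token-budget fields above drive all of the step bookkeeping computed in section 5. A worked example of the arithmetic, using only the config values above (a sketch, not part of the original notebook):

```python
# Token-budget bookkeeping with batch_size=2 and sequence_length=1024.
batch_size, seq_len = 2, 1_024
tokens_per_forward = batch_size * seq_len                   # 2,048 tokens per forward pass
grad_accum = 4_096 // tokens_per_forward                    # num_tokens_per_update -> 2 accumulation steps
total_steps = 131_072 // (grad_accum * tokens_per_forward)  # num_tokens_training -> 32 optimizer steps
print(grad_accum, total_steps)                              # 2 32
```

These values match the "Gradient accumulation steps: 2" and "Total training steps: 32" printed in section 5.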
126
+ {
127
+ "cell_type": "markdown",
128
+ "metadata": {},
129
+ "source": [
130
+ "# 2. Data download"
131
+ ]
132
+ },
133
+ {
134
+ "cell_type": "code",
135
+ "execution_count": 2,
136
+ "metadata": {},
137
+ "outputs": [
138
+ {
139
+ "name": "stdout",
140
+ "output_type": "stream",
141
+ "text": [
142
+ "--2025-12-09 18:33:50-- https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/latest_assembly_versions/GCF_000001405.40_GRCh38.p14/GCF_000001405.40_GRCh38.p14_genomic.fna.gz\n",
143
+ "Resolving ftp.ncbi.nlm.nih.gov (ftp.ncbi.nlm.nih.gov)... 2607:f220:41e:250::7, 2607:f220:41e:250::11, 2607:f220:41e:250::12, ...\n",
144
+ "Connecting to ftp.ncbi.nlm.nih.gov (ftp.ncbi.nlm.nih.gov)|2607:f220:41e:250::7|:443... connected.\n",
145
+ "HTTP request sent, awaiting response... 200 OK\n",
146
+ "Length: 972898531 (928M) [application/x-gzip]\n",
147
+ "Saving to: 'GCF_000001405.40_GRCh38.p14_genomic.fna.gz'\n",
148
+ "\n",
149
+ "GCF_000001405.40_GR 100%[===================>] 927.83M 18.4MB/s in 51s \n",
150
+ "\n",
151
+ "2025-12-09 18:34:42 (18.0 MB/s) - 'GCF_000001405.40_GRCh38.p14_genomic.fna.gz' saved [972898531/972898531]\n",
152
+ "\n"
153
+ ]
154
+ }
155
+ ],
156
+ "source": [
157
+ "!wget -c https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/latest_assembly_versions/GCF_000001405.40_GRCh38.p14/GCF_000001405.40_GRCh38.p14_genomic.fna.gz \\\n",
158
+ "&& gunzip -f GCF_000001405.40_GRCh38.p14_genomic.fna.gz"
159
+ ]
160
+ },
161
+ {
162
+ "cell_type": "code",
163
+ "execution_count": 16,
164
+ "metadata": {},
165
+ "outputs": [
166
+ {
167
+ "name": "stdout",
168
+ "output_type": "stream",
169
+ "text": [
170
+ "--2025-12-09 22:13:59-- https://www.encodeproject.org/files/ENCFF884LDL/@@download/ENCFF884LDL\n",
171
+ "Resolving www.encodeproject.org (www.encodeproject.org)... 34.211.244.144\n",
172
+ "Connecting to www.encodeproject.org (www.encodeproject.org)|34.211.244.144|:443... connected.\n",
173
+ "HTTP request sent, awaiting response... 404 Not Found\n",
174
+ "2025-12-09 22:14:00 ERROR 404: Not Found.\n",
175
+ "\n"
176
+ ]
177
+ }
178
+ ],
179
+ "source": [
180
+ "!wget -O ENCFF884LDL \"$(curl -s https://www.encodeproject.org/files/ENCFF884LDL/@@download/ENCFF884LDL | sed -n 's/.*href=\\\"\\([^\\\"]*ENCFF884LDL[^\\\"]*\\)\\\".*/\\1/p')\" \\\n",
181
+ "&& echo \"Downloaded ENCFF884LDL\""
182
+ ]
183
+ },
184
+ {
185
+ "cell_type": "code",
186
+ "execution_count": 4,
187
+ "metadata": {},
188
+ "outputs": [
189
+ {
190
+ "name": "stdout",
191
+ "output_type": "stream",
192
+ "text": [
193
+ "--2025-12-09 18:41:24-- https://www.encodeproject.org/files/ENCFF884LDL/@@download/ENCFF884LDL.bigWig\n",
194
+ "Resolving www.encodeproject.org (www.encodeproject.org)... 34.211.244.144\n",
195
+ "Connecting to www.encodeproject.org (www.encodeproject.org)|34.211.244.144|:443... connected.\n",
196
+ "HTTP request sent, awaiting response... 307 Temporary Redirect\n",
197
+ "Location: https://encode-public.s3.amazonaws.com/2020/09/19/425880b6-b323-4ee2-95ce-56bdd088d126/ENCFF884LDL.bigWig?response-content-disposition=attachment%3B%20filename%3DENCFF884LDL.bigWig&AWSAccessKeyId=ASIATGZNGCNX3AXUNFS3&Signature=Ca%2Bz1PL7zdbGzyRggtvN686q4oE%3D&x-amz-security-token=IQoJb3JpZ2luX2VjEPr%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMiJGMEQCIAggXesBwHBuGSivVx0RvF5f2vZbk09TPBdf%2FYJUt%2BLWAiAKrh58c%2Bm%2F%2ByrujtQxgltFGzGo5qXSWv%2B0zPaa3gKUTCq8BQjC%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F8BEAAaDDIyMDc0ODcxNDg2MyIMa%2FegIMq%2By2ql10quKpAFATT6r6oWCSXqrBd2gfR8S1QNvY%2BKjvbr%2BvS2ifnF5NqfByJgZxdXVC65WI8fUYqgspTQB5Az%2BE5O4jR8EnFBv%2FjO6DqrWkQQOUsHUFFGXJjarvCPdYjqJmV9SyeTuzNeV0xwFX%2Fleq1%2F4f3eAV81Nv5J%2B8UeHYn5GxtwS%2BjhzVsCJ8tqAo6yRi0wPteU8nb8yLJb%2F%2FWvQLZce7Yc9%2BZkuxKKGoEKQstRSGLCh%2FjtnNfvGp0x20mj5C7wsk61LHBJlNV3KVD7qZHZ57N1CBx5XNuJ%2BkJp6eBU8htM%2FY73tBkp4w5xHNyI5F%2B7JxjDDjo4YOikyLKk7tnTmWfC2lEGXXx33D8xyBxi4oNnK76R0N296GRSHS22esmo12YGK5QNvVbU4SuZUUWjVcrGFqtN%2F7ff1K%2FdqiRyh6TDvXbOUf%2Bk691iqwRY34LbXoJsOzcux5wwQGbHfcSdGrp2Y3KtpDGEdHiiTVHJeHi9pxBvlwvmjM5lXjJjtjOFqXIF%2F%2FygXdl4wUIMMsuinPWpA5xVIk4kg1Bv5XVNuqcPJl7Dl2ZdRzQvwc0Xl5dBL39ZAz9MvCffPV2Fb3hiL5vIQJ2ySdDnqXDhTuUsWGy81MltoznoOVbvuu64FAEp4GdwnwRH1ILlVOKQ1bHR5FSHqb8OFVqAQezRljaJY2ds1J2HMAJ2AJtg3k8XNQScR%2FutxWkI3pYDnAQQQkHHw3aFWNNYbQMfyAAptJohtNGClRoTiepBUckqxpgvMXwEOTJzpUEi0sMIxMkXMWa3ncKFHQAP6P3eKxBOjW8s%2F3BXwRlbgsNdQvqDUdf2dD5KLeHfpyKbdPnG0C6yZAxBF%2Fk4jO1F2F4o533RZGF8Ww7qMc5Ij2ww%2BbPhyQY6sgG2uZfWDKxd1yRNOufiZW%2FAtmcEQg%2BtzoWnq6TxyhU0OCY%2BN7xR8HO4UaT0Od0C06PHugNQCUS6eJusR0IfSRJ7ozZJUomphTeCPXw1G%2B6RVsni%2B9lGE8SlRLTMzNvzQJv8oJNZsoi6DVWlK%2FGt7TgwxSKH8%2BVQmal7nXUqR9f8Dh7CF1KppbVtNiGDaxTIN%2F7j%2BwIFrKHIMOYhC1dt5gPFnIQwnj1%2BuyEw5FWF3hKIkD%2Bc&Expires=1765431685 [following]\n",
198
+ "--2025-12-09 18:41:25-- https://encode-public.s3.amazonaws.com/2020/09/19/425880b6-b323-4ee2-95ce-56bdd088d126/ENCFF884LDL.bigWig?response-content-disposition=attachment%3B%20filename%3DENCFF884LDL.bigWig&AWSAccessKeyId=ASIATGZNGCNX3AXUNFS3&Signature=Ca%2Bz1PL7zdbGzyRggtvN686q4oE%3D&x-amz-security-token=IQoJb3JpZ2luX2VjEPr%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMiJGMEQCIAggXesBwHBuGSivVx0RvF5f2vZbk09TPBdf%2FYJUt%2BLWAiAKrh58c%2Bm%2F%2ByrujtQxgltFGzGo5qXSWv%2B0zPaa3gKUTCq8BQjC%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F8BEAAaDDIyMDc0ODcxNDg2MyIMa%2FegIMq%2By2ql10quKpAFATT6r6oWCSXqrBd2gfR8S1QNvY%2BKjvbr%2BvS2ifnF5NqfByJgZxdXVC65WI8fUYqgspTQB5Az%2BE5O4jR8EnFBv%2FjO6DqrWkQQOUsHUFFGXJjarvCPdYjqJmV9SyeTuzNeV0xwFX%2Fleq1%2F4f3eAV81Nv5J%2B8UeHYn5GxtwS%2BjhzVsCJ8tqAo6yRi0wPteU8nb8yLJb%2F%2FWvQLZce7Yc9%2BZkuxKKGoEKQstRSGLCh%2FjtnNfvGp0x20mj5C7wsk61LHBJlNV3KVD7qZHZ57N1CBx5XNuJ%2BkJp6eBU8htM%2FY73tBkp4w5xHNyI5F%2B7JxjDDjo4YOikyLKk7tnTmWfC2lEGXXx33D8xyBxi4oNnK76R0N296GRSHS22esmo12YGK5QNvVbU4SuZUUWjVcrGFqtN%2F7ff1K%2FdqiRyh6TDvXbOUf%2Bk691iqwRY34LbXoJsOzcux5wwQGbHfcSdGrp2Y3KtpDGEdHiiTVHJeHi9pxBvlwvmjM5lXjJjtjOFqXIF%2F%2FygXdl4wUIMMsuinPWpA5xVIk4kg1Bv5XVNuqcPJl7Dl2ZdRzQvwc0Xl5dBL39ZAz9MvCffPV2Fb3hiL5vIQJ2ySdDnqXDhTuUsWGy81MltoznoOVbvuu64FAEp4GdwnwRH1ILlVOKQ1bHR5FSHqb8OFVqAQezRljaJY2ds1J2HMAJ2AJtg3k8XNQScR%2FutxWkI3pYDnAQQQkHHw3aFWNNYbQMfyAAptJohtNGClRoTiepBUckqxpgvMXwEOTJzpUEi0sMIxMkXMWa3ncKFHQAP6P3eKxBOjW8s%2F3BXwRlbgsNdQvqDUdf2dD5KLeHfpyKbdPnG0C6yZAxBF%2Fk4jO1F2F4o533RZGF8Ww7qMc5Ij2ww%2BbPhyQY6sgG2uZfWDKxd1yRNOufiZW%2FAtmcEQg%2BtzoWnq6TxyhU0OCY%2BN7xR8HO4UaT0Od0C06PHugNQCUS6eJusR0IfSRJ7ozZJUomphTeCPXw1G%2B6RVsni%2B9lGE8SlRLTMzNvzQJv8oJNZsoi6DVWlK%2FGt7TgwxSKH8%2BVQmal7nXUqR9f8Dh7CF1KppbVtNiGDaxTIN%2F7j%2BwIFrKHIMOYhC1dt5gPFnIQwnj1%2BuyEw5FWF3hKIkD%2Bc&Expires=1765431685\n",
199
+ "Resolving encode-public.s3.amazonaws.com (encode-public.s3.amazonaws.com)... 3.5.81.13, 52.92.211.217, 52.92.197.57, ...\n",
200
+ "Connecting to encode-public.s3.amazonaws.com (encode-public.s3.amazonaws.com)|3.5.81.13|:443... connected.\n",
201
+ "HTTP request sent, awaiting response... 200 OK\n",
202
+ "Length: 568139478 (542M) [binary/octet-stream]\n",
203
+ "Saving to: 'ENCFF884LDL.bigWig'\n",
204
+ "\n",
205
+ "ENCFF884LDL.bigWig 100%[===================>] 541.82M 9.64MB/s in 79s \n",
206
+ "\n",
207
+ "2025-12-09 18:42:45 (6.88 MB/s) - 'ENCFF884LDL.bigWig' saved [568139478/568139478]\n",
208
+ "\n"
209
+ ]
210
+ }
211
+ ],
212
+ "source": [
213
+ "!wget -c https://www.encodeproject.org/files/ENCFF884LDL/@@download/ENCFF884LDL.bigWig"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "code",
218
+ "execution_count": 5,
219
+ "metadata": {},
220
+ "outputs": [],
221
+ "source": [
222
+ "chrom_mapping = {\n",
223
+ " \"chr1\": \"NC_000001.11\",\n",
224
+ " \"chr2\": \"NC_000002.12\",\n",
225
+ " \"chr3\": \"NC_000003.12\",\n",
226
+ " \"chr4\": \"NC_000004.12\",\n",
227
+ " \"chr5\": \"NC_000005.10\",\n",
228
+ " \"chr6\": \"NC_000006.12\",\n",
229
+ " \"chr7\": \"NC_000007.14\",\n",
230
+ " \"chr8\": \"NC_000008.11\",\n",
231
+ " \"chr9\": \"NC_000009.12\",\n",
232
+ " \"chr10\": \"NC_000010.11\",\n",
233
+ " \"chr11\": \"NC_000011.10\",\n",
234
+ " \"chr12\": \"NC_000012.12\",\n",
235
+ " \"chr13\": \"NC_000013.11\",\n",
236
+ " \"chr14\": \"NC_000014.9\",\n",
237
+ " \"chr15\": \"NC_000015.10\",\n",
238
+ " \"chr16\": \"NC_000016.10\",\n",
239
+ " \"chr17\": \"NC_000017.11\",\n",
240
+ " \"chr18\": \"NC_000018.10\",\n",
241
+ " \"chr19\": \"NC_000019.10\",\n",
242
+ " \"chr20\": \"NC_000020.11\",\n",
243
+ " \"chr21\": \"NC_000021.9\",\n",
244
+ " \"chr22\": \"NC_000022.11\",\n",
245
+ " \"chrX\": \"NC_000023.11\",\n",
246
+ " \"chrY\": \"NC_000024.10\",\n",
247
+ " # mitochondrial\n",
248
+ " \"chrM\": \"NC_012920.1\",\n",
249
+ " \"chrMT\": \"NC_012920.1\",\n",
250
+ "}\n",
251
+ "\n",
252
+ "chrom_splits = {\n",
253
+ " \"train\": [f\"chr{i}\" for i in range(1, 19)],\n",
254
+ " \"val\": [f\"chr{i}\" for i in range(19, 21)],\n",
255
+ " \"test\": [f\"chr{i}\" for i in range(21, 23)],\n",
256
+ "}"
257
+ ]
258
+ },
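Before building datasets it can be worth confirming that the bigWig and the FASTA agree on chromosome names and lengths. A minimal sketch, assuming the two files downloaded above sit in the working directory:

```python
# Sanity check: each bigWig chromosome should map to a FASTA contig of a
# compatible length (the dataset below also intersects the two for safety).
import pyBigWig
from pyfaidx import Fasta

fasta = Fasta("GCF_000001405.40_GRCh38.p14_genomic.fna")
bw = pyBigWig.open("ENCFF884LDL.bigWig")
for bw_name, fa_name in chrom_mapping.items():
    if bw_name in bw.chroms() and fa_name in fasta:
        print(bw_name, fa_name, bw.chroms()[bw_name], len(fasta[fa_name]))
bw.close()
```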
259
+ {
260
+ "cell_type": "markdown",
261
+ "metadata": {},
262
+ "source": [
263
+ "# 3. Model and tokenizer setup"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "code",
268
+ "execution_count": 71,
269
+ "metadata": {},
270
+ "outputs": [],
271
+ "source": [
272
+ "class LinearHead(nn.Module):\n",
273
+ " \"\"\"A linear head that predicts one scalar value per track.\"\"\"\n",
274
+ " def __init__(self, embed_dim: int, num_labels: int):\n",
275
+ " super().__init__()\n",
276
+ " self.layer_norm = nn.LayerNorm(embed_dim)\n",
277
+ " self.head = nn.Linear(embed_dim, num_labels)\n",
278
+ " \n",
279
+ " def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
280
+ " x = self.layer_norm(x)\n",
281
+ " x = self.head(x)\n",
282
+ " x = F.softplus(x) # Ensure positive values\n",
283
+ " return x\n",
284
+ "\n",
285
+ "\n",
286
+ "class HFModelWithHead(nn.Module):\n",
287
+ " \"\"\"Simple model wrapper: HF backbone + bigwig head.\"\"\"\n",
288
+ " \n",
289
+ " def __init__(\n",
290
+ " self,\n",
291
+ " model_name: str,\n",
292
+ " bigwig_track_names: List[str],\n",
293
+ " keep_target_center_fraction: float = 0.375,\n",
294
+ " pretrained: bool = True,\n",
295
+ " ):\n",
296
+ " super().__init__()\n",
297
+ " \n",
298
+ " # Load config and model\n",
299
+ " self.config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)\n",
300
+ "\n",
301
+ " if pretrained:\n",
302
+ " self.backbone = AutoModelForMaskedLM.from_pretrained(\n",
303
+ " model_name, \n",
304
+ " trust_remote_code=True,\n",
305
+ " config=self.config\n",
306
+ " )\n",
307
+ " else:\n",
308
+ " self.backbone = AutoModelForMaskedLM.from_config(\n",
309
+ " self.config, \n",
310
+ " trust_remote_code=True\n",
311
+ " )\n",
312
+ " \n",
313
+ " self.keep_target_center_fraction = keep_target_center_fraction\n",
314
+ "\n",
315
+ " if hasattr(self.config, \"embed_dim\"):\n",
316
+ " embed_dim = self.config.embed_dim\n",
317
+ " else:\n",
318
+ " raise ValueError(f\"Could not determine embed_dim for {model_name}\")\n",
319
+ " \n",
320
+ " # Bigwig head (NTv3 outputs at single-nucleotide resolution)\n",
321
+ " self.bigwig_head = LinearHead(embed_dim, len(bigwig_track_names))\n",
322
+ " self.model_name = model_name\n",
323
+ " \n",
324
+ " def forward(self, tokens: torch.Tensor, **kwargs) -> Dict[str, torch.Tensor]:\n",
325
+ " # Forward through backbone\n",
326
+ " outputs = self.backbone(input_ids=tokens)\n",
327
+ " embedding = outputs.hidden_states[-1] # Last hidden state\n",
328
+ " \n",
329
+ " # Crop to center fraction\n",
330
+ " if self.keep_target_center_fraction < 1.0:\n",
331
+ " seq_len = embedding.shape[1]\n",
332
+ " target_offset = int(seq_len * (1 - self.keep_target_center_fraction) // 2)\n",
333
+ " target_length = seq_len - 2 * target_offset\n",
334
+ " embedding = embedding[:, target_offset:target_offset + target_length, :]\n",
335
+ " \n",
336
+ " # Predict bigwig tracks\n",
337
+ " bigwig_logits = self.bigwig_head(embedding)\n",
338
+ " \n",
339
+ " return {\"bigwig_tracks_logits\": bigwig_logits}"
340
+ ]
341
+ },
342
+ {
343
+ "cell_type": "code",
344
+ "execution_count": 72,
345
+ "metadata": {},
346
+ "outputs": [
347
+ {
348
+ "name": "stdout",
349
+ "output_type": "stream",
350
+ "text": [
351
+ "Model loaded: InstaDeepAI/ntv3_8M_7downsample_pretrained_le_1mb\n",
352
+ "Number of bigwig tracks: 1\n",
353
+ "Model parameters: 7,693,244\n"
354
+ ]
355
+ }
356
+ ],
357
+ "source": [
358
+ "# Load tokenizer\n",
359
+ "tokenizer = AutoTokenizer.from_pretrained(config[\"model_name\"], trust_remote_code=True)\n",
360
+ "if tokenizer.pad_token is None:\n",
361
+ " if tokenizer.eos_token is not None:\n",
362
+ " tokenizer.pad_token = tokenizer.eos_token\n",
363
+ " else:\n",
364
+ " tokenizer.add_special_tokens({\"pad_token\": \"[PAD]\"})\n",
365
+ "\n",
366
+ "# Create model\n",
367
+ "model = HFModelWithHead(\n",
368
+ " model_name=config[\"model_name\"],\n",
369
+ " bigwig_track_names=config[\"bigwig_file_ids\"],\n",
370
+ " keep_target_center_fraction=config[\"keep_target_center_fraction\"],\n",
371
+ " pretrained=config[\"pretrained\"],\n",
372
+ ")\n",
373
+ "model = model.to(device)\n",
374
+ "model.train()\n",
375
+ "\n",
376
+ "print(f\"Model loaded: {config['model_name']}\")\n",
377
+ "print(f\"Number of bigwig tracks: {len(config['bigwig_file_ids'])}\")\n",
378
+ "print(f\"Model parameters: {sum(p.numel() for p in model.parameters()):,}\")"
379
+ ]
380
+ },
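A quick forward-pass smoke test can confirm the head's output shape before wiring up any data. A sketch reusing the `tokenizer` and `model` from the cell above; the exact token count depends on whether the NTv3 tokenizer adds special tokens:

```python
# With keep_target_center_fraction=0.375, the logits should cover roughly
# 37.5% of the input positions, with one channel per bigwig track.
with torch.no_grad():
    dummy_seq = "ACGT" * (config["sequence_length"] // 4)
    dummy_tokens = tokenizer(dummy_seq, return_tensors="pt")["input_ids"].to(device)
    out = model(tokens=dummy_tokens)
print(dummy_tokens.shape, out["bigwig_tracks_logits"].shape)
```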
381
+ {
382
+ "cell_type": "markdown",
383
+ "metadata": {},
384
+ "source": [
385
+ "# 4. Data loading"
386
+ ]
387
+ },
388
+ {
389
+ "cell_type": "code",
390
+ "execution_count": null,
391
+ "metadata": {},
392
+ "outputs": [],
393
+ "source": [
394
+ "class GenomeBigWigDataset(Dataset):\n",
395
+ " \"\"\"\n",
396
+ " Random genomic windows from a reference genome + bigWig signal.\n",
397
+ "\n",
398
+ " Each sample:\n",
399
+ " - picks a chromosome from `chroms`,\n",
400
+ " - picks a random window of length `window_size`,\n",
401
+ " - returns (sequence, signal, chrom, start, end).\n",
402
+ "\n",
403
+ " Args\n",
404
+ " ----\n",
405
+ " fasta_path : str\n",
406
+ " Path to the reference genome FASTA (e.g. hg38.fna).\n",
407
+ " bigwig_path : str\n",
408
+ " Path to the bigWig file (e.g. ENCFF884LDL.bigWig).\n",
409
+ " chroms : List[str]\n",
410
+ " Chromosome names as they appear in the bigWig (e.g. [\"chr1\", \"chr2\", ...]).\n",
411
+ " window_size : int\n",
412
+ " Length of each random window (in bp).\n",
413
+ " num_samples : int\n",
414
+ " Number of samples the dataset will provide (len(dataset)).\n",
415
+ " chrom_mapping : Optional[Dict[str, str]]\n",
416
+ " Optional mapping from bigWig chrom name -> FASTA chrom name.\n",
417
+ " If None, assumes the same names in both.\n",
418
+ " Example for hg38 RefSeq FASTA:\n",
419
+ " {\n",
420
+ " \"chr1\": \"NC_000001.11\",\n",
421
+ " \"chr2\": \"NC_000002.12\",\n",
422
+ " ...\n",
423
+ " }\n",
424
+ " \"\"\"\n",
425
+ "\n",
426
+ " def __init__(\n",
427
+ " self,\n",
428
+ " fasta_path: str,\n",
429
+ " bigwig_path_list: list[str],\n",
430
+ " chroms: List[str],\n",
431
+ " sequence_length: int,\n",
432
+ " num_samples: int,\n",
433
+ " tokenizer: AutoTokenizer,\n",
434
+ " chrom_mapping: Optional[Dict[str, str]] = None,\n",
435
+ " keep_target_center_fraction: float = 1.0,\n",
436
+ " num_tracks: int = 1,\n",
437
+ " ):\n",
438
+ " super().__init__()\n",
439
+ "\n",
440
+ " self.fasta = Fasta(fasta_path, as_raw=True, sequence_always_upper=True)\n",
441
+ " self.bw_list = [\n",
442
+ " pyBigWig.open(bigwig_path)\n",
443
+ " for bigwig_path in bigwig_path_list\n",
444
+ " ]\n",
445
+ " self.sequence_length = sequence_length\n",
446
+ " self.num_samples = num_samples\n",
447
+ " self.tokenizer = tokenizer\n",
448
+ " self.keep_target_center_fraction = keep_target_center_fraction\n",
449
+ " self.num_tracks = num_tracks\n",
450
+ "\n",
451
+ " self.chroms = chroms\n",
452
+ " self.chrom_mapping = chrom_mapping or {c: c for c in chroms}\n",
453
+ "\n",
454
+ " # Intersect lengths between FASTA and bigWig for safety\n",
455
+ " bw_chrom_lengths = self.bw_list[0].chroms() # dict: chrom -> length\n",
456
+ "\n",
457
+ " self.valid_chroms = []\n",
458
+ " self.chrom_lengths = {}\n",
459
+ "\n",
460
+ " for c in chroms:\n",
461
+ " if c not in bw_chrom_lengths:\n",
462
+ " continue\n",
463
+ " fa_name = self.chrom_mapping.get(c, c)\n",
464
+ " if fa_name not in self.fasta:\n",
465
+ " continue\n",
466
+ "\n",
467
+ " fa_len = len(self.fasta[fa_name])\n",
468
+ " bw_len = bw_chrom_lengths[c]\n",
469
+ " L = min(fa_len, bw_len)\n",
470
+ "\n",
471
+ " if L > self.sequence_length:\n",
472
+ " self.valid_chroms.append(c)\n",
473
+ " self.chrom_lengths[c] = L\n",
474
+ "\n",
475
+ " if not self.valid_chroms:\n",
476
+ " raise ValueError(\"No valid chromosomes after intersecting FASTA and bigWig.\")\n",
477
+ "\n",
478
+ " def __len__(self):\n",
479
+ " return self.num_samples\n",
480
+ "\n",
481
+ " def __getitem__(self, idx):\n",
482
+ " # Ignore idx, sample randomly\n",
483
+ " chrom = random.choice(self.valid_chroms)\n",
484
+ " chrom_len = self.chrom_lengths[chrom]\n",
485
+ "\n",
486
+ " max_start = chrom_len - self.sequence_length\n",
487
+ " start = random.randint(0, max_start)\n",
488
+ " end = start + self.sequence_length\n",
489
+ "\n",
490
+ " # FASTA chromosome name may differ\n",
491
+ " fa_chrom = self.chrom_mapping.get(chrom, chrom)\n",
492
+ "\n",
493
+ " # Sequence\n",
494
+ " seq = self.fasta[fa_chrom][start:end] # string slice\n",
495
+ " tokens = self.tokenizer(\n",
496
+ " seq,\n",
497
+ " return_tensors=\"pt\", # Returns a dict of PyTorch tensors\n",
498
+ " )[\"input_ids\"][0]\n",
499
+ " # The 'input_ids' field contains the tokenized sequence.\n",
500
+ " # For a single input string, its shape is typically (1, len(seq))\n",
501
+ "\n",
502
+ " # Signal from bigWig tracks (numpy array) -> torch tensor\n",
503
+ " bigwig_targets = [\n",
504
+ " self.bw_list[i].values(chrom, start, end, numpy=True)\n",
505
+ " for i in range(len(self.bw_list))\n",
506
+ " ]\n",
507
+ " # pyBigWig returns NaN where no data; turn NaN into 0\n",
508
+ " bigwig_targets = torch.tensor(bigwig_targets, dtype=torch.float32)\n",
509
+ " bigwig_targets = torch.nan_to_num(bigwig_targets, nan=0.0)\n",
510
+ " \n",
511
+ " # Crop targets to center fraction\n",
512
+ " if self.keep_target_center_fraction < 1.0:\n",
513
+ " seq_len = bigwig_targets.shape[0]\n",
514
+ " target_offset = int(seq_len * (1 - self.keep_target_center_fraction) // 2)\n",
515
+ " target_length = seq_len - 2 * target_offset\n",
516
+ " bigwig_targets = bigwig_targets[target_offset:target_offset + target_length]\n",
517
+ "\n",
518
+ " sample = {\n",
519
+ " \"tokens\": tokens,\n",
520
+ " \"bigwig_targets\": bigwig_targets,\n",
521
+ " \"chrom\": chrom,\n",
522
+ " \"start\": start,\n",
523
+ " \"end\": end,\n",
524
+ " }\n",
525
+ " return sample"
526
+ ]
527
+ },
528
+ {
529
+ "cell_type": "code",
530
+ "execution_count": null,
531
+ "metadata": {},
532
+ "outputs": [
533
+ {
534
+ "name": "stdout",
535
+ "output_type": "stream",
536
+ "text": [
537
+ "Train samples: 100\n",
538
+ "Val samples: 10\n"
539
+ ]
540
+ }
541
+ ],
542
+ "source": [
543
+ "fasta_path = \"./GCF_000001405.40_GRCh38.p14_genomic.fna\"\n",
544
+ "bigwig_path_list = [\"./ENCFF884LDL.bigWig\"]\n",
545
+ "\n",
546
+ "create_dataset_fn = functools.partial(\n",
547
+ " GenomeBigWigDataset,\n",
548
+ " fasta_path=fasta_path,\n",
549
+ " bigwig_path_list=bigwig_path_list,\n",
550
+ " sequence_length=config[\"sequence_length\"],\n",
551
+ " tokenizer=tokenizer,\n",
552
+ " chrom_mapping=chrom_mapping,\n",
553
+ " keep_target_center_fraction=config[\"keep_target_center_fraction\"],\n",
554
+ " num_tracks=len(config[\"bigwig_file_ids\"]),\n",
555
+ ")\n",
556
+ "\n",
557
+ "train_dataset = create_dataset_fn(\n",
558
+ " chroms=chrom_splits[\"train\"],\n",
559
+ " num_samples=100,\n",
560
+ ")\n",
561
+ "\n",
562
+ "val_dataset = create_dataset_fn(\n",
563
+ " chroms=chrom_splits[\"val\"],\n",
564
+ " num_samples=config[\"num_validation_samples\"],\n",
565
+ ")\n",
566
+ "\n",
567
+ "test_dataset = create_dataset_fn(\n",
568
+ " chroms=chrom_splits[\"test\"],\n",
569
+ " num_samples=config[\"num_validation_samples\"],\n",
570
+ ")\n",
571
+ "\n",
572
+ "# Create dataloaders\n",
573
+ "train_loader = DataLoader(\n",
574
+ " train_dataset,\n",
575
+ " batch_size=config[\"batch_size\"],\n",
576
+ " shuffle=True,\n",
577
+ " num_workers=config[\"num_workers\"],\n",
578
+ ")\n",
579
+ "\n",
580
+ "val_loader = DataLoader(\n",
581
+ " val_dataset,\n",
582
+ " batch_size=config[\"batch_size\"],\n",
583
+ " shuffle=False,\n",
584
+ " num_workers=config[\"num_workers\"],\n",
585
+ ")\n",
586
+ "\n",
587
+ "test_loader = DataLoader(\n",
588
+ " test_dataset,\n",
589
+ " batch_size=config[\"batch_size\"],\n",
590
+ " shuffle=False,\n",
591
+ " num_workers=config[\"num_workers\"],\n",
592
+ ")\n",
593
+ "\n",
594
+ "print(f\"Train samples: {len(train_dataset)}\")\n",
595
+ "print(f\"Val samples: {len(val_dataset)}\")\n",
596
+ "print(f\"Test samples: {len(test_dataset)}\")"
597
+ ]
598
+ },
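Pulling a single sample is a cheap way to verify the shapes the training loop expects. A sketch using the datasets built above:

```python
# One random window: tokens are roughly sequence_length long (plus any
# special tokens), and targets are already cropped to the center fraction.
sample = train_dataset[0]
print(sample["chrom"], sample["start"], sample["end"])
print("tokens:", sample["tokens"].shape)
print("targets:", sample["bigwig_targets"].shape)  # (cropped_seq_len, num_tracks)
```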
599
+ {
600
+ "cell_type": "markdown",
601
+ "metadata": {},
602
+ "source": [
603
+ "# 5. Optimizer and Learning Rate Scheduler"
604
+ ]
605
+ },
606
+ {
607
+ "cell_type": "code",
608
+ "execution_count": 59,
609
+ "metadata": {},
610
+ "outputs": [],
611
+ "source": [
612
+ "# Learning rate scheduler utils\n",
613
+ "def _modified_square_decay(\n",
614
+ " current_step: int,\n",
615
+ " lr_at_step_0: float,\n",
616
+ " lr_peak_after_warmup: float,\n",
617
+ " num_warmup_steps: int,\n",
618
+ " num_training_steps: int,\n",
619
+ ") -> float:\n",
620
+ " \"\"\"\n",
621
+ " Learning rate schedule with linear warmup and square root decay.\n",
622
+ " Simplified version of the pipeline's scheduler.\n",
623
+ " \"\"\"\n",
624
+ " if current_step < num_warmup_steps:\n",
625
+ " # Linear warmup\n",
626
+ " return lr_at_step_0 + (lr_peak_after_warmup - lr_at_step_0) * (current_step / num_warmup_steps)\n",
627
+ " else:\n",
628
+ " # Square root decay\n",
629
+ " progress = (current_step - num_warmup_steps) / (num_training_steps - num_warmup_steps)\n",
630
+ " decay_factor = (1.0 - progress) ** 0.5\n",
631
+ " return lr_peak_after_warmup * decay_factor"
632
+ ]
633
+ },
634
+ {
635
+ "cell_type": "code",
636
+ "execution_count": 60,
637
+ "metadata": {},
638
+ "outputs": [
639
+ {
640
+ "name": "stdout",
641
+ "output_type": "stream",
642
+ "text": [
643
+ "Gradient accumulation steps: 2\n",
644
+ "Effective batch size: 4\n",
645
+ "Effective tokens per update: 4096\n",
646
+ "\n",
647
+ "Training constants:\n",
648
+ " Total training steps: 32\n",
649
+ " Log training metrics every: 2 steps\n",
650
+ " Run validation every: 4 steps\n",
651
+ " Warmup steps: 3\n",
652
+ "\n",
653
+ "Optimizer setup:\n",
654
+ " Initial LR: 1e-05\n",
655
+ " Peak LR: 5e-05\n"
656
+ ]
657
+ }
658
+ ],
659
+ "source": [
660
+ "# Calculate gradient accumulation steps and effective batch size\n",
661
+ "num_devices = 1 # Single device for now\n",
662
+ "sequence_length = config[\"sequence_length\"]\n",
663
+ "batch_size = config[\"batch_size\"]\n",
664
+ "\n",
665
+ "# Calculate gradient accumulation steps\n",
666
+ "num_accumulation_gradient = max(1, int(config[\"num_tokens_per_update\"] // (batch_size * num_devices * sequence_length)))\n",
667
+ "\n",
668
+ "# Calculate effective batch size and tokens per update\n",
669
+ "effective_batch_size = batch_size * num_devices * num_accumulation_gradient\n",
670
+ "effective_num_tokens_per_update = effective_batch_size * sequence_length\n",
671
+ "\n",
672
+ "print(f\"Gradient accumulation steps: {num_accumulation_gradient}\")\n",
673
+ "print(f\"Effective batch size: {effective_batch_size}\")\n",
674
+ "print(f\"Effective tokens per update: {effective_num_tokens_per_update}\")\n",
675
+ "\n",
676
+ "# Compute logging constants (based on deepspeed pipeline: compute_logging_constants)\n",
677
+ "num_train_samples = len(train_dataset)\n",
678
+ "num_tokens_per_update = effective_num_tokens_per_update # Same as effective_num_tokens_per_update\n",
679
+ "\n",
680
+ "# Total training steps based on token budget\n",
681
+ "num_steps_training = config[\"num_tokens_training\"] // num_tokens_per_update\n",
682
+ "\n",
683
+ "# Steps for logging and validation\n",
684
+ "log_train_step = int(np.ceil(config[\"num_tokens_per_log\"] / num_tokens_per_update))\n",
685
+ "log_validation_step = int(np.ceil(config[\"num_tokens_per_validation\"] / num_tokens_per_update))\n",
686
+ "\n",
687
+ "# Warmup steps\n",
688
+ "num_warmup_steps = max(1, int(np.ceil(config[\"num_tokens_warmup\"] / effective_num_tokens_per_update)))\n",
689
+ "\n",
690
+ "print(f\"\\nTraining constants:\")\n",
691
+ "print(f\" Total training steps: {num_steps_training}\")\n",
692
+ "print(f\" Log training metrics every: {log_train_step} steps\")\n",
693
+ "print(f\" Run validation every: {log_validation_step} steps\")\n",
694
+ "print(f\" Warmup steps: {num_warmup_steps}\")\n",
695
+ "\n",
696
+ "# Setup optimizer\n",
697
+ "optimizer = AdamW(\n",
698
+ " model.parameters(),\n",
699
+ " lr=config[\"end_learning_rate\"] if config[\"schedule\"] else config[\"learning_rate\"],\n",
700
+ " weight_decay=config[\"weight_decay\"],\n",
701
+ ")\n",
702
+ "\n",
703
+ "# Setup scheduler\n",
704
+ "if config[\"schedule\"]:\n",
705
+ " lr_scheduler_fn = lambda step: _modified_square_decay(\n",
706
+ " current_step=step,\n",
707
+ " lr_at_step_0=config[\"learning_rate\"],\n",
708
+ " lr_peak_after_warmup=config[\"end_learning_rate\"],\n",
709
+ " num_warmup_steps=num_warmup_steps,\n",
710
+ " num_training_steps=num_steps_training,\n",
711
+ " )\n",
712
+ " scheduler = LambdaLR(optimizer, lr_lambda=lr_scheduler_fn)\n",
713
+ "else:\n",
714
+ " scheduler = None\n",
715
+ "\n",
716
+ "print(f\"\\nOptimizer setup:\")\n",
717
+ "print(f\" Initial LR: {config['learning_rate']}\")\n",
718
+ "print(f\" Peak LR: {config['end_learning_rate']}\")"
719
+ ]
720
+ },
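Note that `LambdaLR` multiplies the optimizer's base LR by the lambda's return value, hence the division by the peak LR in the cell above. Tabulating the schedule at a few steps confirms the warmup and decay behave as intended (a sketch reusing the constants computed above):

```python
# Print the scheduled LR at the start, the warmup peak, midway, and the end.
for step in [0, num_warmup_steps, num_steps_training // 2, num_steps_training - 1]:
    lr = _modified_square_decay(
        current_step=step,
        lr_at_step_0=config["learning_rate"],
        lr_peak_after_warmup=config["end_learning_rate"],
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_steps_training,
    )
    print(f"step {step:>3}: lr = {lr:.2e}")
```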
721
+ {
722
+ "cell_type": "markdown",
723
+ "metadata": {},
724
+ "source": [
725
+ "# 6. Metrics setup (using TorchMetrics)"
726
+ ]
727
+ },
728
+ {
729
+ "cell_type": "code",
730
+ "execution_count": null,
731
+ "metadata": {},
732
+ "outputs": [],
733
+ "source": [
734
+ "class TracksMetrics:\n",
735
+ " \"\"\"Simple metrics tracker for tracks prediction with both scaled and raw metrics.\"\"\"\n",
736
+ " \n",
737
+ " def __init__(self, track_names: List[str]):\n",
738
+ " self.track_names = track_names\n",
739
+ " self.num_tracks = len(track_names)\n",
740
+ " # Scaled metrics: comparing scaled targets with scaled predictions\n",
741
+ " self.pearson_metrics_scaled = [\n",
742
+ " PearsonCorrCoef().to(device) for _ in range(self.num_tracks)\n",
743
+ " ]\n",
744
+ " # Raw metrics: comparing raw targets with unscaled predictions\n",
745
+ " self.pearson_metrics_raw = [\n",
746
+ " PearsonCorrCoef().to(device) for _ in range(self.num_tracks)\n",
747
+ " ]\n",
748
+ " self.losses = []\n",
749
+ " \n",
750
+ " def reset(self):\n",
751
+ " for metric in self.pearson_metrics_scaled:\n",
752
+ " metric.reset()\n",
753
+ " for metric in self.pearson_metrics_raw:\n",
754
+ " metric.reset()\n",
755
+ " self.losses = []\n",
756
+ " \n",
757
+ " def update(\n",
758
+ " self, \n",
759
+ " predictions_scaled: torch.Tensor, \n",
760
+ " targets_scaled: torch.Tensor,\n",
761
+ " predictions_raw: torch.Tensor,\n",
762
+ " targets_raw: torch.Tensor,\n",
763
+ " loss: float\n",
764
+ " ):\n",
765
+ " \"\"\"\n",
766
+ " Update both scaled and raw metrics.\n",
767
+ " Args:\n",
768
+ " predictions_scaled: (batch, seq_len, num_tracks) - scaled predictions\n",
769
+ " targets_scaled: (batch, seq_len, num_tracks) - scaled targets\n",
770
+ " predictions_raw: (batch, seq_len, num_tracks) - raw/unscaled predictions\n",
771
+ " targets_raw: (batch, seq_len, num_tracks) - raw targets\n",
772
+ " loss: scalar loss value\n",
773
+ " \"\"\"\n",
774
+ " # Flatten batch and sequence dimensions\n",
775
+ " pred_scaled_flat = predictions_scaled.detach().reshape(-1, self.num_tracks) # (N, num_tracks)\n",
776
+ " target_scaled_flat = targets_scaled.detach().reshape(-1, self.num_tracks) # (N, num_tracks)\n",
777
+ " pred_raw_flat = predictions_raw.detach().reshape(-1, self.num_tracks) # (N, num_tracks)\n",
778
+ " target_raw_flat = targets_raw.detach().reshape(-1, self.num_tracks) # (N, num_tracks)\n",
779
+ " \n",
780
+ " # Update scaled metrics\n",
781
+ " for i, metric in enumerate(self.pearson_metrics_scaled):\n",
782
+ " metric.update(pred_scaled_flat[:, i], target_scaled_flat[:, i])\n",
783
+ " \n",
784
+ " # Update raw metrics\n",
785
+ " for i, metric in enumerate(self.pearson_metrics_raw):\n",
786
+ " metric.update(pred_raw_flat[:, i], target_raw_flat[:, i])\n",
787
+ " \n",
788
+ " self.losses.append(loss)\n",
789
+ " \n",
790
+ " def compute(self) -> Dict[str, float]:\n",
791
+ " \"\"\"Compute and return all metrics (both scaled and raw).\"\"\"\n",
792
+ " metrics_dict = {}\n",
793
+ " \n",
794
+ " # Scaled metrics: per-track Pearson correlations\n",
795
+ " for i, (track_name, metric) in enumerate(zip(self.track_names, self.pearson_metrics_scaled)):\n",
796
+ " corr = metric.compute().item()\n",
797
+ " metrics_dict[f\"{track_name}/pearson_scaled\"] = corr\n",
798
+ " \n",
799
+ " # Scaled metrics: mean Pearson correlation\n",
800
+ " correlations_scaled = [metric.compute().item() for metric in self.pearson_metrics_scaled]\n",
801
+ " metrics_dict[\"mean/pearson_scaled\"] = np.nanmean(correlations_scaled)\n",
802
+ " \n",
803
+ " # Raw metrics: per-track Pearson correlations\n",
804
+ " for i, (track_name, metric) in enumerate(zip(self.track_names, self.pearson_metrics_raw)):\n",
805
+ " corr = metric.compute().item()\n",
806
+ " metrics_dict[f\"{track_name}/pearson_raw\"] = corr\n",
807
+ " \n",
808
+ " # Raw metrics: mean Pearson correlation\n",
809
+ " correlations_raw = [metric.compute().item() for metric in self.pearson_metrics_raw]\n",
810
+ " metrics_dict[\"mean/pearson_raw\"] = np.nanmean(correlations_raw)\n",
811
+ " \n",
812
+ " # Mean loss\n",
813
+ " metrics_dict[\"loss\"] = np.mean(self.losses) if self.losses else 0.0\n",
814
+ " \n",
815
+ " return metrics_dict"
816
+ ]
817
+ },
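The tracker leans on torchmetrics' streaming statistics: `PearsonCorrCoef` accumulates state across `update()` calls and only forms the correlation at `compute()`. A minimal illustration:

```python
# Streaming Pearson: state accumulates across batches before compute().
m = PearsonCorrCoef()
m.update(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([2.0, 4.0, 6.0]))
m.update(torch.tensor([4.0, 5.0]), torch.tensor([8.0, 10.0]))
print(m.compute())  # tensor(1.) -- targets are a scaled copy of predictions
```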
818
+ {
819
+ "cell_type": "code",
820
+ "execution_count": null,
821
+ "metadata": {},
822
+ "outputs": [],
823
+ "source": [
824
+ "train_metrics = TracksMetrics(config[\"bigwig_file_ids\"])\n",
825
+ "val_metrics = TracksMetrics(config[\"bigwig_file_ids\"])\n",
826
+ "test_metrics = TracksMetrics(config[\"bigwig_file_ids\"])"
827
+ ]
828
+ },
829
+ {
830
+ "cell_type": "markdown",
831
+ "metadata": {},
832
+ "source": [
833
+ "# 7. Scaling functions setup (copied from pipeline)"
834
+ ]
835
+ },
836
+ {
837
+ "cell_type": "code",
838
+ "execution_count": 63,
839
+ "metadata": {},
840
+ "outputs": [
841
+ {
842
+ "name": "stdout",
843
+ "output_type": "stream",
844
+ "text": [
845
+ "Scaling functions created\n"
846
+ ]
847
+ }
848
+ ],
849
+ "source": [
850
+ "def get_track_means(bigwig_file_ids: List[str]) -> np.ndarray:\n",
851
+ " \"\"\"\n",
852
+ " Get track means for normalization.\n",
853
+ " For now, return dummy values. In real pipeline, this loads from metadata.\n",
854
+ " \"\"\"\n",
855
+ " # Dummy values - in real pipeline, this would load from actual metadata\n",
856
+ " return np.ones(len(bigwig_file_ids), dtype=np.float32) * 1.0\n",
857
+ "\n",
858
+ "\n",
859
+ "def get_rna_seq_track_ids(bigwig_file_ids: List[str]) -> List[int]:\n",
860
+ " \"\"\"\n",
861
+ " Get RNA-seq track indices.\n",
862
+ " For now, return empty list. In real pipeline, this identifies RNA-seq tracks.\n",
863
+ " \"\"\"\n",
864
+ " # Dummy - in real pipeline, this would identify RNA-seq tracks\n",
865
+ " return []\n",
866
+ "\n",
867
+ "\n",
868
+ "def create_targets_scaling_fn(bigwig_file_ids: List[str]) -> Callable[[torch.Tensor], torch.Tensor]:\n",
869
+ " \"\"\"\n",
870
+ " Build a scaling function based on track means and RNA-seq squashing.\n",
871
+ " Copied from the supervised tracks pipeline.\n",
872
+ " \"\"\"\n",
873
+ " # Load track means\n",
874
+ " track_means_np = get_track_means(bigwig_file_ids)\n",
875
+ " track_means = torch.tensor(track_means_np, dtype=torch.float32)\n",
876
+ " \n",
877
+ " # Get which tracks use squashing\n",
878
+ " rna_ids = get_rna_seq_track_ids(bigwig_file_ids)\n",
879
+ " apply_squashing = torch.zeros((len(bigwig_file_ids),), dtype=torch.bool)\n",
880
+ " if len(rna_ids) > 0:\n",
881
+ " apply_squashing[rna_ids] = True\n",
882
+ " \n",
883
+ " def transform_fn(x: torch.Tensor) -> torch.Tensor:\n",
884
+ " \"\"\"\n",
885
+ " x: torch.Tensor, shape (batch, seq_len, num_tracks)\n",
886
+ " \"\"\"\n",
887
+ " device = x.device\n",
888
+ " \n",
889
+ " # Move constants to correct device\n",
890
+ " means = track_means.to(device)\n",
891
+ " squash_mask = apply_squashing.to(device)\n",
892
+ " \n",
893
+ " # Normalize\n",
894
+ " scaled = x / means\n",
895
+ " \n",
896
+ " # Power squashing where needed\n",
897
+ " squashed = torch.where(\n",
898
+ " squash_mask.view(1, 1, -1),\n",
899
+ " scaled.pow(0.75),\n",
900
+ " scaled,\n",
901
+ " )\n",
902
+ " \n",
903
+ " # Smooth clipping: if > 10, apply formula\n",
904
+ " clipped = torch.where(\n",
905
+ " squashed > 10.0,\n",
906
+ " 2.0 * torch.sqrt(squashed * 10.0) - 10.0,\n",
907
+ " squashed,\n",
908
+ " )\n",
909
+ " \n",
910
+ " return clipped\n",
911
+ " \n",
912
+ " return transform_fn\n",
913
+ "\n",
914
+ "\n",
915
+ "def create_predictions_scaling_fn(bigwig_file_ids: List[str]) -> Callable[[torch.Tensor], torch.Tensor]:\n",
916
+ " \"\"\"\n",
917
+ " Inverse scaling function to apply on predictions before computing metrics.\n",
918
+ " Copied from the supervised tracks pipeline.\n",
919
+ " \"\"\"\n",
920
+ " # Load means\n",
921
+ " track_means_np = get_track_means(bigwig_file_ids)\n",
922
+ " track_means = torch.tensor(track_means_np, dtype=torch.float32)\n",
923
+ " \n",
924
+ " # RNA-seq mask\n",
925
+ " rna_ids = get_rna_seq_track_ids(bigwig_file_ids)\n",
926
+ " apply_squashing = torch.zeros((len(bigwig_file_ids),), dtype=torch.bool)\n",
927
+ " if len(rna_ids) > 0:\n",
928
+ " apply_squashing[rna_ids] = True\n",
929
+ " \n",
930
+ " def inverse_transform_fn(x: torch.Tensor) -> torch.Tensor:\n",
931
+ " \"\"\"\n",
932
+ " x: torch.Tensor, shape (batch, seq_len, num_tracks)\n",
933
+ " \"\"\"\n",
934
+ " device = x.device\n",
935
+ " means = track_means.to(device)\n",
936
+ " squash_mask = apply_squashing.to(device)\n",
937
+ " \n",
938
+ " # Undo clipping\n",
939
+ " unclipped = torch.where(\n",
940
+ " x > 10.0,\n",
941
+ " (x + 10.0).pow(2) / (4 * 10.0),\n",
942
+ " x,\n",
943
+ " )\n",
944
+ " \n",
945
+ " # Undo squashing\n",
946
+ " unsquashed = torch.where(\n",
947
+ " squash_mask.view(1, 1, -1),\n",
948
+ " unclipped.pow(1.0 / 0.75),\n",
949
+ " unclipped,\n",
950
+ " )\n",
951
+ " \n",
952
+ " # Undo normalization\n",
953
+ " return unsquashed * means\n",
954
+ " \n",
955
+ " return inverse_transform_fn\n",
956
+ "\n",
957
+ "\n",
958
+ "# Create scaling functions\n",
959
+ "scale_targets_fn = create_targets_scaling_fn(config[\"bigwig_file_ids\"])\n",
960
+ "scale_predictions_fn = create_predictions_scaling_fn(config[\"bigwig_file_ids\"])\n",
961
+ "\n",
962
+ "print(\"Scaling functions created\")"
963
+ ]
964
+ },
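With the dummy means of 1 and no RNA-seq squashing active, the prediction scaler should exactly invert the target scaler. A small round-trip check (a sketch, not part of the original pipeline):

```python
# Round-trip: unclip(clip(x)) recovers x up to float error, including
# values above the soft-clipping threshold of 10.
x = torch.rand(2, 8, len(config["bigwig_file_ids"])) * 50.0
recovered = scale_predictions_fn(scale_targets_fn(x))
print(torch.allclose(recovered, x, atol=1e-4))  # True
```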
965
+ {
966
+ "cell_type": "markdown",
967
+ "metadata": {},
968
+ "source": [
969
+ "# 8. Loss functions"
970
+ ]
971
+ },
972
+ {
973
+ "cell_type": "code",
974
+ "execution_count": 64,
975
+ "metadata": {},
976
+ "outputs": [],
977
+ "source": [
978
+ "def poisson_loss(ytrue: torch.Tensor, ypred: torch.Tensor, epsilon: float = 1e-7) -> torch.Tensor:\n",
979
+ " \"\"\"Poisson loss per element: ypred - ytrue * log(ypred).\"\"\"\n",
980
+ " return ypred - ytrue * torch.log(ypred + epsilon)\n",
981
+ "\n",
982
+ "\n",
983
+ "def safe_for_grad_log_torch(x: torch.Tensor) -> torch.Tensor:\n",
984
+ " \"\"\"Guarantees that the log is defined for all x > 0 in a differentiable way.\"\"\"\n",
985
+ " return torch.log(torch.where(x > 0.0, x, torch.ones_like(x)))\n",
986
+ "\n",
987
+ "\n",
988
+ "def poisson_multinomial_loss(\n",
989
+ " logits: torch.Tensor,\n",
990
+ " targets: torch.Tensor,\n",
991
+ " mask: torch.Tensor | None = None,\n",
992
+ " shape_loss_coefficient: float = 5.0,\n",
993
+ " epsilon: float = 1e-7,\n",
994
+ ") -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]:\n",
995
+ " \"\"\"\n",
996
+ " Regression loss for bigwig tracks (MSE, Poisson, or Poisson-Multinomial).\n",
997
+ " \"\"\"\n",
998
+ " scale_loss, shape_loss = None, None\n",
999
+ " \n",
1000
+ " if mask is None:\n",
1001
+ " mask = torch.ones_like(targets, dtype=torch.float32, device=targets.device)\n",
1002
+ " else:\n",
1003
+ " mask = mask.float()\n",
1004
+ " \n",
1005
+ " mask_sum = mask.sum() + epsilon\n",
1006
+ " masked_logits = logits * mask\n",
1007
+ " masked_targets = targets * mask\n",
1008
+ "\n",
1009
+ " # Scale loss\n",
1010
+ " mask_sum_per_track_per_seq = mask.sum(dim=1) # (batch, num_tracks)\n",
1011
+ " mask_per_sequence = mask_sum_per_track_per_seq > 0.0 # (batch, num_tracks)\n",
1012
+ " \n",
1013
+ " sum_pred = masked_logits.sum(dim=1) # (batch, num_tracks)\n",
1014
+ " sum_true = masked_targets.sum(dim=1) # (batch, num_tracks)\n",
1015
+ " \n",
1016
+ " scale_loss = poisson_loss(sum_true, sum_pred, epsilon=epsilon)\n",
1017
+ " scale_loss = scale_loss / (mask_sum_per_track_per_seq + epsilon)\n",
1018
+ " \n",
1019
+ " if mask_per_sequence.any():\n",
1020
+ " scale_loss_filtered = scale_loss[mask_per_sequence]\n",
1021
+ " scale_loss = scale_loss_filtered.mean()\n",
1022
+ " else:\n",
1023
+ " scale_loss = torch.tensor(0.0, device=targets.device, dtype=targets.dtype)\n",
1024
+ " \n",
1025
+ " # Shape loss\n",
1026
+ " predicted_counts = masked_logits + (epsilon * mask)\n",
1027
+ " masked_targets_with_epsilon = masked_targets + (epsilon * mask)\n",
1028
+ " \n",
1029
+ " denom = predicted_counts.sum(dim=1, keepdim=True) + epsilon\n",
1030
+ " p_pred = predicted_counts / denom\n",
1031
+ " \n",
1032
+ " pl_pred = safe_for_grad_log_torch(p_pred)\n",
1033
+ " shape_loss = -(masked_targets_with_epsilon * pl_pred).sum() / mask_sum\n",
1034
+ " \n",
1035
+ " # Combine\n",
1036
+ " loss = shape_loss + scale_loss / shape_loss_coefficient\n",
1037
+ "\n",
1038
+ " return loss, scale_loss, shape_loss\n"
1039
+ ]
1040
+ },
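A smoke test on random positive tensors verifies that the combined loss is finite and differentiable, and exposes the two components separately (a sketch; shapes follow the (batch, seq_len, num_tracks) convention used throughout):

```python
# The softplus head guarantees positive predictions, so random positives
# are a fair stand-in for model outputs here.
preds = torch.rand(2, 16, 1) + 0.1
preds.requires_grad_(True)
targs = torch.rand(2, 16, 1)
loss, scale_l, shape_l = poisson_multinomial_loss(logits=preds, targets=targs)
loss.backward()
print(f"loss={loss.item():.4f} scale={scale_l.item():.4f} shape={shape_l.item():.4f}")
```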
1041
+ {
1042
+ "cell_type": "markdown",
1043
+ "metadata": {},
1044
+ "source": [
1045
+ "# 9. Training loop"
1046
+ ]
1047
+ },
1048
+ {
1049
+ "cell_type": "code",
1050
+ "execution_count": null,
1051
+ "metadata": {},
1052
+ "outputs": [],
1053
+ "source": [
1054
+ "def train_step(\n",
1055
+ " model: nn.Module,\n",
1056
+ " batch: Dict[str, torch.Tensor],\n",
1057
+ " optimizer: torch.optim.Optimizer,\n",
1058
+ " scale_targets_fn: Callable,\n",
1059
+ " config: Dict,\n",
1060
+ " num_accumulation_steps: int = 1,\n",
1061
+ ") -> float:\n",
1062
+ " \"\"\"Single training step with gradient accumulation support.\"\"\"\n",
1063
+ " tokens = batch[\"tokens\"].to(device)\n",
1064
+ " bigwig_targets = batch[\"bigwig_targets\"].to(device) # Shape: (batch, seq_len_cropped, num_tracks)\n",
1065
+ " \n",
1066
+ " # Forward pass\n",
1067
+ " outputs = model(tokens=tokens)\n",
1068
+ " bigwig_logits = outputs[\"bigwig_tracks_logits\"] # Shape: (batch, cropped_seq_len, num_tracks)\n",
1069
+ " \n",
1070
+ " # Scale targets\n",
1071
+ " scaled_targets = scale_targets_fn(bigwig_targets)\n",
1072
+ " \n",
1073
+ " # Compute loss\n",
1074
+ " loss, _, _ = poisson_multinomial_loss(\n",
1075
+ " logits=bigwig_logits,\n",
1076
+ " targets=scaled_targets,\n",
1077
+ " shape_loss_coefficient=config[\"bigwig_shape_loss_coefficient\"],\n",
1078
+ " )\n",
1079
+ " \n",
1080
+ " # Scale loss by accumulation steps (for gradient accumulation)\n",
1081
+ " loss = loss / num_accumulation_steps\n",
1082
+ " \n",
1083
+ " # Backward pass (accumulate gradients)\n",
1084
+ " loss.backward()\n",
1085
+ " \n",
1086
+ " return loss.item() * num_accumulation_steps # Return unscaled loss for logging\n",
1087
+ "\n",
1088
+ "\n",
1089
+ "def validation_step(\n",
1090
+ " model: nn.Module,\n",
1091
+ " batch: Dict[str, torch.Tensor],\n",
1092
+ " scale_targets_fn: Callable,\n",
1093
+ " scale_predictions_fn: Callable,\n",
1094
+ " metrics: TracksMetrics,\n",
1095
+ " config: Dict,\n",
1096
+ ") -> float:\n",
1097
+ " \"\"\"Single validation step.\"\"\"\n",
1098
+ " model.eval()\n",
1099
+ " \n",
1100
+ " tokens = batch[\"tokens\"].to(device)\n",
1101
+ " bigwig_targets = batch[\"bigwig_targets\"].to(device)\n",
1102
+ " \n",
1103
+ " with torch.no_grad():\n",
1104
+ " # Forward pass\n",
1105
+ " outputs = model(tokens=tokens)\n",
1106
+ " bigwig_logits = outputs[\"bigwig_tracks_logits\"]\n",
1107
+ " \n",
1108
+ " # Scale targets for loss computation\n",
1109
+ " scaled_targets = scale_targets_fn(bigwig_targets)\n",
1110
+ " \n",
1111
+ " # Compute loss (using scaled targets)\n",
1112
+ " loss, _, _ = poisson_multinomial_loss(\n",
1113
+ " logits=bigwig_logits,\n",
1114
+ " targets=scaled_targets,\n",
1115
+ " shape_loss_coefficient=config[\"bigwig_shape_loss_coefficient\"],\n",
1116
+ " )\n",
1117
+ " \n",
1118
+ " # Scale predictions back to original space for metrics\n",
1119
+ " # (predictions are in scaled space, need to inverse transform)\n",
1120
+ " unscaled_predictions = scale_predictions_fn(bigwig_logits)\n",
1121
+ " \n",
1122
+ " # Update metrics (using original space targets and predictions)\n",
1123
+ " metrics.update(\n",
1124
+ " predictions_scaled=bigwig_logits,\n",
1125
+ " targets_scaled=scaled_targets,\n",
1126
+ " predictions_raw=unscaled_predictions,\n",
1127
+ " targets_raw=bigwig_targets,\n",
1128
+ " loss=loss.item()\n",
1129
+ " )\n",
1130
+ " \n",
1131
+ " return loss.item()"
1132
+ ]
1133
+ },
1134
+ {
1135
+ "cell_type": "code",
1136
+ "execution_count": null,
1137
+ "metadata": {},
1138
+ "outputs": [
1139
+ {
1140
+ "name": "stdout",
1141
+ "output_type": "stream",
1142
+ "text": [
1143
+ "Starting training...\n",
1144
+ "Training for 32 steps with 2 gradient accumulation steps\n",
1145
+ "\n"
1146
+ ]
1147
+ },
1148
+ {
1149
+ "name": "stderr",
1150
+ "output_type": "stream",
1151
+ "text": [
1152
+ "/home/y-bornachot/venvs/ntv3-env/lib/python3.12/site-packages/torch/amp/autocast_mode.py:287: UserWarning: In CPU autocast, but the target dtype is not supported. Disabling autocast.\n",
1153
+ "CPU Autocast only supports dtype of torch.bfloat16, torch.float16 currently.\n",
1154
+ " warnings.warn(error_message)\n"
1155
+ ]
1156
+ },
1157
+ {
1158
+ "name": "stdout",
1159
+ "output_type": "stream",
1160
+ "text": [
1161
+ "Step 0/32 | Loss: 1.5993 | Mean Pearson: -0.0848 | LR: 1.17e-09 | Tokens: 4,096\n",
1162
+ "\n",
1163
+ "Running validation at step 0...\n",
1164
+ " Validation Loss: 0.6607\n",
1165
+ " Validation Mean Pearson: -0.0054\n",
1166
+ " ENCFF884LDL/pearson: -0.0054\n",
1167
+ "Step 2/32 | Loss: 0.3453 | Mean Pearson: -0.2111 | LR: 2.50e-09 | Tokens: 12,288\n",
1168
+ "Step 4/32 | Loss: 1.0248 | Mean Pearson: -0.0197 | LR: 2.41e-09 | Tokens: 20,480\n",
1169
+ "\n",
1170
+ "Running validation at step 4...\n",
1171
+ " Validation Loss: 0.5158\n",
1172
+ " Validation Mean Pearson: 0.0160\n",
1173
+ " ENCFF884LDL/pearson: 0.0160\n",
1174
+ "Step 6/32 | Loss: 0.3720 | Mean Pearson: 0.0140 | LR: 2.32e-09 | Tokens: 28,672\n",
1175
+ "Step 8/32 | Loss: 0.4894 | Mean Pearson: -0.0300 | LR: 2.23e-09 | Tokens: 36,864\n",
1176
+ "\n",
1177
+ "Running validation at step 8...\n",
1178
+ " Validation Loss: 0.5024\n",
1179
+ " Validation Mean Pearson: -0.0443\n",
1180
+ " ENCFF884LDL/pearson: -0.0443\n",
1181
+ "Step 10/32 | Loss: 0.4039 | Mean Pearson: -0.0783 | LR: 2.13e-09 | Tokens: 45,056\n",
1182
+ "Step 12/32 | Loss: 0.4974 | Mean Pearson: 0.0227 | LR: 2.02e-09 | Tokens: 53,248\n",
1183
+ "\n",
1184
+ "Running validation at step 12...\n",
1185
+ " Validation Loss: 0.5107\n",
1186
+ " Validation Mean Pearson: -0.0596\n",
1187
+ " ENCFF884LDL/pearson: -0.0596\n",
1188
+ "Step 14/32 | Loss: 0.2984 | Mean Pearson: -0.0820 | LR: 1.91e-09 | Tokens: 61,440\n",
1189
+ "Step 16/32 | Loss: 0.5219 | Mean Pearson: -0.0668 | LR: 1.80e-09 | Tokens: 69,632\n",
1190
+ "\n",
1191
+ "Running validation at step 16...\n",
1192
+ " Validation Loss: 0.8410\n",
1193
+ " Validation Mean Pearson: 0.0041\n",
1194
+ " ENCFF884LDL/pearson: 0.0041\n",
1195
+ "Step 18/32 | Loss: 0.3663 | Mean Pearson: 0.0888 | LR: 1.67e-09 | Tokens: 77,824\n",
1196
+ "Step 20/32 | Loss: 0.4024 | Mean Pearson: -0.0628 | LR: 1.54e-09 | Tokens: 86,016\n",
1197
+ "\n",
1198
+ "Running validation at step 20...\n",
1199
+ " Validation Loss: 0.4043\n",
1200
+ " Validation Mean Pearson: -0.1108\n",
1201
+ " ENCFF884LDL/pearson: -0.1108\n",
1202
+ "Step 22/32 | Loss: 0.4096 | Mean Pearson: -0.0249 | LR: 1.39e-09 | Tokens: 94,208\n",
1203
+ "Step 24/32 | Loss: 0.3930 | Mean Pearson: -0.0779 | LR: 1.23e-09 | Tokens: 102,400\n",
1204
+ "\n",
1205
+ "Running validation at step 24...\n",
1206
+ " Validation Loss: 0.3426\n",
1207
+ " Validation Mean Pearson: 0.0236\n",
1208
+ " ENCFF884LDL/pearson: 0.0236\n",
1209
+ "Step 26/32 | Loss: 0.4457 | Mean Pearson: -0.0219 | LR: 1.04e-09 | Tokens: 110,592\n",
1210
+ "Step 28/32 | Loss: 0.4520 | Mean Pearson: 0.0580 | LR: 8.04e-10 | Tokens: 118,784\n",
1211
+ "\n",
1212
+ "Running validation at step 28...\n",
1213
+ " Validation Loss: 0.3757\n",
1214
+ " Validation Mean Pearson: 0.0050\n",
1215
+ " ENCFF884LDL/pearson: 0.0050\n",
1216
+ "Step 30/32 | Loss: 0.9341 | Mean Pearson: -0.0122 | LR: 4.64e-10 | Tokens: 126,976\n",
1217
+ "\n",
1218
+ "Training completed after 32 steps!\n"
1219
+ ]
1220
+ }
1221
+ ],
1222
+ "source": [
1223
+ "# Training loop (step-based with gradient accumulation)\n",
1224
+ "print(\"Starting training...\")\n",
1225
+ "print(f\"Training for {num_steps_training} steps with {num_accumulation_gradient} gradient accumulation steps\\n\")\n",
1226
+ "\n",
1227
+ "model.train()\n",
1228
+ "train_metrics.reset()\n",
1229
+ "optimizer.zero_grad() # Initialize gradients\n",
1230
+ "\n",
1231
+ "# Create iterator for training data (will cycle if needed)\n",
1232
+ "train_iter = iter(train_loader)\n",
1233
+ "num_tokens_seen = 0\n",
1234
+ "\n",
1235
+ "# Main training loop: for loop over optimizer steps (like deepspeed pipeline)\n",
1236
+ "for optimizer_step_idx in range(num_steps_training):\n",
1237
+ " # Gradient accumulation loop\n",
1238
+ " accumulated_loss = 0.0\n",
1239
+ " for acc_idx in range(num_accumulation_gradient):\n",
1240
+ " try:\n",
1241
+ " batch = next(train_iter)\n",
1242
+ " except StopIteration:\n",
1243
+ " # Restart iterator if we run out of data\n",
1244
+ " train_iter = iter(train_loader)\n",
1245
+ " batch = next(train_iter)\n",
1246
+ " \n",
1247
+ " # Forward pass and accumulate gradients\n",
1248
+ " loss = train_step(\n",
1249
+ " model, batch, optimizer, scale_targets_fn, config, \n",
1250
+ " num_accumulation_steps=num_accumulation_gradient\n",
1251
+ " )\n",
1252
+ " accumulated_loss += loss\n",
1253
+ " \n",
1254
+ " # Update optimizer (after accumulation)\n",
1255
+ " optimizer.step()\n",
1256
+ " optimizer.zero_grad()\n",
1257
+ " \n",
1258
+ " # Update scheduler\n",
1259
+ " if scheduler is not None:\n",
1260
+ " scheduler.step()\n",
1261
+ " \n",
1262
+ " # Update tokens seen\n",
1263
+ " num_tokens_seen += effective_num_tokens_per_update\n",
1264
+ " \n",
1265
+ " # Update metrics (on last batch of accumulation)\n",
1266
+ " tokens = batch[\"tokens\"].to(device)\n",
1267
+ " bigwig_targets = batch[\"bigwig_targets\"].to(device)\n",
1268
+ " with torch.no_grad():\n",
1269
+ " outputs = model(tokens=tokens)\n",
1270
+ " bigwig_logits = outputs[\"bigwig_tracks_logits\"]\n",
1271
+ " \n",
1272
+ " # Scale targets for scaled metrics\n",
1273
+ " scaled_targets = scale_targets_fn(bigwig_targets)\n",
1274
+ " \n",
1275
+ " # Unscale predictions for raw metrics\n",
1276
+ " unscaled_predictions = scale_predictions_fn(bigwig_logits)\n",
1277
+ " \n",
1278
+ " avg_loss = accumulated_loss / num_accumulation_gradient\n",
1279
+ " train_metrics.update(\n",
1280
+ " predictions_scaled=bigwig_logits,\n",
1281
+ " targets_scaled=scaled_targets,\n",
1282
+ " predictions_raw=unscaled_predictions,\n",
1283
+ " targets_raw=bigwig_targets,\n",
1284
+ " loss=avg_loss\n",
1285
+ " )\n",
1286
+ " \n",
1287
+ " # Logging\n",
1288
+ " if optimizer_step_idx % log_train_step == 0:\n",
1289
+ " train_metrics_dict = train_metrics.compute()\n",
1290
+ " current_lr = scheduler.get_last_lr()[0] if scheduler else config[\"learning_rate\"]\n",
1291
+ " print(f\"Step {optimizer_step_idx + 1}/{num_steps_training} | \"\n",
1292
+ " f\"Loss: {avg_loss:.4f} | \"\n",
1293
+ " f\"Mean Pearson: {train_metrics_dict['mean/pearson']:.4f} | \"\n",
1294
+ " f\"LR: {current_lr:.2e} | \"\n",
1295
+ " f\"Tokens: {num_tokens_seen:,}\")\n",
1296
+ " train_metrics.reset()\n",
1297
+ " \n",
1298
+ " # Validation\n",
1299
+ " if optimizer_step_idx % log_validation_step == 0:\n",
1300
+ " print(f\"\\nRunning validation at step {optimizer_step_idx}...\")\n",
1301
+ " val_metrics.reset()\n",
1302
+ " model.eval()\n",
1303
+ " \n",
1304
+ " val_losses = []\n",
1305
+ " for val_batch in val_loader:\n",
1306
+ " val_loss = validation_step(\n",
1307
+ " model, val_batch, scale_targets_fn, scale_predictions_fn, val_metrics, config\n",
1308
+ " )\n",
1309
+ " val_losses.append(val_loss)\n",
1310
+ " \n",
1311
+ " # Print validation metrics\n",
1312
+ " val_metrics_dict = val_metrics.compute()\n",
1313
+ " print(f\" Validation Loss: {np.mean(val_losses):.4f}\")\n",
1314
+ " print(f\" Validation Mean Pearson: {val_metrics_dict['mean/pearson']:.4f}\")\n",
1315
+ " for track_name in config[\"bigwig_file_ids\"]:\n",
1316
+ " print(f\" {track_name}/pearson: {val_metrics_dict[f'{track_name}/pearson']:.4f}\")\n",
1317
+ " \n",
1318
+ " model.train() # Back to training mode\n",
1319
+ "\n",
1320
+ "print(f\"\\nTraining completed after {num_steps_training} steps!\")\n"
1321
+ ]
1322
+ },
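The notebook does not yet persist the fine-tuned weights; a minimal checkpointing sketch (the file name is illustrative, not part of the original pipeline):

```python
# Save backbone + head weights together with the optimizer state and config,
# so training can be resumed or the head reloaded for inference.
checkpoint_path = "ntv3_tracks_finetuned.pt"  # hypothetical path
torch.save(
    {
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "config": config,
    },
    checkpoint_path,
)
print(f"Saved checkpoint to {checkpoint_path}")
```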
1323
+ {
1324
+ "cell_type": "markdown",
1325
+ "metadata": {},
1326
+ "source": [
1327
+ "# 10. Test evaluation"
1328
+ ]
1329
+ },
1330
+ {
1331
+ "cell_type": "code",
1332
+ "execution_count": null,
1333
+ "metadata": {},
1334
+ "outputs": [],
1335
+ "source": [
1336
+ "def test_step(\n",
1337
+ " model: nn.Module,\n",
1338
+ " batch: Dict[str, torch.Tensor],\n",
1339
+ " scale_targets_fn: Callable,\n",
1340
+ " scale_predictions_fn: Callable,\n",
1341
+ " metrics: TracksMetrics,\n",
1342
+ ") -> None:\n",
1343
+ " \"\"\"\n",
1344
+ " Pure evaluation step for test set (no loss computation).\n",
1345
+ " Based on tracks_evaluation_step_torch from deepspeed pipeline.\n",
1346
+ " \"\"\"\n",
1347
+ " tokens = batch[\"tokens\"].to(device)\n",
1348
+ " bigwig_targets = batch[\"bigwig_targets\"].to(device) # Shape: (batch, seq_len_cropped, num_tracks)\n",
1349
+ " \n",
1350
+ " with torch.no_grad():\n",
1351
+ " # Forward pass\n",
1352
+ " outputs = model(tokens=tokens)\n",
1353
+ " bigwig_logits = outputs[\"bigwig_tracks_logits\"] # Shape: (batch, cropped_seq_len, num_tracks)\n",
1354
+ " \n",
1355
+ " # Scale targets for scaled metrics\n",
1356
+ " scaled_targets = scale_targets_fn(bigwig_targets)\n",
1357
+ " \n",
1358
+ " # Unscale predictions for raw metrics\n",
1359
+ " unscaled_predictions = scale_predictions_fn(bigwig_logits)\n",
1360
+ " \n",
1361
+ " # Update metrics with both scaled and raw values\n",
1362
+ " # Pass 0.0 as loss since we don't compute loss in test evaluation\n",
1363
+ " metrics.update(\n",
1364
+ " predictions_scaled=bigwig_logits,\n",
1365
+ " targets_scaled=scaled_targets,\n",
1366
+ " predictions_raw=unscaled_predictions,\n",
1367
+ " targets_raw=bigwig_targets,\n",
1368
+ " loss=0.0\n",
1369
+ " )"
1370
+ ]
1371
+ },
1372
+ {
1373
+ "cell_type": "code",
1374
+ "execution_count": null,
1375
+ "metadata": {},
1376
+ "outputs": [
1377
+ {
1378
+ "name": "stdout",
1379
+ "output_type": "stream",
1380
+ "text": [
1381
+ "\n",
1382
+ "==================================================\n",
1383
+ "Test Set Evaluation\n",
1384
+ "==================================================\n"
1385
+ ]
1386
+ },
1387
+ {
1388
+ "ename": "NameError",
1389
+ "evalue": "name 'test_dataset' is not defined",
1390
+ "output_type": "error",
1391
+ "traceback": [
1392
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
1393
+ "\u001b[31mNameError\u001b[39m Traceback (most recent call last)",
1394
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[68]\u001b[39m\u001b[32m, line 10\u001b[39m\n\u001b[32m 8\u001b[39m \u001b[38;5;66;03m# Calculate number of test steps (based on deepspeed pipeline)\u001b[39;00m\n\u001b[32m 9\u001b[39m test_batch_size = config[\u001b[33m\"\u001b[39m\u001b[33mbatch_size\u001b[39m\u001b[33m\"\u001b[39m]\n\u001b[32m---> \u001b[39m\u001b[32m10\u001b[39m num_test_samples = \u001b[38;5;28mlen\u001b[39m(\u001b[43mtest_dataset\u001b[49m)\n\u001b[32m 11\u001b[39m num_test_steps = num_test_samples // test_batch_size\n\u001b[32m 13\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mRunning test evaluation with \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_test_steps\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m steps (\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_test_samples\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m samples)\u001b[39m\u001b[33m\"\u001b[39m)\n",
1395
+ "\u001b[31mNameError\u001b[39m: name 'test_dataset' is not defined"
1396
+ ]
1397
+ }
1398
+ ],
1399
+ "source": [
1400
+ "print(\"\\n\" + \"=\"*50)\n",
1401
+ "print(\"Test Set Evaluation\")\n",
1402
+ "print(\"=\"*50)\n",
1403
+ "\n",
1404
+ "# Calculate number of test steps (based on deepspeed pipeline)\n",
1405
+ "num_test_samples = len(test_dataset)\n",
1406
+ "num_test_steps = num_test_samples // config[\"batch_size\"]\n",
1407
+ "\n",
1408
+ "print(f\"Running test evaluation with {num_test_steps} steps ({num_test_samples} samples)\")\n",
1409
+ "\n",
1410
+ "# Set model to eval mode\n",
1411
+ "model.eval()\n",
1412
+ "\n",
1413
+ "# Create iterator for test data\n",
1414
+ "test_iter = iter(test_loader)\n",
1415
+ "\n",
1416
+ "# Run test evaluation (based on deepspeed pipeline: for loop over test steps)\n",
1417
+ "for _ in range(num_test_steps):\n",
1418
+ " try:\n",
1419
+ " test_batch = next(test_iter)\n",
1420
+ " except StopIteration:\n",
1421
+ " break\n",
1422
+ " \n",
1423
+ " # Perform test evaluation (pure evaluation, no loss computation)\n",
1424
+ " test_step(\n",
1425
+ " model, test_batch, scale_targets_fn, scale_predictions_fn, test_metrics\n",
1426
+ " )\n",
1427
+ "\n",
1428
+ "# Compute final test metrics\n",
1429
+ "test_metrics_dict = test_metrics.compute()\n",
1430
+ "\n",
1431
+ "print(\"\\n\" + \"=\"*50)\n",
1432
+ "print(\"Test Set Results\")\n",
1433
+ "print(\"=\"*50)\n",
1434
+ "print(f\"\\nScaled Metrics (scaled predictions vs scaled targets):\")\n",
1435
+ "print(f\" Mean Pearson (scaled): {test_metrics_dict['mean/pearson_scaled']:.4f}\")\n",
1436
+ "for track_name in config[\"bigwig_file_ids\"]:\n",
1437
+ " print(f\" {track_name}/pearson_scaled: {test_metrics_dict[f'{track_name}/pearson_scaled']:.4f}\")\n",
1438
+ "\n",
1439
+ "print(f\"\\nRaw Metrics (raw predictions vs raw targets):\")\n",
1440
+ "print(f\" Mean Pearson (raw): {test_metrics_dict['mean/pearson_raw']:.4f}\")\n",
1441
+ "for track_name in config[\"bigwig_file_ids\"]:\n",
1442
+ " print(f\" {track_name}/pearson_raw: {test_metrics_dict[f'{track_name}/pearson_raw']:.4f}\")\n",
1443
+ "print(\"=\"*50)"
1444
+ ]
1445
+ },
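Beyond scalar correlations, plotting one window makes the fit tangible. A sketch assuming matplotlib is available (it is not among the notebook's imports); prediction and target lengths may differ by a few positions if the tokenizer adds special tokens:

```python
import matplotlib.pyplot as plt

# Predict one test window and overlay the raw signal for the first track.
batch = next(iter(test_loader))
with torch.no_grad():
    logits = model(tokens=batch["tokens"].to(device))["bigwig_tracks_logits"]
    preds = scale_predictions_fn(logits)

plt.figure(figsize=(10, 3))
plt.plot(batch["bigwig_targets"][0, :, 0].numpy(), label="target (raw)")
plt.plot(preds[0, :, 0].cpu().numpy(), label="prediction (unscaled)")
plt.title(f"{batch['chrom'][0]}:{int(batch['start'][0])}-{int(batch['end'][0])} (center crop)")
plt.legend()
plt.show()
```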
1446
+ {
1447
+ "cell_type": "code",
1448
+ "execution_count": null,
1449
+ "metadata": {},
1450
+ "outputs": [],
1451
+ "source": []
1452
+ }
1453
+ ],
1454
+ "metadata": {
1455
+ "kernelspec": {
1456
+ "display_name": "Python 3.12 (ntv3-env)",
1457
+ "language": "python",
1458
+ "name": "ntv3-env"
1459
+ },
1460
+ "language_info": {
1461
+ "codemirror_mode": {
1462
+ "name": "ipython",
1463
+ "version": 3
1464
+ },
1465
+ "file_extension": ".py",
1466
+ "mimetype": "text/x-python",
1467
+ "name": "python",
1468
+ "nbconvert_exporter": "python",
1469
+ "pygments_lexer": "ipython3",
1470
+ "version": "3.12.3"
1471
+ }
1472
+ },
1473
+ "nbformat": 4,
1474
+ "nbformat_minor": 2
1475
+ }