Upload 17 files
- datasets/__init__.py +0 -0
- datasets/clip-rcnn-attn/test_clip_rcnn_attn.pkl +3 -0
- datasets/clip-rcnn-attn/train_clip_rcnn_attn.pkl +3 -0
- datasets/clip-rcnn-attn/val_clip_rcnn_attn.pkl +3 -0
- datasets/obj_feat/test_obj_feat.pkl +3 -0
- datasets/obj_feat/test_obj_feat_am.pkl +3 -0
- datasets/obj_feat/train_obj_feat.pkl +3 -0
- datasets/obj_feat/train_obj_feat_am.pkl +3 -0
- datasets/obj_feat/val_obj_feat.pkl +3 -0
- datasets/obj_feat/val_obj_feat_am.pkl +3 -0
- datasets/qa/test_amharic.csv +0 -0
- datasets/qa/test_english.csv +0 -0
- datasets/qa/train_amharic.csv +0 -0
- datasets/qa/train_english.csv +0 -0
- datasets/qa/val_amharic.csv +0 -0
- datasets/qa/val_english.csv +0 -0
- datasets/train.pyy +277 -0
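
For orientation, the feature pickles and QA splits listed above are consumed by datasets/train.pyy roughly as follows (a minimal sketch; paths are relative to the repo root, and the column names are assumptions taken from how the script indexes each row):

import pickle
import pandas as pd

# Per-video feature dict, keyed by video_id
with open('datasets/clip-rcnn-attn/train_clip_rcnn_attn.pkl', 'rb') as f:
    train_feat = pickle.load(f)

# QA split: one question/answer pair per row
train_df = pd.read_csv('datasets/qa/train_amharic.csv')
# train.pyy reads row['video_id'], row['question'], row['answer'] from each row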
datasets/__init__.py
ADDED
File without changes
datasets/clip-rcnn-attn/test_clip_rcnn_attn.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b4cbb9b8b72c2d8247937828728ef60bab8e33851111a89c23eb88ae1d9e59c8
size 23798911
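
The .pkl entries in this commit are Git LFS pointer files rather than the binary payloads: each records only the pointer spec version, a sha256 oid, and the blob size in bytes; the actual file is fetched by the LFS smudge filter on checkout. A minimal sketch of reading those fields (parse_lfs_pointer is a hypothetical helper, not part of this commit):

def parse_lfs_pointer(text):
    # Each pointer line is "key value"; split on the first space.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(' ')
        fields[key] = value
    return fields

# e.g. {'version': 'https://git-lfs.github.com/spec/v1',
#       'oid': 'sha256:b4cbb9b8...', 'size': '23798911'}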
datasets/clip-rcnn-attn/train_clip_rcnn_attn.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f002f6063c4f5b8999d573b12ce59700caf04aff6573a64e23c9b1091b168159
size 22960039
datasets/clip-rcnn-attn/val_clip_rcnn_attn.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa0f76bc5caddc13b21755bab9aa90aee56b9d3dae6ebae442ea2142dffae1dc
size 3548069
datasets/obj_feat/test_obj_feat.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00bf63d3d41aec578d5f9b39e513a3208bb6caa15d5bef709d2c0af32ce87673
size 50525738
datasets/obj_feat/test_obj_feat_am.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df050c80857983e9c95a346d892864f39e2f04cc67a75d990a73a443c867ac6e
size 49656750
datasets/obj_feat/train_obj_feat.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:47dd3070d6fb0a409cfeed60959f3e869f28606955736ae7b0fd12646eee9bb8
size 82895569
datasets/obj_feat/train_obj_feat_am.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3343cdd0b2fc69177ab1d374f829675634126715f7919d217b7fd611779021af
size 43735055
datasets/obj_feat/val_obj_feat.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da3af1552bb745bd3c5023429b07b9b75737dce2f3174282f193f8d95e5cab53
size 6994081
datasets/obj_feat/val_obj_feat_am.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:202a34593e1980ec15f110c18d20312158a6e10eea3b599679be35f07e790456
size 6899385
datasets/qa/test_amharic.csv
ADDED
The diff for this file is too large to render.
datasets/qa/test_english.csv
ADDED
The diff for this file is too large to render.
datasets/qa/train_amharic.csv
ADDED
The diff for this file is too large to render.
datasets/qa/train_english.csv
ADDED
The diff for this file is too large to render.
datasets/qa/val_amharic.csv
ADDED
The diff for this file is too large to render.
datasets/qa/val_english.csv
ADDED
The diff for this file is too large to render.
datasets/train.pyy
ADDED
@@ -0,0 +1,277 @@
#######################################
## Gated and Modal Alignment methods ##
#######################################

import pickle
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
from torch.nn.utils.rnn import pad_sequence
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import ReduceLROnPlateau, LambdaLR
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter

# Configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
d_model = 2048
batch_size = 24
num_epochs = 30
lr = 1e-4

# Load data (paths are machine-specific)
train_df = pd.read_csv('/home/amerti/Documents/MSC/VQAGen/datasets/qa/train_amharic.csv')
val_df = pd.read_csv('/home/amerti/Documents/MSC/VQAGen/datasets/qa/val_amharic.csv')
test_df = pd.read_csv('/home/amerti/Documents/MSC/VQAGen/datasets/qa/test_amharic.csv')

with open('/home/amerti/Documents/MSC/VQAGen/datasets/clip-rcnn-attn/train_clip_rcnn_attn.pkl', 'rb') as f:
    train_feat = pickle.load(f)
with open('/home/amerti/Documents/MSC/VQAGen/datasets/clip-rcnn-attn/val_clip_rcnn_attn.pkl', 'rb') as f:
    val_feat = pickle.load(f)
with open('/home/amerti/Documents/MSC/VQAGen/datasets/clip-rcnn-attn/test_clip_rcnn_attn.pkl', 'rb') as f:
    test_feat = pickle.load(f)

with open('train_temp_spa_all_features.pkl', 'rb') as f:
    train_ts = pickle.load(f)
with open('val_temp_spa_all_features.pkl', 'rb') as f:
    val_ts = pickle.load(f)
with open('test_temp_spa_all_features.pkl', 'rb') as f:
    test_ts = pickle.load(f)

# Tokenizer & BERT
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained('bert-base-uncased').to(device).eval()
bert_dim = bert_model.config.hidden_size

# Label encoding
le = LabelEncoder().fit(
    pd.concat([train_df['answer'], val_df['answer'], test_df['answer']])
)
for df in (train_df, val_df, test_df):
    df.dropna(subset=['question', 'answer'], inplace=True)
    df['label'] = le.transform(df['answer'])
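
# Note: the encoder is fit on the raw answer columns before dropna(), so a
# missing answer may survive as its own class in le.classes_ (and hence in the
# classifier's output size). Labels round-trip via inverse_transform, e.g.:
#   le.inverse_transform([int(train_df['label'].iloc[0])])  # -> answer string
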
class QAGraphDataset(Dataset):
    def __init__(self, df, feat, ts, d_model, bert_dim):
        self.df = df.reset_index(drop=True)
        self.feat = feat
        self.ts = ts
        self.d_model = d_model
        self.bert_dim = bert_dim

        sample = next(iter(feat.values()))
        self.frame_dim = sample['frame_features'][0].shape[-1]
        self.temp_dim = 514  # 1 (mean displacement) + 1 (mean emb_diff) + 512 (mean emb_from)
        self.spat_dim = 515  # 1 (distance) + 1 (horizontal_offset) + 1 (emb_similarity) + 512 (obj1_emb)

        self.video_proj = nn.Linear(4, d_model).to(device)
        self.attn_proj = nn.Linear(512, d_model).to(device)
        self.frame_proj = nn.Linear(self.frame_dim, d_model).to(device)
        self.temp_proj = nn.Linear(self.temp_dim, d_model).to(device)
        self.spat_proj = nn.Linear(self.spat_dim, d_model).to(device)
        self.txt_proj = nn.Linear(bert_dim, d_model).to(device)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        vid = row['video_id']
        ques, ans, lbl = row['question'], row['answer'], row['label']

        # Visual features
        v = torch.tensor(self.feat[vid]['video_feature'], dtype=torch.float, device=device)
        a = torch.tensor(self.feat[vid]['attn_feature'], dtype=torch.float, device=device)
        fr = torch.stack([
            torch.tensor(x, dtype=torch.float, device=device)
            for x in self.feat[vid]['frame_features']
        ]).mean(dim=0)

        # Temporal features
        t_data = self.ts[vid].get('temporal_features', [])
        if t_data:
            tem = torch.cat([
                torch.tensor([x['displacement'] for x in t_data], device=device).mean().unsqueeze(0),
                torch.tensor([x['emb_diff'] for x in t_data], device=device).mean().unsqueeze(0),
                torch.stack([torch.tensor(x['emb_from'], device=device) for x in t_data]).mean(dim=0)
            ], dim=0).float()
        else:
            tem = torch.zeros(self.temp_dim, dtype=torch.float, device=device)

        # Spatial features
        s_data = self.ts[vid].get('spatial_features', [])
        if s_data:
            sp = torch.cat([
                torch.tensor([x['distance'] for x in s_data], device=device).mean().unsqueeze(0),
                torch.tensor([x['horizontal_offset'] for x in s_data], device=device).mean().unsqueeze(0),
                torch.tensor([x['emb_similarity'] for x in s_data], device=device).mean().unsqueeze(0),
                torch.stack([torch.tensor(x['obj1_emb'], device=device) for x in s_data]).mean(dim=0)
            ], dim=0).float()
        else:
            sp = torch.zeros(self.spat_dim, dtype=torch.float, device=device)

        # Text features
        inp = tokenizer(
            ques, ans, return_tensors='pt', padding=True,
            truncation=True, max_length=512
        ).to(device)
        txt = bert_model(**inp).last_hidden_state.squeeze(0).float()

        # Projections
        v_p = self.video_proj(v).unsqueeze(0)
        a_p = self.attn_proj(a).unsqueeze(0)
        fr_p = self.frame_proj(fr).unsqueeze(0)
        t_p = self.temp_proj(tem).unsqueeze(0)
        sp_p = self.spat_proj(sp).unsqueeze(0)
        txt_p = self.txt_proj(txt)

        tokens = torch.cat([txt_p, v_p, a_p, fr_p, t_p, sp_p], dim=0).float()
        return tokens, torch.tensor(lbl, device=device)
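
# Note: the projection layers above belong to the Dataset rather than the model,
# so they are not covered by opt = torch.optim.Adam(model.parameters()) below
# and keep their random initialization. The BERT forward in __getitem__ also
# runs with gradient tracking enabled; registering the projections on the model
# (or wrapping the BERT call in torch.no_grad()) would be the usual arrangement.
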
# Collate: ensure float32 dtype
def collate_fn(batch):
    toks, labs = zip(*batch)
    pad = pad_sequence(toks, batch_first=True, padding_value=0.0).float()
    return pad, torch.stack(labs)
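
# Shape note: each dataset item is (seq_len, d_model), so a padded batch is
# (batch, max_seq_len, d_model) with zero vectors past each sequence's end.
# No src_key_padding_mask is built, so the encoder also attends to padding.
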
class UnifiedTransformerClassifier(nn.Module):
    def __init__(self, d_model, num_classes, nhead=4, num_layers=2):
        super().__init__()
        layer = nn.TransformerEncoderLayer(
            d_model, nhead, dim_feedforward=d_model*4, batch_first=True
        )
        self.encoder = nn.TransformerEncoder(layer, num_layers)
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        x = x.float()
        enc = self.encoder(x)
        rep = enc[:, 0]
        return self.classifier(rep)
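
# Pooling note: rep = enc[:, 0] summarizes the sequence by its first position,
# i.e. the first projected text token acts as a CLS-style representation.
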
# Prepare datasets & loaders
train_ds = QAGraphDataset(train_df, train_feat, train_ts, d_model, bert_dim)
val_ds = QAGraphDataset(val_df, val_feat, val_ts, d_model, bert_dim)
test_ds = QAGraphDataset(test_df, test_feat, test_ts, d_model, bert_dim)

train_loader = DataLoader(train_ds, batch_size, shuffle=True, collate_fn=collate_fn)
val_loader = DataLoader(val_ds, batch_size, shuffle=False, collate_fn=collate_fn)
test_loader = DataLoader(test_ds, batch_size, shuffle=False, collate_fn=collate_fn)

# Model, optimizer, loss, schedulers
model = UnifiedTransformerClassifier(d_model, len(le.classes_)).to(device)
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
opt = torch.optim.Adam(model.parameters(), lr=lr)
crit = nn.CrossEntropyLoss()
scaler = GradScaler()
sched = ReduceLROnPlateau(opt, 'max', factor=0.5, patience=3, verbose=True)
wup = LambdaLR(opt, lambda e: (e+1)/5 if e < 5 else 1.0)
writer = SummaryWriter()
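
# Schedule note: wup scales the base lr by (epoch+1)/5, i.e. a linear warmup
# from lr/5 to lr over the first five epochs; afterwards sched halves the lr
# whenever val_acc fails to improve for three epochs ('max' mode). writer is
# created here but never written to in this script.
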
# Training & validation with unified progress bar per epoch
train_losses, train_accs, val_losses, val_accs = [], [], [], []
best_acc, patience_cnt = 0, 0
epoch_bar = tqdm(range(1, num_epochs+1), desc='Epochs')

for epoch in epoch_bar:
    total_train_loss = train_correct = train_total = 0
    total_val_loss = val_correct = val_total = 0

    # Single progress bar for both training and validation
    total_batches = len(train_loader) + len(val_loader)
    batch_bar = tqdm(total=total_batches, desc=f'Epoch {epoch} (Train/Val)', leave=False)

    # Training phase
    model.train()
    for x, y in train_loader:
        if y[0] < 0:  # defensive skip; LabelEncoder labels are non-negative, so this never fires
            batch_bar.update(1)
            continue
        x, y = x.to(device), y.to(device)
        opt.zero_grad()
        with autocast():
            out = model(x)
            loss = crit(out, y)
        scaler.scale(loss).backward()
        scaler.unscale_(opt)  # unscale before clipping so max_norm applies to the true gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        scaler.step(opt)
        scaler.update()
        total_train_loss += loss.item() * x.size(0)
        train_correct += (out.argmax(1) == y).sum().item()
        train_total += y.size(0)
        batch_bar.update(1)
        batch_bar.set_postfix(
            train_loss=f"{total_train_loss/train_total:.4f}",
            train_acc=f"{100.*train_correct/train_total:.2f}%"
        )

    # Validation phase
    model.eval()
    with torch.no_grad():
        for x, y in val_loader:
            x, y = x.to(device), y.to(device)
            out = model(x)
            loss = crit(out, y)
            total_val_loss += loss.item() * y.size(0)
            val_correct += (out.argmax(1) == y).sum().item()
            val_total += y.size(0)
            batch_bar.update(1)
            batch_bar.set_postfix(
                val_loss=f"{total_val_loss/val_total:.4f}",
                val_acc=f"{100.*val_correct/val_total:.2f}%"
            )

    batch_bar.close()

    # Compute epoch metrics
    train_loss = total_train_loss / train_total
    train_acc = 100. * train_correct / train_total
    val_loss = total_val_loss / val_total
    val_acc = 100. * val_correct / val_total

    # Store metrics
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    val_losses.append(val_loss)
    val_accs.append(val_acc)

    # Update epoch progress
    epoch_bar.set_postfix(
        train_loss=f"{train_loss:.4f}",
        train_acc=f"{train_acc:.2f}%",
        val_loss=f"{val_loss:.4f}",
        val_acc=f"{val_acc:.2f}%"
    )

    # Schedulers & early stopping
    if epoch < 5:
        wup.step()
    else:
        sched.step(val_acc)
    if val_acc > best_acc:
        best_acc, patience_cnt = val_acc, 0
        torch.save(model.state_dict(), '/kaggle/working/best.pt')
    else:
        patience_cnt += 1
        if patience_cnt >= 5:
            break
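
# Checkpoint note: the evaluation below uses the weights from the final epoch;
# to score the best validation checkpoint instead, reload it first, e.g.:
#   model.load_state_dict(torch.load('/kaggle/working/best.pt'))
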
# Final plot & test
plt.figure(figsize=(12,5))
plt.subplot(1,2,1); plt.plot(train_losses, label='Train Loss'); plt.plot(val_losses, label='Val Loss'); plt.legend()
plt.subplot(1,2,2); plt.plot(train_accs, label='Train Acc'); plt.plot(val_accs, label='Val Acc'); plt.legend()
plt.tight_layout(); plt.savefig('/kaggle/working/metrics_plot.png'); plt.close()

test_acc = 0; model.eval()
with torch.no_grad():
    for x, y in test_loader:
        x, y = x.to(device), y.to(device)
        out = model(x); test_acc += (out.argmax(1) == y).sum().item()
test_acc = test_acc / len(test_ds) * 100
print(f"Final Test Acc: {test_acc:.2f}%")