Daniel-F committed on
Commit
5be3c34
·
1 Parent(s): 7d7aa78

add train folder

Browse files
Files changed (4) hide show
  1. train/dataset.py +40 -0
  2. train/eval.py +62 -0
  3. train/model.py +52 -0
  4. train/train.py +91 -0
train/dataset.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+ import torch
4
+ from torch.utils.data import Dataset
5
+ from PIL import Image
6
+ import torchvision.transforms as T
7
+
8
class NosePointDataset(Dataset):
    """Pairs of (nose image, normalized click coordinate).

    Each sample is a ``.png`` under *root* with a sibling ``.txt`` file
    holding a comma-separated ``x,y`` pixel coordinate of the nose click.
    """

    def __init__(self, root="/fs/scratch/PAS2099/danielf/medical/nose_clicks_lazy", image_size=(64, 64), device='cpu'):
        self.root = root
        # Keep only image files, in a deterministic (sorted) order; each is
        # expected to have a matching .txt label next to it.
        self.files = [f for f in sorted(os.listdir(root)) if f.endswith('.png')]
        self.device = device

        # Resize and scale pixels to [0, 1]; output shape is (C, H, W).
        self.base_transform = T.Compose([
            T.Resize(image_size),
            T.ToTensor(),
        ])

    def __len__(self):
        """Number of image samples found under the dataset root."""
        return len(self.files)

    def __getitem__(self, idx):
        """Return (image tensor [C, H, W] in [0, 1], coord tensor [2] in [0, 1])."""
        name = self.files[idx]
        img = Image.open(os.path.join(self.root, name)).convert('RGB')
        orig_w, orig_h = img.size

        # The label file stores the click position in original-pixel units.
        with open(os.path.join(self.root, name.replace('.png', '.txt')), 'r') as fh:
            parts = fh.read().strip().split(',')
        px, py = float(parts[0]), float(parts[1])

        # Normalize the target relative to the original frame size.
        target = torch.tensor([px / orig_w, py / orig_h], dtype=torch.float32).to(self.device)

        # NOTE(review): moving tensors to a CUDA device inside __getitem__
        # would break multi-worker DataLoaders; the default device is 'cpu',
        # so this is safe as used.
        return self.base_transform(img).to(self.device), target
train/eval.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#%%
import torch

from model import *

# --- Load the trained regressor ---------------------------------------------
model_path = "best_model.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# model = NosePointRegressor(input_channels=3)
model = ResNetNoseRegressor(pretrained=False)  # Set pretrained=False to load custom weights
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
model.eval()

# %%
import os
import numpy as np
import cv2

video_path = "/fs/scratch/PAS2099/danielf/medical/Animal_Behavior_Test/videos/WIN_20250529_15_19_13_Pro.mp4"

cap = cv2.VideoCapture(video_path)

#%%
# Grab one frame and crop it to the region of interest (x0, y0, x1, y1).
random_frame = 1000
cap.set(cv2.CAP_PROP_POS_FRAMES, random_frame)
ret, frame = cap.read()
if not ret:
    # Fail loudly instead of crashing on `frame[...]` with frame=None below.
    raise RuntimeError(f"Could not read frame {random_frame} from {video_path}")
crop = (500, 550, 800, 620)
frame = frame[crop[1]:crop[3], crop[0]:crop[2]]  # Crop the frame to the region of interest

from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt

# OpenCV frames are BGR; convert to RGB before handing them to PIL/torchvision.
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
orig_w, orig_h = image.size

# Must match the training-time preprocessing (64x64 resize, [0, 1] range).
transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
])
image_tensor = transform(image).unsqueeze(0)  # Add batch dimension
image_tensor = image_tensor.to(device)

# === Inference ===
# Single forward pass. (The original ran the model twice under no_grad and
# discarded the first result.)
with torch.no_grad():
    pred = model(image_tensor)[0].cpu().numpy()  # shape: (2,) normalized
print(pred)

# === Map back to original resolution ===
x_pred = int(pred[0] * orig_w)
y_pred = int(pred[1] * orig_h)

plt.figure(figsize=(6, 4))
plt.imshow(image)
plt.scatter([x_pred], [y_pred], c='red', s=40, label='Predicted Nose')
plt.title(f'Prediction: ({x_pred}, {y_pred})')
plt.legend()
plt.tight_layout()
plt.show()
train/model.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
class NosePointRegressor(nn.Module):
    """Small CNN that regresses one (x, y) point from an image.

    Output is a [B, 2] tensor passed through a sigmoid, i.e. the point
    expressed as a fraction of image width/height in [0, 1].
    """

    def __init__(self, input_channels=1):
        super().__init__()

        # Three stride-2 convolutions (spatial size H/8 x W/8) followed by
        # global average pooling down to a 64-d feature vector.
        self.encoder = nn.Sequential(
            nn.Conv2d(input_channels, 16, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((1, 1)),  # -> [B, 64, 1, 1]
        )

        # Two-layer regression head; the sigmoid keeps coordinates in [0, 1].
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 2),   # Predict (x, y) coordinate
            nn.Sigmoid(),       # Normalize output to [0, 1]
        )

    def forward(self, x):
        """Return normalized predictions, shape [B, 2], values in [0, 1]."""
        return self.fc(self.encoder(x))
31
+
32
+ import torchvision.models as models
33
+ import torch.nn as nn
34
+
35
class ResNetNoseRegressor(nn.Module):
    """ResNet-18 backbone with a small head regressing one (x, y) point.

    Args:
        pretrained: when True, initialize the backbone with ImageNet weights.

    Output is a [B, 2] tensor, sigmoid-normalized to [0, 1].
    """

    def __init__(self, pretrained=True):
        super().__init__()
        # torchvision >= 0.13 replaced the deprecated `pretrained=` argument
        # with `weights=`; support both so this works across versions.
        try:
            weights = models.ResNet18_Weights.IMAGENET1K_V1 if pretrained else None
            resnet = models.resnet18(weights=weights)
        except AttributeError:  # old torchvision without the Weights enum
            resnet = models.resnet18(pretrained=pretrained)
        # Drop the final avgpool + fc; keep only the convolutional trunk.
        self.backbone = nn.Sequential(*list(resnet.children())[:-2])
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.head = nn.Sequential(
            nn.Flatten(),
            nn.Linear(512, 128),
            nn.ReLU(),
            nn.Linear(128, 2),
            nn.Sigmoid(),  # Normalized (x, y)
        )

    def forward(self, x):
        """Return [B, 2] normalized coordinates for a batch of RGB images."""
        x = self.backbone(x)
        x = self.pool(x)
        return self.head(x)
train/train.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#%%
import torch
import torch.nn as nn
import torch.optim as optim

from model import *
from dataset import NosePointDataset

# --- Hyperparameters --------------------------------------------------------
image_size = (64, 64)
batch_size = 32
num_epochs = 1000
lr = 1e-3
val_split = 0.2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

dataset = NosePointDataset(image_size=image_size)

# 80/20 train/val split; the second size is computed as the remainder so the
# two always sum exactly to len(dataset).
train, val = torch.utils.data.random_split(dataset, [int(len(dataset) * (1 - val_split)), len(dataset) - int(len(dataset) * (1 - val_split))])
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val, batch_size=batch_size, shuffle=False)

# model = NosePointRegressor(input_channels=3).to(device)
model = ResNetNoseRegressor(pretrained=True).to(device)
# criterion = nn.MSELoss()
criterion = nn.SmoothL1Loss()
optimizer = optim.Adam(model.parameters(), lr=lr)

# %%
import matplotlib.pyplot as plt
from tqdm import tqdm

save_path = "best_model.pth"   # checkpoint for the best validation loss
plot_path = "loss_plot.png"    # loss-curve image, rewritten as training runs

train_losses = []
val_losses = []
best_val_loss = float('inf')

# ===== Training Loop =====
for epoch in range(num_epochs):
    model.train()
    train_loss = 0.0

    for images, targets in tqdm(train_loader):
        images, targets = images.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        # Weight by batch size so the sum averages correctly even when the
        # last batch is smaller than batch_size.
        train_loss += loss.item() * images.size(0)
    train_loss /= len(train_loader.dataset)

    # --- Validation: eval mode, no gradients ---
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for images, targets in val_loader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            loss = criterion(outputs, targets)
            val_loss += loss.item() * images.size(0)
    val_loss /= len(val_loader.dataset)

    # Logging
    train_losses.append(train_loss)
    val_losses.append(val_loss)
    print(f"[Epoch {epoch+1}/{num_epochs}] Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}")

    # Save best model (checkpointed on validation loss only)
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        torch.save(model.state_dict(), save_path)
        print("✅ Saved best model.")

    # Save plot
    # NOTE(review): reconstructed as per-epoch (the diff view lost indentation);
    # redrawing each epoch makes progress visible mid-run — confirm intent.
    plt.figure(figsize=(6, 4))
    plt.plot(range(1, len(train_losses)+1), train_losses, label="Train Loss")
    plt.plot(range(1, len(val_losses)+1), val_losses, label="Val Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Training vs Validation Loss")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(plot_path)
    plt.close()

print("✅ Training complete.")
# %%