ludaladila commited on
Commit ·
8ed41e3
1
Parent(s): 5ab5efe
update
Browse files- .DS_Store +0 -0
- models/__init__.py +0 -0
- models/model.py +28 -0
- models/vgg16_model.pth +3 -0
- requirements.txt +8 -0
- scripts/xai_eval.py +33 -0
.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
models/__init__.py
ADDED
|
File without changes
|
models/model.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# models/model.py
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torchvision.models as models
|
| 5 |
+
|
| 6 |
+
# Deep Learning Model
|
| 7 |
+
class DeepLearningModel(nn.Module):
    """VGG-16 backbone fine-tuned for 3-class image classification.

    Reuses the ImageNet-pretrained feature extractor and average pool,
    and replaces the final classifier layer with a small 3-class head
    (4096 -> 512 -> 3).
    """

    def __init__(self):
        super().__init__()
        # `pretrained=True` is deprecated since torchvision 0.13 (warns on
        # the pinned 0.15.2); the weights enum is the supported spelling and
        # loads the same ImageNet-1K weights.
        base = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        self.features = base.features
        self.avgpool = base.avgpool
        self.classifier = base.classifier
        # Swap the last FC layer (4096 -> 1000 ImageNet classes) for a
        # regularized 3-class head.
        self.classifier[6] = nn.Sequential(
            nn.Linear(4096, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 3),
        )

    def forward(self, x):
        """Run a batch of images through the network.

        Args:
            x: input tensor — presumably (N, 3, 224, 224); TODO confirm
               against the preprocessing pipeline.

        Returns:
            Raw class logits of shape (N, 3).
        """
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)
|
models/vgg16_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:76230eaadac39b356db06bc647089ce09ad04106a4641765cf8a30bfd1b6baf3
|
| 3 |
+
size 545461325
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.26.0
|
| 2 |
+
torch==2.0.1
|
| 3 |
+
torchvision==0.15.2
|
| 4 |
+
numpy==1.23.5
|
| 5 |
+
Pillow<10
|
| 6 |
+
opencv-python==4.8.0.76
|
| 7 |
+
joblib==1.3.2
|
| 8 |
+
grad-cam
|
scripts/xai_eval.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torchvision import models, transforms
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from pytorch_grad_cam import GradCAM
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
def load_model(model_path: str):
    """Load the fine-tuned VGG-16 checkpoint from *model_path*.

    Rebuilds the architecture used in training (VGG-16 with the last
    classifier layer replaced by a 512-unit, 3-class head), loads the
    saved state dict onto the CPU, and returns the model in eval mode.

    Args:
        model_path: path to a ``state_dict`` checkpoint (.pth).

    Returns:
        The reconstructed model, in evaluation mode.
    """
    # No pretrained download needed: every parameter is overwritten by the
    # checkpoint below. `weights=None` replaces the deprecated
    # `pretrained=False` spelling.
    vgg_model = models.vgg16(weights=None)
    vgg_model.classifier[6] = nn.Sequential(
        nn.Linear(vgg_model.classifier[6].in_features, 512),
        nn.BatchNorm1d(512),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(512, 3),
    )
    # weights_only=True restricts unpickling to tensors/primitive containers,
    # which is all a state dict holds — closes the arbitrary-code-execution
    # hole of loading an untrusted pickle.
    state_dict = torch.load(model_path, map_location='cpu', weights_only=True)
    vgg_model.load_state_dict(state_dict)
    vgg_model.eval()
    return vgg_model
| 19 |
+
|
| 20 |
+
def convert_to_gradcam(model):
    """Build a Grad-CAM explainer for *model*.

    The last layer of the VGG feature extractor is used as the target
    layer, since it holds the final convolutional activations.
    """
    last_feature_layer = model.features[-1]
    return GradCAM(model=model, target_layers=[last_feature_layer])
| 24 |
+
|
| 25 |
+
def preprocess_image(image):
    """Resize, tensorize, and normalize an image for the VGG-16 model.

    Args:
        image: a PIL-style image accepted by torchvision transforms.

    Returns:
        A normalized (3, 224, 224) tensor. Normalization uses
        dataset-specific channel statistics, not the ImageNet defaults.
    """
    channel_mean = [0.3205, 0.2244, 0.1613]
    channel_std = [0.2996, 0.2158, 0.1711]
    pipeline = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=channel_mean, std=channel_std),
    ])
    return pipeline(image)