Spaces:
Running
Running
full switch
Browse files- PCAM-pipeline.ipynb +0 -0
- README.md +1 -1
- app.py +19 -9
- results/pcam/20_06_2025_10_49_24/model_4.pt +3 -0
PCAM-pipeline.ipynb
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
README.md
CHANGED
|
@@ -51,4 +51,4 @@ The **PatchCamelyon (PCam)** benchmark is a challenging image classification dat
|
|
| 51 |
|
| 52 |
The submission on kaggle with the model trained on this notebook is
|
| 53 |
|
| 54 |
-
```Public score: 0.
|
|
|
|
| 51 |
|
| 52 |
The submission on kaggle with the model trained on this notebook is
|
| 53 |
|
| 54 |
+
```Public score: 0.9733```
|
app.py
CHANGED
|
@@ -1,30 +1,40 @@
|
|
| 1 |
import torch
|
| 2 |
import torchvision
|
|
|
|
| 3 |
from torch import nn
|
| 4 |
from torchvision import transforms
|
| 5 |
from torchvision.datasets import PCAM
|
| 6 |
import gradio as gr
|
| 7 |
from PIL import Image
|
|
|
|
| 8 |
|
| 9 |
# ---------------------------------
|
| 10 |
# 1. Load model
|
| 11 |
# ---------------------------------
|
| 12 |
torch.manual_seed(42)
|
| 13 |
torch.cuda.manual_seed_all(42)
|
| 14 |
-
model = torch.load("results/pcam/
|
| 15 |
model.eval()
|
| 16 |
|
| 17 |
# ---------------------------------
|
| 18 |
# 2. Define transform and dataset
|
| 19 |
# ---------------------------------
|
| 20 |
-
mean_stdev = [torch.Tensor([0.6981, 0.5428, 0.6933]), torch.Tensor([0.2222, 0.2665, 0.1985])]
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
def load_datasets(dataset_choice):
|
| 30 |
return [PCAM(root="data/", split=dataset_choice, download=True, transform=transform),
|
|
@@ -49,7 +59,7 @@ def get_sample(index: int, dataset_choice: str):
|
|
| 49 |
with torch.no_grad():
|
| 50 |
output = model(image_tensor.unsqueeze(0)).squeeze()
|
| 51 |
probability = torch.sigmoid(output)
|
| 52 |
-
predicted_label = "Tumor" if probability >= 0.
|
| 53 |
true_label = "Tumor" if ground_truth == 1 else "No Tumor"
|
| 54 |
error_label = ""
|
| 55 |
if predicted_label != true_label:
|
|
|
|
| 1 |
import torch
|
| 2 |
import torchvision
|
| 3 |
+
import numpy as np
|
| 4 |
from torch import nn
|
| 5 |
from torchvision import transforms
|
| 6 |
from torchvision.datasets import PCAM
|
| 7 |
import gradio as gr
|
| 8 |
from PIL import Image
|
| 9 |
+
import albumentations as A
|
| 10 |
|
| 11 |
# ---------------------------------
|
| 12 |
# 1. Load model
|
| 13 |
# ---------------------------------
|
| 14 |
# Reproducibility: fix the CPU and (all) CUDA RNG seeds before any model work.
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
# NOTE(review): weights_only=False makes torch.load unpickle arbitrary Python
# objects from the checkpoint — acceptable only because this .pt ships with the
# repo (LFS blob below); never load an untrusted checkpoint this way.
model = torch.load("results/pcam/20_06_2025_10_49_24/model_4.pt", map_location="cpu", weights_only=False)
model.eval()  # inference mode: freezes dropout / batch-norm statistics
|
| 18 |
|
| 19 |
# ---------------------------------
|
| 20 |
# 2. Define transform and dataset
|
| 21 |
# ---------------------------------
|
|
|
|
| 22 |
|
| 23 |
+
# Albumentations preprocessing applied to every PCAM patch:
# resize to the model's expected 224x224 input, normalize per channel
# ("image_per_channel" mode — presumably by the image's own per-channel
# stats; confirm against the Albumentations Normalize docs), then convert
# the HWC ndarray to a CHW torch tensor.
a_transform = A.Compose([
    A.Resize(224, 224),
    A.Normalize(normalization="image_per_channel", p=1.0),
    A.ToTensorV2()
])
|
| 28 |
+
class AlbumentationsToPytorchTransform:
    """Adapter that lets an Albumentations pipeline stand in for a
    torchvision-style transform: PIL image in, float32 tensor out.
    """

    def __init__(self, albumentations_transform):
        """Keep a reference to the Albumentations pipeline to run per call."""
        self.albumentations_transform = albumentations_transform

    def __call__(self, img):
        """Run the wrapped pipeline on *img* and return a float32 tensor."""
        # Albumentations operates on NumPy arrays, not PIL images.
        as_array = np.array(img)
        augmented = self.albumentations_transform(image=as_array)
        # The pipeline returns a dict; the processed image sits under "image".
        return augmented["image"].to(torch.float32)
|
| 36 |
+
|
| 37 |
+
# torchvision-compatible callable handed to the PCAM datasets below.
transform = AlbumentationsToPytorchTransform(a_transform)
|
| 38 |
|
| 39 |
def load_datasets(dataset_choice):
|
| 40 |
return [PCAM(root="data/", split=dataset_choice, download=True, transform=transform),
|
|
|
|
| 59 |
with torch.no_grad():
|
| 60 |
output = model(image_tensor.unsqueeze(0)).squeeze()
|
| 61 |
probability = torch.sigmoid(output)
|
| 62 |
+
predicted_label = "Tumor" if probability >= 0.4458489 else "No Tumor"
|
| 63 |
true_label = "Tumor" if ground_truth == 1 else "No Tumor"
|
| 64 |
error_label = ""
|
| 65 |
if predicted_label != true_label:
|
results/pcam/20_06_2025_10_49_24/model_4.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4ac9c1eef2742e3f95e354cca4062486a95e3e3952dc49863cef5b7217f5ae9d
|
| 3 |
+
size 54427294
|