| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | import logging |
| | import os |
| | import sys |
| |
|
| | import numpy as np |
| | import torch |
| | from torch.utils.data import DataLoader |
| |
|
| | import monai |
| | from monai.data import CSVSaver |
| | from monai.transforms import AddChanneld, Compose, LoadNiftid, Resized, ScaleIntensityd, ToTensord |
| |
|
| |
|
def main():
    """Evaluate a pretrained 3D DenseNet-121 binary classifier on ten IXI T1 volumes.

    Workflow:
      1. Build a dict-style validation list of {"img": path, "label": int} entries.
      2. Preprocess each volume (load NIfTI, add channel dim, scale intensity,
         resize to 96^3, convert to tensor).
      3. Load weights from ``best_metric_model_classification3d_dict.pth`` and
         run inference, accumulating accuracy and writing per-sample
         predictions to ``./output`` via CSVSaver.

    Side effects: logs to stdout, reads image/checkpoint files from disk,
    writes a CSV of predictions. Returns None.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Demo validation images shipped with the IXI dataset; all live under the
    # same directory, so build the paths from the bare filenames once.
    filenames = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", name])
        for name in filenames
    ]

    # Binary class labels; order matches `images` one-to-one.
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # Dictionary-based preprocessing pipeline applied to the "img" key only.
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # Pin memory only when a GPU is present (pinning is wasted work on CPU).
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    # map_location=device: without it, a checkpoint saved on GPU raises a CUDA
    # deserialization error on CPU-only machines, defeating the CPU fallback above.
    model.load_state_dict(torch.load("best_metric_model_classification3d_dict.pth", map_location=device))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
            # argmax over the 2 class logits -> predicted class index per sample.
            val_outputs = model(val_images).argmax(dim=1)
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            # Persist predictions alongside the source-image metadata for traceability.
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
| |
|
| |
|
# Run evaluation only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| |
|